Merge "Move some default-methods tests to Java from Smali."
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 22c0e8d..b3832ac 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -258,6 +258,7 @@
   compiler/elf_writer_test.cc \
   compiler/image_test.cc \
   compiler/jni/jni_compiler_test.cc \
+  compiler/linker/multi_oat_relative_patcher_test.cc \
   compiler/linker/output_stream_test.cc \
   compiler/oat_test.cc \
   compiler/optimizing/bounds_check_elimination_test.cc \
@@ -577,9 +578,6 @@
     LOCAL_MODULE_PATH_64 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_64)
     LOCAL_MULTILIB := both
     LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn  # gtest issue
-    # clang fails to compile art/runtime/arch/stub_test.cc for arm64 without -O1
-    # b/26275713
-    LOCAL_CLANG_CFLAGS_arm64 += -O1
     include $$(BUILD_EXECUTABLE)
     library_path :=
     2nd_library_path :=
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 3f61e8e..7a257b6 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -61,6 +61,7 @@
 	driver/dex_compilation_unit.cc \
 	linker/buffered_output_stream.cc \
 	linker/file_output_stream.cc \
+	linker/multi_oat_relative_patcher.cc \
 	linker/output_stream.cc \
 	linker/vector_output_stream.cc \
 	linker/relative_patcher.cc \
@@ -141,7 +142,9 @@
 	jni/quick/arm64/calling_convention_arm64.cc \
 	linker/arm64/relative_patcher_arm64.cc \
 	optimizing/code_generator_arm64.cc \
+	optimizing/instruction_simplifier_arm.cc \
 	optimizing/instruction_simplifier_arm64.cc \
+	optimizing/instruction_simplifier_shared.cc \
 	optimizing/intrinsics_arm64.cc \
 	utils/arm64/assembler_arm64.cc \
 	utils/arm64/managed_register_arm64.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index e4bfac9..239bc59 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -194,16 +194,15 @@
                                             kind,
                                             isa,
                                             instruction_set_features_.get(),
-                                            true,
+                                            /* boot_image */ true,
                                             GetImageClasses(),
                                             GetCompiledClasses(),
                                             GetCompiledMethods(),
-                                            2,
-                                            true,
-                                            true,
+                                            /* thread_count */ 2,
+                                            /* dump_stats */ true,
+                                            /* dump_passes */ true,
                                             timer_.get(),
-                                            -1,
-                                            /* dex_to_oat_map */ nullptr,
+                                            /* swap_fd */ -1,
                                             GetProfileCompilationInfo()));
  // We typically don't generate an image in unit tests; disable this optimization by default.
   compiler_driver_->SetSupportBootImageFixup(false);
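
The `/* parameter_name */ literal` annotations introduced across these call sites are a standard C++ substitute for named arguments: long argument lists of bare booleans and integers are unreadable and easy to permute silently. A minimal sketch of the convention, using a hypothetical function rather than the real CompilerDriver constructor:

    #include <cstddef>

    // Hypothetical function with several easily-confused parameters.
    void Configure(bool boot_image, size_t thread_count, bool dump_stats, int swap_fd);

    void Caller() {
      // Without annotations the call reads as Configure(true, 2, true, -1).
      // With /* name */ comments each literal documents the parameter it binds
      // to, and a reviewer can spot a swapped pair at the call site.
      Configure(/* boot_image */ true,
                /* thread_count */ 2,
                /* dump_stats */ true,
                /* swap_fd */ -1);
    }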
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index 8fd20aa..32f624a 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -232,8 +232,7 @@
         // kInStackLargeOffset and kConstantLargeValue are hidden by GetKind().
         // kInRegisterHigh and kInFpuRegisterHigh should be handled by
         // the special cases above and they should not occur alone.
-        LOG(ERROR) << "Unexpected register location kind: "
-                   << DexRegisterLocation::PrettyDescriptor(kind);
+        LOG(ERROR) << "Unexpected register location kind: " << kind;
         break;
       }
       if (is64bitValue) {
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index b78b3d7..8800e4b 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -569,6 +569,7 @@
   kStoreStore,
   kAnyAny,
   kNTStoreStore,
+  kLastBarrierKind = kNTStoreStore
 };
 std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
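
Aliasing the final enumerator (kLastBarrierKind = kNTStoreStore) is the usual C++ way to let other code size tables or range-check values without hard-coding a count that rots when the enum grows. A sketch of the idiom with an abbreviated enum and illustrative uses not taken from this patch:

    enum MemBarrierKind {
      kAnyStore,
      kStoreStore,
      kAnyAny,
      kNTStoreStore,
      kLastBarrierKind = kNTStoreStore  // Alias, not a distinct value.
    };

    // Size a parallel lookup table from the sentinel.
    const char* kBarrierNames[kLastBarrierKind + 1] = {
        "AnyStore", "StoreStore", "AnyAny", "NTStoreStore",
    };

    // Range-check an incoming value against the sentinel.
    inline bool IsValidBarrierKind(int kind) {
      return kind >= 0 && kind <= kLastBarrierKind;
    }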
 
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 209f101..ad4ddad 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -37,6 +37,8 @@
 static constexpr bool kIntrinsicIsStatic[] = {
     true,   // kIntrinsicDoubleCvt
     true,   // kIntrinsicFloatCvt
+    true,   // kIntrinsicFloat2Int
+    true,   // kIntrinsicDouble2Long
     true,   // kIntrinsicFloatIsInfinite
     true,   // kIntrinsicDoubleIsInfinite
     true,   // kIntrinsicFloatIsNaN
@@ -106,6 +108,8 @@
               "arraysize of kIntrinsicIsStatic unexpected");
 static_assert(kIntrinsicIsStatic[kIntrinsicDoubleCvt], "DoubleCvt must be static");
 static_assert(kIntrinsicIsStatic[kIntrinsicFloatCvt], "FloatCvt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicFloat2Int], "Float2Int must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicDouble2Long], "Double2Long must be static");
 static_assert(kIntrinsicIsStatic[kIntrinsicFloatIsInfinite], "FloatIsInfinite must be static");
 static_assert(kIntrinsicIsStatic[kIntrinsicDoubleIsInfinite], "DoubleIsInfinite must be static");
 static_assert(kIntrinsicIsStatic[kIntrinsicFloatIsNaN], "FloatIsNaN must be static");
@@ -277,6 +281,8 @@
     "equals",                // kNameCacheEquals
     "getCharsNoCheck",       // kNameCacheGetCharsNoCheck
     "isEmpty",               // kNameCacheIsEmpty
+    "floatToIntBits",        // kNameCacheFloatToIntBits
+    "doubleToLongBits",      // kNameCacheDoubleToLongBits
     "isInfinite",            // kNameCacheIsInfinite
     "isNaN",                 // kNameCacheIsNaN
     "indexOf",               // kNameCacheIndexOf
@@ -472,6 +478,9 @@
     INTRINSIC(JavaLangFloat, FloatToRawIntBits, F_I, kIntrinsicFloatCvt, 0),
     INTRINSIC(JavaLangFloat, IntBitsToFloat, I_F, kIntrinsicFloatCvt, kIntrinsicFlagToFloatingPoint),
 
+    INTRINSIC(JavaLangFloat, FloatToIntBits, F_I, kIntrinsicFloat2Int, 0),
+    INTRINSIC(JavaLangDouble, DoubleToLongBits, D_J, kIntrinsicDouble2Long, 0),
+
     INTRINSIC(JavaLangFloat, IsInfinite, F_Z, kIntrinsicFloatIsInfinite, 0),
     INTRINSIC(JavaLangDouble, IsInfinite, D_Z, kIntrinsicDoubleIsInfinite, 0),
     INTRINSIC(JavaLangFloat, IsNaN, F_Z, kIntrinsicFloatIsNaN, 0),
@@ -791,6 +800,8 @@
                                           intrinsic.d.data & kIntrinsicFlagIsOrdered);
     case kIntrinsicSystemArrayCopyCharArray:
       return backend->GenInlinedArrayCopyCharArray(info);
+    case kIntrinsicFloat2Int:
+    case kIntrinsicDouble2Long:
     case kIntrinsicFloatIsInfinite:
     case kIntrinsicDoubleIsInfinite:
     case kIntrinsicFloatIsNaN:
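
Each new intrinsic here must be registered in several parallel places — the kIntrinsicIsStatic table, the name cache, the INTRINSIC() entries, and the dispatch switch — and the static_asserts are what keep the table and the enum from drifting apart. A reduced sketch of the pattern (abbreviated names, not the real ART definitions):

    // Enum and a parallel table: the static_asserts fail to compile if an entry
    // is added to one but not the other, or lands in the wrong slot.
    enum InlineMethodOpcode {
      kIntrinsicFloatCvt,
      kIntrinsicFloat2Int,
      kIntrinsicDouble2Long,
      kInlineOpNop,  // First non-intrinsic opcode; doubles as the table size.
    };

    static constexpr bool kIntrinsicIsStatic[] = {
        true,  // kIntrinsicFloatCvt
        true,  // kIntrinsicFloat2Int
        true,  // kIntrinsicDouble2Long
    };

    static_assert(sizeof(kIntrinsicIsStatic) / sizeof(kIntrinsicIsStatic[0]) == kInlineOpNop,
                  "arraysize of kIntrinsicIsStatic unexpected");
    static_assert(kIntrinsicIsStatic[kIntrinsicFloat2Int], "Float2Int must be static");
    static_assert(kIntrinsicIsStatic[kIntrinsicDouble2Long], "Double2Long must be static");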
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 59b8a53..b465db2 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -190,6 +190,8 @@
       kNameCacheEquals,
       kNameCacheGetCharsNoCheck,
       kNameCacheIsEmpty,
+      kNameCacheFloatToIntBits,
+      kNameCacheDoubleToLongBits,
       kNameCacheIsInfinite,
       kNameCacheIsNaN,
       kNameCacheIndexOf,
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 422d82f..11d0c9a 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -1140,6 +1140,12 @@
   RegLocation rl_offset = info->args[0];
   RegLocation rl_count = info->args[1];
   RegLocation rl_data = info->args[2];
+  // No need to emit code checking whether `rl_data` is a null
+  // pointer, as callers of the native method
+  //
+  //   java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+  //
+  // all include a null check on `data` before calling that method.
   CallRuntimeHelperRegLocationRegLocationRegLocation(
       kQuickAllocStringFromChars, rl_offset, rl_count, rl_data, true);
   RegLocation rl_return = GetReturn(kRefReg);
@@ -1357,6 +1363,7 @@
     LoadValueDirectFixed(rl_start, reg_start);
   }
   RegStorage r_tgt = LoadHelper(kQuickIndexOf);
+  CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
   GenExplicitNullCheck(reg_ptr, info->opt_flags);
   LIR* high_code_point_branch =
       rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
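
The added CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>() call is a zero-cost, compile-time assertion that the signature the compiler assumes for the entrypoint matches the runtime's declaration. A minimal sketch of how such a guard can be built in plain C++ (not ART's actual template):

    #include <cstdint>
    #include <type_traits>

    // Stand-in for a runtime entrypoint declaration.
    int32_t artIndexOf(void* str, uint32_t ch, uint32_t start);

    // Instantiating this with mismatching types fails the build;
    // the call itself compiles to nothing.
    template <typename Expected, typename Actual>
    void CheckSignature() {
      static_assert(std::is_same<Expected, Actual>::value,
                    "entrypoint signature mismatch");
    }

    void Caller() {
      // Asserts that artIndexOf still has the shape the generated code assumes.
      CheckSignature<int32_t(void*, uint32_t, uint32_t), decltype(artIndexOf)>();
    }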
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 0cd41bb..6c6c9cf 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -84,17 +84,16 @@
                           Compiler::kQuick,
                           isa,
                           isa_features.get(),
-                          false,
-                          nullptr,
-                          nullptr,
-                          nullptr,
-                          0,
-                          false,
-                          false,
-                          0,
-                          -1,
-                          nullptr,
-                          nullptr);
+                          /* boot_image */ false,
+                          /* image_classes */ nullptr,
+                          /* compiled_classes */ nullptr,
+                          /* compiled_methods */ nullptr,
+                          /* thread_count */ 0,
+                          /* dump_stats */ false,
+                          /* dump_passes */ false,
+                          /* timer */ nullptr,
+                          /* swap_fd */ -1,
+                          /* profile_compilation_info */ nullptr);
     ClassLinker* linker = nullptr;
     CompilationUnit cu(&pool, isa, &driver, linker);
     DexFile::CodeItem code_item { 0, 0, 0, 0, 0, 0, { 0 } };  // NOLINT
diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
index efdc333..ff0ecea 100644
--- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc
+++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
@@ -64,18 +64,17 @@
         method_inliner_map_.get(),
         Compiler::kQuick,
         isa_,
-        nullptr,
-        false,
-        nullptr,
-        nullptr,
-        nullptr,
-        0,
-        false,
-        false,
-        0,
-        -1,
-        nullptr,
-        nullptr));
+        /* instruction_set_features */ nullptr,
+        /* boot_image */ false,
+        /* image_classes */ nullptr,
+        /* compiled_classes */ nullptr,
+        /* compiled_methods */ nullptr,
+        /* thread_count */ 0,
+        /* dump_stats */ false,
+        /* dump_passes */ false,
+        /* timer */ nullptr,
+        /* swap_fd */ -1,
+        /* profile_compilation_info */ nullptr));
     cu_.reset(new CompilationUnit(pool_.get(), isa_, compiler_driver_.get(), nullptr));
     DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
         cu_->arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 2e2d1f9..0695cb5 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -32,19 +32,19 @@
   CompilerDriver driver(&compiler_options,
                         &verification_results,
                         &method_inliner_map,
-                        Compiler::kOptimizing, kNone,
-                        nullptr,
-                        false,
-                        nullptr,
-                        nullptr,
-                        nullptr,
-                        1u,
-                        false,
-                        false,
-                        nullptr,
-                        -1,
-                        nullptr,
-                        nullptr);
+                        Compiler::kOptimizing,
+                        /* instruction_set */ kNone,
+                        /* instruction_set_features */ nullptr,
+                        /* boot_image */ false,
+                        /* image_classes */ nullptr,
+                        /* compiled_classes */ nullptr,
+                        /* compiled_methods */ nullptr,
+                        /* thread_count */ 1u,
+                        /* dump_stats */ false,
+                        /* dump_passes */ false,
+                        /* timer */ nullptr,
+                        /* swap_fd */ -1,
+                        /* profile_compilation_info */ nullptr);
   CompiledMethodStorage* storage = driver.GetCompiledMethodStorage();
 
   ASSERT_TRUE(storage->DedupeEnabled());  // The default.
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index db8c3ab..3100b6d 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -342,12 +342,15 @@
     Compiler::Kind compiler_kind,
     InstructionSet instruction_set,
     const InstructionSetFeatures* instruction_set_features,
-    bool boot_image, std::unordered_set<std::string>* image_classes,
+    bool boot_image,
+    std::unordered_set<std::string>* image_classes,
     std::unordered_set<std::string>* compiled_classes,
     std::unordered_set<std::string>* compiled_methods,
-    size_t thread_count, bool dump_stats, bool dump_passes,
-    CumulativeLogger* timer, int swap_fd,
-    const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map,
+    size_t thread_count,
+    bool dump_stats,
+    bool dump_passes,
+    CumulativeLogger* timer,
+    int swap_fd,
     const ProfileCompilationInfo* profile_compilation_info)
     : compiler_options_(compiler_options),
       verification_results_(verification_results),
@@ -374,7 +377,6 @@
       compiler_context_(nullptr),
       support_boot_image_fixup_(instruction_set != kMips && instruction_set != kMips64),
       dex_files_for_oat_file_(nullptr),
-      dex_file_oat_filename_map_(dex_to_oat_map),
       compiled_method_storage_(swap_fd),
       profile_compilation_info_(profile_compilation_info) {
   DCHECK(compiler_options_ != nullptr);
@@ -382,7 +384,9 @@
 
   compiler_->Init();
 
-  CHECK_EQ(boot_image_, image_classes_.get() != nullptr);
+  if (boot_image_) {
+    CHECK(image_classes_.get() != nullptr) << "Expected image classes for boot image";
+  }
 }
 
 CompilerDriver::~CompilerDriver() {
@@ -866,12 +870,13 @@
 }
 
 bool CompilerDriver::IsImageClass(const char* descriptor) const {
-  if (!IsBootImage()) {
-    // NOTE: Currently only reachable from InitImageMethodVisitor for the app image case.
-    return true;
-  } else {
+  if (image_classes_ != nullptr) {
+    // If we have a set of image classes, use those.
     return image_classes_->find(descriptor) != image_classes_->end();
   }
+  // No set of image classes; assume we include all the classes.
+  // NOTE: Currently only reachable from InitImageMethodVisitor for the app image case.
+  return !IsBootImage();
 }
 
 bool CompilerDriver::IsClassToCompile(const char* descriptor) const {
@@ -1077,10 +1082,8 @@
                              image_classes);
     }
     for (auto& m : c->GetVirtualMethods(pointer_size)) {
-      if (m.IsMiranda() || (true)) {
-        StackHandleScope<1> hs2(self);
-        MaybeAddToImageClasses(hs2.NewHandle(m.GetDeclaringClass()), image_classes);
-      }
+      StackHandleScope<1> hs2(self);
+      MaybeAddToImageClasses(hs2.NewHandle(m.GetDeclaringClass()), image_classes);
     }
     if (klass->IsArrayClass()) {
       StackHandleScope<1> hs2(self);
@@ -1677,12 +1680,6 @@
       use_dex_cache = true;
     }
   }
-  if (!use_dex_cache && IsBootImage()) {
-    if (!AreInSameOatFile(&(const_cast<mirror::Class*>(referrer_class)->GetDexFile()),
-                          &declaring_class->GetDexFile())) {
-      use_dex_cache = true;
-    }
-  }
   // The method is defined not within this dex file. We need a dex cache slot within the current
   // dex file or direct pointers.
   bool must_use_direct_pointers = false;
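
The driver changes loosen an invariant: the old CHECK_EQ demanded boot_image if and only if an image-class set was supplied, while the new CHECK only demands that a boot image has one, so app compiles may now pass a set too. IsImageClass() then reduces to the following decision, restated here as a free function purely for illustration:

    #include <string>
    #include <unordered_set>

    // Mirrors the rewritten CompilerDriver::IsImageClass() logic.
    bool IsImageClass(bool boot_image,
                      const std::unordered_set<std::string>* image_classes,
                      const char* descriptor) {
      if (image_classes != nullptr) {
        // An explicit set always wins, for boot and app images alike.
        return image_classes->count(descriptor) != 0u;
      }
      // No set: app images include everything; a boot image cannot reach here
      // because the constructor CHECKs that it always has a set.
      return !boot_image;
    }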
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index d8f23f7..42a5bc1 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -94,9 +94,11 @@
                  bool boot_image, std::unordered_set<std::string>* image_classes,
                  std::unordered_set<std::string>* compiled_classes,
                  std::unordered_set<std::string>* compiled_methods,
-                 size_t thread_count, bool dump_stats, bool dump_passes,
-                 CumulativeLogger* timer, int swap_fd,
-                 const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map,
+                 size_t thread_count,
+                 bool dump_stats,
+                 bool dump_passes,
+                 CumulativeLogger* timer,
+                 int swap_fd,
                  const ProfileCompilationInfo* profile_compilation_info);
 
   ~CompilerDriver();
@@ -113,20 +115,6 @@
         : ArrayRef<const DexFile* const>();
   }
 
-  // Are the given dex files compiled into the same oat file? Should only be called after
-  // GetDexFilesForOatFile, as the conservative answer (when we don't have a map) is true.
-  bool AreInSameOatFile(const DexFile* d1, const DexFile* d2) {
-    if (dex_file_oat_filename_map_ == nullptr) {
-      // TODO: Check for this wrt/ apps and boot image calls.
-      return true;
-    }
-    auto it1 = dex_file_oat_filename_map_->find(d1);
-    DCHECK(it1 != dex_file_oat_filename_map_->end());
-    auto it2 = dex_file_oat_filename_map_->find(d2);
-    DCHECK(it2 != dex_file_oat_filename_map_->end());
-    return it1->second == it2->second;
-  }
-
   void CompileAll(jobject class_loader,
                   const std::vector<const DexFile*>& dex_files,
                   TimingLogger* timings)
@@ -701,9 +689,6 @@
   // List of dex files that will be stored in the oat file.
   const std::vector<const DexFile*>* dex_files_for_oat_file_;
 
-  // Map from dex files to the oat file (name) they will be compiled into.
-  const std::unordered_map<const DexFile*, const char*>* dex_file_oat_filename_map_;
-
   CompiledMethodStorage compiled_method_storage_;
 
   // Info for profile guided compilation.
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 4785885..0037564 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -250,8 +250,8 @@
     ProfileCompilationInfo info;
     for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
       std::string key = ProfileCompilationInfo::GetProfileDexFileKey(dex_file->GetLocation());
-      profile_info_.AddData(key, dex_file->GetLocationChecksum(), 1);
-      profile_info_.AddData(key, dex_file->GetLocationChecksum(), 2);
+      profile_info_.AddMethodIndex(key, dex_file->GetLocationChecksum(), 1);
+      profile_info_.AddMethodIndex(key, dex_file->GetLocationChecksum(), 2);
     }
     return &profile_info_;
   }
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index b673eeb..f7da609 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -86,12 +86,24 @@
   // Base class of all sections.
   class Section : public OutputStream {
    public:
-    Section(ElfBuilder<ElfTypes>* owner, const std::string& name,
-            Elf_Word type, Elf_Word flags, const Section* link,
-            Elf_Word info, Elf_Word align, Elf_Word entsize)
-        : OutputStream(name), owner_(owner), header_(),
-          section_index_(0), name_(name), link_(link),
-          started_(false), finished_(false), phdr_flags_(PF_R), phdr_type_(0) {
+    Section(ElfBuilder<ElfTypes>* owner,
+            const std::string& name,
+            Elf_Word type,
+            Elf_Word flags,
+            const Section* link,
+            Elf_Word info,
+            Elf_Word align,
+            Elf_Word entsize)
+        : OutputStream(name),
+          owner_(owner),
+          header_(),
+          section_index_(0),
+          name_(name),
+          link_(link),
+          started_(false),
+          finished_(false),
+          phdr_flags_(PF_R),
+          phdr_type_(0) {
       DCHECK_GE(align, 1u);
       header_.sh_type = type;
       header_.sh_flags = flags;
@@ -228,12 +240,84 @@
     DISALLOW_COPY_AND_ASSIGN(Section);
   };
 
-  // Writer of .dynstr .strtab and .shstrtab sections.
+  class CachedSection : public Section {
+   public:
+    CachedSection(ElfBuilder<ElfTypes>* owner,
+                  const std::string& name,
+                  Elf_Word type,
+                  Elf_Word flags,
+                  const Section* link,
+                  Elf_Word info,
+                  Elf_Word align,
+                  Elf_Word entsize)
+        : Section(owner, name, type, flags, link, info, align, entsize), cache_() { }
+
+    Elf_Word Add(const void* data, size_t length) {
+      Elf_Word offset = cache_.size();
+      const uint8_t* d = reinterpret_cast<const uint8_t*>(data);
+      cache_.insert(cache_.end(), d, d + length);
+      return offset;
+    }
+
+    Elf_Word GetCacheSize() {
+      return cache_.size();
+    }
+
+    void Write() {
+      this->WriteFully(cache_.data(), cache_.size());
+      cache_.clear();
+      cache_.shrink_to_fit();
+    }
+
+    void WriteCachedSection() {
+      this->Start();
+      Write();
+      this->End();
+    }
+
+   private:
+    std::vector<uint8_t> cache_;
+  };
+
+  // Writer of .dynstr section.
+  class CachedStringSection FINAL : public CachedSection {
+   public:
+    CachedStringSection(ElfBuilder<ElfTypes>* owner,
+                        const std::string& name,
+                        Elf_Word flags,
+                        Elf_Word align)
+        : CachedSection(owner,
+                        name,
+                        SHT_STRTAB,
+                        flags,
+                        /* link */ nullptr,
+                        /* info */ 0,
+                        align,
+                        /* entsize */ 0) { }
+
+    Elf_Word Add(const std::string& name) {
+      if (CachedSection::GetCacheSize() == 0u) {
+        DCHECK(name.empty());
+      }
+      return CachedSection::Add(name.c_str(), name.length() + 1);
+    }
+  };
+
+  // Writer of .strtab and .shstrtab sections.
   class StringSection FINAL : public Section {
    public:
-    StringSection(ElfBuilder<ElfTypes>* owner, const std::string& name,
-                  Elf_Word flags, Elf_Word align)
-        : Section(owner, name, SHT_STRTAB, flags, nullptr, 0, align, 0),
+    StringSection(ElfBuilder<ElfTypes>* owner,
+                  const std::string& name,
+                  Elf_Word flags,
+                  Elf_Word align)
+        : Section(owner,
+                  name,
+                  SHT_STRTAB,
+                  flags,
+                  /* link */ nullptr,
+                  /* info */ 0,
+                  align,
+                  /* entsize */ 0),
           current_offset_(0) {
     }
 
@@ -252,42 +336,60 @@
   };
 
   // Writer of .dynsym and .symtab sections.
-  class SymbolSection FINAL : public Section {
+  class SymbolSection FINAL : public CachedSection {
    public:
-    SymbolSection(ElfBuilder<ElfTypes>* owner, const std::string& name,
-                  Elf_Word type, Elf_Word flags, StringSection* strtab)
-        : Section(owner, name, type, flags, strtab, 0,
-                  sizeof(Elf_Off), sizeof(Elf_Sym)) {
+    SymbolSection(ElfBuilder<ElfTypes>* owner,
+                  const std::string& name,
+                  Elf_Word type,
+                  Elf_Word flags,
+                  Section* strtab)
+        : CachedSection(owner,
+                        name,
+                        type,
+                        flags,
+                        strtab,
+                        /* info */ 0,
+                        sizeof(Elf_Off),
+                        sizeof(Elf_Sym)) {
+      // The symbol table always has to start with the NULL symbol.
+      Elf_Sym null_symbol = Elf_Sym();
+      CachedSection::Add(&null_symbol, sizeof(null_symbol));
     }
 
     // Buffer symbol for this section.  It will be written later.
     // If the symbol's section is null, it will be considered absolute (SHN_ABS).
     // (we use this in JIT to reference code which is stored outside the debug ELF file)
-    void Add(Elf_Word name, const Section* section,
-             Elf_Addr addr, bool is_relative, Elf_Word size,
-             uint8_t binding, uint8_t type, uint8_t other = 0) {
+    void Add(Elf_Word name,
+             const Section* section,
+             Elf_Addr addr,
+             bool is_relative,
+             Elf_Word size,
+             uint8_t binding,
+             uint8_t type,
+             uint8_t other = 0) {
+      DCHECK(section != nullptr || !is_relative);
+      Elf_Addr abs_addr = addr + (is_relative ? section->GetAddress() : 0);
+      Elf_Word section_index =
+          (section != nullptr) ? section->GetSectionIndex() : static_cast<Elf_Word>(SHN_ABS);
+      Add(name, section_index, abs_addr, size, binding, type, other);
+    }
+
+    void Add(Elf_Word name,
+             Elf_Word section_index,
+             Elf_Addr addr,
+             Elf_Word size,
+             uint8_t binding,
+             uint8_t type,
+             uint8_t other = 0) {
       Elf_Sym sym = Elf_Sym();
       sym.st_name = name;
-      sym.st_value = addr + (is_relative ? section->GetAddress() : 0);
+      sym.st_value = addr;
       sym.st_size = size;
       sym.st_other = other;
-      sym.st_shndx = (section != nullptr ? section->GetSectionIndex()
-                                         : static_cast<Elf_Word>(SHN_ABS));
+      sym.st_shndx = section_index;
       sym.st_info = (binding << 4) + (type & 0xf);
-      symbols_.push_back(sym);
+      CachedSection::Add(&sym, sizeof(sym));
     }
-
-    void Write() {
-      // The symbol table always has to start with NULL symbol.
-      Elf_Sym null_symbol = Elf_Sym();
-      this->WriteFully(&null_symbol, sizeof(null_symbol));
-      this->WriteFully(symbols_.data(), symbols_.size() * sizeof(symbols_[0]));
-      symbols_.clear();
-      symbols_.shrink_to_fit();
-    }
-
-   private:
-    std::vector<Elf_Sym> symbols_;
   };
 
   ElfBuilder(InstructionSet isa, OutputStream* output)
@@ -309,6 +411,8 @@
         debug_line_(this, ".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0),
         shstrtab_(this, ".shstrtab", 0, 1),
         started_(false),
+        write_program_headers_(false),
+        loaded_size_(0u),
         virtual_address_(0) {
     text_.phdr_flags_ = PF_R | PF_X;
     bss_.phdr_flags_ = PF_R | PF_W;
@@ -380,6 +484,14 @@
   void End() {
     DCHECK(started_);
 
+    // Note: loaded_size_ == 0 for tests that don't write .rodata, .text, .bss,
+    // .dynstr, .dynsym, .hash and .dynamic. These tests should not read loaded_size_.
+    // TODO: Either refactor the .eh_frame creation so that it counts towards loaded_size_,
+    // or remove all support for .eh_frame. (The currently unused .eh_frame counts towards
+    // the virtual_address_ but we don't consider it for loaded_size_.)
+    CHECK(loaded_size_ == 0 || loaded_size_ == RoundUp(virtual_address_, kPageSize))
+        << loaded_size_ << " " << virtual_address_;
+
     // Write section names and finish the section headers.
     shstrtab_.Start();
     shstrtab_.Write("");
@@ -434,45 +546,58 @@
   // information like the address and size of .rodata and .text.
   // It also contains other metadata like the SONAME.
   // The .dynamic section is found using the PT_DYNAMIC program header.
-  void WriteDynamicSection(const std::string& elf_file_path) {
+  void PrepareDynamicSection(const std::string& elf_file_path,
+                             Elf_Word rodata_size,
+                             Elf_Word text_size,
+                             Elf_Word bss_size) {
     std::string soname(elf_file_path);
     size_t directory_separator_pos = soname.rfind('/');
     if (directory_separator_pos != std::string::npos) {
       soname = soname.substr(directory_separator_pos + 1);
     }
 
-    dynstr_.Start();
-    dynstr_.Write("");  // dynstr should start with empty string.
-    dynsym_.Add(dynstr_.Write("oatdata"), &rodata_, 0, true,
-                rodata_.GetSize(), STB_GLOBAL, STT_OBJECT);
-    if (text_.GetSize() != 0u) {
-      dynsym_.Add(dynstr_.Write("oatexec"), &text_, 0, true,
-                  text_.GetSize(), STB_GLOBAL, STT_OBJECT);
-      dynsym_.Add(dynstr_.Write("oatlastword"), &text_, text_.GetSize() - 4,
-                  true, 4, STB_GLOBAL, STT_OBJECT);
-    } else if (rodata_.GetSize() != 0) {
-      // rodata_ can be size 0 for dwarf_test.
-      dynsym_.Add(dynstr_.Write("oatlastword"), &rodata_, rodata_.GetSize() - 4,
-                  true, 4, STB_GLOBAL, STT_OBJECT);
-    }
-    if (bss_.finished_) {
-      dynsym_.Add(dynstr_.Write("oatbss"), &bss_,
-                  0, true, bss_.GetSize(), STB_GLOBAL, STT_OBJECT);
-      dynsym_.Add(dynstr_.Write("oatbsslastword"), &bss_,
-                  bss_.GetSize() - 4, true, 4, STB_GLOBAL, STT_OBJECT);
-    }
-    Elf_Word soname_offset = dynstr_.Write(soname);
-    dynstr_.End();
+    // Calculate addresses of .text, .bss and .dynstr.
+    DCHECK_EQ(rodata_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize));
+    DCHECK_EQ(text_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize));
+    DCHECK_EQ(bss_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize));
+    DCHECK_EQ(dynstr_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize));
+    Elf_Word rodata_address = rodata_.GetAddress();
+    Elf_Word text_address = RoundUp(rodata_address + rodata_size, kPageSize);
+    Elf_Word bss_address = RoundUp(text_address + text_size, kPageSize);
+    Elf_Word dynstr_address = RoundUp(bss_address + bss_size, kPageSize);
 
-    dynsym_.Start();
-    dynsym_.Write();
-    dynsym_.End();
+    // Cache .dynstr, .dynsym and .hash data.
+    dynstr_.Add("");  // dynstr should start with empty string.
+    Elf_Word rodata_index = rodata_.GetSectionIndex();
+    Elf_Word oatdata = dynstr_.Add("oatdata");
+    dynsym_.Add(oatdata, rodata_index, rodata_address, rodata_size, STB_GLOBAL, STT_OBJECT);
+    if (text_size != 0u) {
+      Elf_Word text_index = rodata_index + 1u;
+      Elf_Word oatexec = dynstr_.Add("oatexec");
+      dynsym_.Add(oatexec, text_index, text_address, text_size, STB_GLOBAL, STT_OBJECT);
+      Elf_Word oatlastword = dynstr_.Add("oatlastword");
+      Elf_Word oatlastword_address = text_address + text_size - 4;
+      dynsym_.Add(oatlastword, text_index, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT);
+    } else if (rodata_size != 0) {
+      // rodata_ can be size 0 for dwarf_test.
+      Elf_Word oatlastword = dynstr_.Add("oatlastword");
+      Elf_Word oatlastword_address = rodata_address + rodata_size - 4;
+      dynsym_.Add(oatlastword, rodata_index, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT);
+    }
+    if (bss_size != 0u) {
+      Elf_Word bss_index = rodata_index + 1u + (text_size != 0 ? 1u : 0u);
+      Elf_Word oatbss = dynstr_.Add("oatbss");
+      dynsym_.Add(oatbss, bss_index, bss_address, bss_size, STB_GLOBAL, STT_OBJECT);
+      Elf_Word oatbsslastword = dynstr_.Add("oatbsslastword");
+      Elf_Word bsslastword_address = bss_address + bss_size - 4;
+      dynsym_.Add(oatbsslastword, bss_index, bsslastword_address, 4, STB_GLOBAL, STT_OBJECT);
+    }
+    Elf_Word soname_offset = dynstr_.Add(soname);
 
    // We do not really need a hash-table since there are so few entries.
     // However, the hash-table is the only way the linker can actually
     // determine the number of symbols in .dynsym so it is required.
-    hash_.Start();
-    int count = dynsym_.GetSize() / sizeof(Elf_Sym);  // Includes NULL.
+    int count = dynsym_.GetCacheSize() / sizeof(Elf_Sym);  // Includes NULL.
     std::vector<Elf_Word> hash;
     hash.push_back(1);  // Number of buckets.
     hash.push_back(count);  // Number of chains.
@@ -484,21 +609,44 @@
       hash.push_back(i + 1);  // Each symbol points to the next one.
     }
     hash.push_back(0);  // Last symbol terminates the chain.
-    hash_.WriteFully(hash.data(), hash.size() * sizeof(hash[0]));
-    hash_.End();
+    hash_.Add(hash.data(), hash.size() * sizeof(hash[0]));
 
-    dynamic_.Start();
+    // Calculate addresses of .dynsym, .hash and .dynamic.
+    DCHECK_EQ(dynstr_.header_.sh_flags, dynsym_.header_.sh_flags);
+    DCHECK_EQ(dynsym_.header_.sh_flags, hash_.header_.sh_flags);
+    Elf_Word dynsym_address =
+        RoundUp(dynstr_address + dynstr_.GetCacheSize(), dynsym_.header_.sh_addralign);
+    Elf_Word hash_address =
+        RoundUp(dynsym_address + dynsym_.GetCacheSize(), hash_.header_.sh_addralign);
+    DCHECK_EQ(dynamic_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize));
+    Elf_Word dynamic_address = RoundUp(hash_address + hash_.GetCacheSize(), kPageSize);
+
     Elf_Dyn dyns[] = {
-      { DT_HASH, { hash_.GetAddress() } },
-      { DT_STRTAB, { dynstr_.GetAddress() } },
-      { DT_SYMTAB, { dynsym_.GetAddress() } },
+      { DT_HASH, { hash_address } },
+      { DT_STRTAB, { dynstr_address } },
+      { DT_SYMTAB, { dynsym_address } },
       { DT_SYMENT, { sizeof(Elf_Sym) } },
-      { DT_STRSZ, { dynstr_.GetSize() } },
+      { DT_STRSZ, { dynstr_.GetCacheSize() } },
       { DT_SONAME, { soname_offset } },
       { DT_NULL, { 0 } },
     };
-    dynamic_.WriteFully(&dyns, sizeof(dyns));
-    dynamic_.End();
+    dynamic_.Add(&dyns, sizeof(dyns));
+
+    loaded_size_ = RoundUp(dynamic_address + dynamic_.GetCacheSize(), kPageSize);
+  }
+
+  void WriteDynamicSection() {
+    dynstr_.WriteCachedSection();
+    dynsym_.WriteCachedSection();
+    hash_.WriteCachedSection();
+    dynamic_.WriteCachedSection();
+
+    CHECK_EQ(loaded_size_, RoundUp(dynamic_.GetAddress() + dynamic_.GetSize(), kPageSize));
+  }
+
+  Elf_Word GetLoadedSize() {
+    CHECK_NE(loaded_size_, 0u);
+    return loaded_size_;
   }
 
   // Returns true if all writes and seeks on the output stream succeeded.
@@ -676,10 +824,10 @@
   Section rodata_;
   Section text_;
   Section bss_;
-  StringSection dynstr_;
+  CachedStringSection dynstr_;
   SymbolSection dynsym_;
-  Section hash_;
-  Section dynamic_;
+  CachedSection hash_;
+  CachedSection dynamic_;
   Section eh_frame_;
   Section eh_frame_hdr_;
   StringSection strtab_;
@@ -694,12 +842,14 @@
   std::vector<Section*> sections_;
 
   bool started_;
+  bool write_program_headers_;
+
+  // The size of the memory taken by the ELF file when loaded.
+  size_t loaded_size_;
 
   // Used for allocation of virtual address space.
   Elf_Addr virtual_address_;
 
-  size_t write_program_headers_;
-
   DISALLOW_COPY_AND_ASSIGN(ElfBuilder);
 };
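
The point of the CachedSection machinery is a two-pass scheme: buffer the payloads of .dynstr, .dynsym, .hash and .dynamic in memory, compute every section address up front from the sizes alone, and only then stream the bytes out. Since all addresses are known before anything is written, the total loaded size is too. A standalone sketch of the address arithmetic PrepareDynamicSection() performs (the non-page alignments are illustrative stand-ins for the real sh_addralign values):

    #include <cstdint>

    constexpr uint64_t kPageSize = 4096;

    uint64_t RoundUp(uint64_t value, uint64_t align) {
      return (value + align - 1) & ~(align - 1);  // align must be a power of two.
    }

    // With all payloads cached, every address -- and thus the loaded size --
    // follows from the sizes before a single byte hits the output stream.
    uint64_t ComputeLoadedSize(uint64_t rodata_address,
                               uint64_t rodata_size, uint64_t text_size, uint64_t bss_size,
                               uint64_t dynstr_size, uint64_t dynsym_size,
                               uint64_t hash_size, uint64_t dynamic_size) {
      uint64_t text_address    = RoundUp(rodata_address + rodata_size, kPageSize);
      uint64_t bss_address     = RoundUp(text_address + text_size, kPageSize);
      uint64_t dynstr_address  = RoundUp(bss_address + bss_size, kPageSize);
      uint64_t dynsym_address  = RoundUp(dynstr_address + dynstr_size, 8);  // sh_addralign
      uint64_t hash_address    = RoundUp(dynsym_address + dynsym_size, 4);  // sh_addralign
      uint64_t dynamic_address = RoundUp(hash_address + hash_size, kPageSize);
      return RoundUp(dynamic_address + dynamic_size, kPageSize);
    }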
 
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index d50a08c..c9ea0083 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -52,14 +52,12 @@
   virtual ~ElfWriter() {}
 
   virtual void Start() = 0;
-  virtual void PrepareDebugInfo(size_t rodata_section_size,
-                                size_t text_section_size,
-                                const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
+  virtual void SetLoadedSectionSizes(size_t rodata_size, size_t text_size, size_t bss_size) = 0;
+  virtual void PrepareDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
   virtual OutputStream* StartRoData() = 0;
   virtual void EndRoData(OutputStream* rodata) = 0;
   virtual OutputStream* StartText() = 0;
   virtual void EndText(OutputStream* text) = 0;
-  virtual void SetBssSize(size_t bss_size) = 0;
   virtual void WriteDynamicSection() = 0;
   virtual void WriteDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
   virtual void WritePatchLocations(const ArrayRef<const uintptr_t>& patch_locations) = 0;
@@ -70,6 +68,9 @@
   // should Seek() back to the position where the stream was before this operation.
   virtual OutputStream* GetStream() = 0;
 
+  // Get the size that the loaded ELF file will occupy in memory.
+  virtual size_t GetLoadedSize() = 0;
+
  protected:
   ElfWriter() = default;
 };
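
The interface reshuffle imposes an ordering: section sizes must be declared via SetLoadedSectionSizes() before the section contents are written, so that the dynamic section and the total loaded size exist up front (the image writer needs the loaded size to lay out images before the oat file is final). A rough sketch of the new call sequence, as exercised by image_test.cc further down; error handling elided, not a complete program:

    // `writer` is an ElfWriter*; sizes come from the already-laid-out OatWriter.
    void WriteOatElfFile(ElfWriter* writer,
                         size_t rodata_size, size_t text_size, size_t bss_size) {
      writer->Start();
      // Sizes first: prepares .dynamic and fixes the final loaded size.
      writer->SetLoadedSectionSizes(rodata_size, text_size, bss_size);
      size_t loaded_size = writer->GetLoadedSize();  // Available before writing.
      (void)loaded_size;  // e.g. passed to ImageWriter::UpdateOatFileLayout().

      OutputStream* rodata = writer->StartRoData();
      // ... write .rodata contents ...
      writer->EndRoData(rodata);

      OutputStream* text = writer->StartText();
      // ... write .text contents ...
      writer->EndText(text);

      writer->WriteDynamicSection();  // Also emits .bss as a NOBITS section now.
    }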
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 1d71e57..19346ec 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -88,14 +88,12 @@
   ~ElfWriterQuick();
 
   void Start() OVERRIDE;
-  void PrepareDebugInfo(size_t rodata_section_size,
-                        size_t text_section_size,
-                        const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
+  void SetLoadedSectionSizes(size_t rodata_size, size_t text_size, size_t bss_size) OVERRIDE;
+  void PrepareDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
   OutputStream* StartRoData() OVERRIDE;
   void EndRoData(OutputStream* rodata) OVERRIDE;
   OutputStream* StartText() OVERRIDE;
   void EndText(OutputStream* text) OVERRIDE;
-  void SetBssSize(size_t bss_size) OVERRIDE;
   void WriteDynamicSection() OVERRIDE;
   void WriteDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
   void WritePatchLocations(const ArrayRef<const uintptr_t>& patch_locations) OVERRIDE;
@@ -103,12 +101,17 @@
 
   virtual OutputStream* GetStream() OVERRIDE;
 
+  size_t GetLoadedSize() OVERRIDE;
+
   static void EncodeOatPatches(const std::vector<uintptr_t>& locations,
                                std::vector<uint8_t>* buffer);
 
  private:
   const CompilerOptions* const compiler_options_;
   File* const elf_file_;
+  size_t rodata_size_;
+  size_t text_size_;
+  size_t bss_size_;
   std::unique_ptr<BufferedOutputStream> output_stream_;
   std::unique_ptr<ElfBuilder<ElfTypes>> builder_;
   std::unique_ptr<DebugInfoTask> debug_info_task_;
@@ -134,6 +137,9 @@
     : ElfWriter(),
       compiler_options_(compiler_options),
       elf_file_(elf_file),
+      rodata_size_(0u),
+      text_size_(0u),
+      bss_size_(0u),
       output_stream_(MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(elf_file))),
       builder_(new ElfBuilder<ElfTypes>(instruction_set, output_stream_.get())) {}
 
@@ -146,6 +152,19 @@
 }
 
 template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::SetLoadedSectionSizes(size_t rodata_size,
+                                                     size_t text_size,
+                                                     size_t bss_size) {
+  DCHECK_EQ(rodata_size_, 0u);
+  rodata_size_ = rodata_size;
+  DCHECK_EQ(text_size_, 0u);
+  text_size_ = text_size;
+  DCHECK_EQ(bss_size_, 0u);
+  bss_size_ = bss_size;
+  builder_->PrepareDynamicSection(elf_file_->GetPath(), rodata_size_, text_size_, bss_size_);
+}
+
+template <typename ElfTypes>
 OutputStream* ElfWriterQuick<ElfTypes>::StartRoData() {
   auto* rodata = builder_->GetRoData();
   rodata->Start();
@@ -172,31 +191,21 @@
 }
 
 template <typename ElfTypes>
-void ElfWriterQuick<ElfTypes>::SetBssSize(size_t bss_size) {
-  auto* bss = builder_->GetBss();
-  if (bss_size != 0u) {
-    bss->WriteNoBitsSection(bss_size);
-  }
-}
-
-template <typename ElfTypes>
 void ElfWriterQuick<ElfTypes>::WriteDynamicSection() {
-  builder_->WriteDynamicSection(elf_file_->GetPath());
+  if (bss_size_ != 0u) {
+    builder_->GetBss()->WriteNoBitsSection(bss_size_);
+  }
+  builder_->WriteDynamicSection();
 }
 
 template <typename ElfTypes>
 void ElfWriterQuick<ElfTypes>::PrepareDebugInfo(
-    size_t rodata_section_size,
-    size_t text_section_size,
     const ArrayRef<const debug::MethodDebugInfo>& method_infos) {
   if (!method_infos.empty() && compiler_options_->GetGenerateMiniDebugInfo()) {
     // Prepare the mini-debug-info in background while we do other I/O.
     Thread* self = Thread::Current();
     debug_info_task_ = std::unique_ptr<DebugInfoTask>(
-        new DebugInfoTask(builder_->GetIsa(),
-                          rodata_section_size,
-                          text_section_size,
-                          method_infos));
+        new DebugInfoTask(builder_->GetIsa(), rodata_size_, text_size_, method_infos));
     debug_info_thread_pool_ = std::unique_ptr<ThreadPool>(
         new ThreadPool("Mini-debug-info writer", 1));
     debug_info_thread_pool_->AddTask(self, debug_info_task_.get());
@@ -245,6 +254,11 @@
   return builder_->GetStream();
 }
 
+template <typename ElfTypes>
+size_t ElfWriterQuick<ElfTypes>::GetLoadedSize() {
+  return builder_->GetLoadedSize();
+}
+
 // Explicit instantiations
 template class ElfWriterQuick<ElfTypes32>;
 template class ElfWriterQuick<ElfTypes64>;
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 4920f9b..5763cec 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -28,6 +28,7 @@
 #include "elf_writer_quick.h"
 #include "gc/space/image_space.h"
 #include "image_writer.h"
+#include "linker/multi_oat_relative_patcher.h"
 #include "lock_word.h"
 #include "mirror/object-inl.h"
 #include "oat_writer.h"
@@ -72,10 +73,10 @@
   ScratchFile oat_file(OS::CreateEmptyFile(oat_filename.c_str()));
 
   const uintptr_t requested_image_base = ART_BASE_ADDRESS;
-  std::unordered_map<const DexFile*, const char*> dex_file_to_oat_filename_map;
+  std::unordered_map<const DexFile*, size_t> dex_file_to_oat_index_map;
   std::vector<const char*> oat_filename_vector(1, oat_filename.c_str());
   for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
-    dex_file_to_oat_filename_map.emplace(dex_file, oat_filename.c_str());
+    dex_file_to_oat_index_map.emplace(dex_file, 0);
   }
   std::unique_ptr<ImageWriter> writer(new ImageWriter(*compiler_driver_,
                                                       requested_image_base,
@@ -83,7 +84,7 @@
                                                       /*compile_app_image*/false,
                                                       storage_mode,
                                                       oat_filename_vector,
-                                                      dex_file_to_oat_filename_map));
+                                                      dex_file_to_oat_index_map));
   // TODO: compile_pic should be a test argument.
   {
     {
@@ -123,10 +124,22 @@
           &opened_dex_files_map,
           &opened_dex_files);
       ASSERT_TRUE(dex_files_ok);
-      oat_writer.PrepareLayout(compiler_driver_.get(), writer.get(), dex_files);
+
       bool image_space_ok = writer->PrepareImageAddressSpace();
       ASSERT_TRUE(image_space_ok);
 
+      linker::MultiOatRelativePatcher patcher(compiler_driver_->GetInstructionSet(),
+                                              instruction_set_features_.get());
+      oat_writer.PrepareLayout(compiler_driver_.get(), writer.get(), dex_files, &patcher);
+      size_t rodata_size = oat_writer.GetOatHeader().GetExecutableOffset();
+      size_t text_size = oat_writer.GetSize() - rodata_size;
+      elf_writer->SetLoadedSectionSizes(rodata_size, text_size, oat_writer.GetBssSize());
+
+      writer->UpdateOatFileLayout(/* oat_index */ 0u,
+                                  elf_writer->GetLoadedSize(),
+                                  oat_writer.GetOatDataOffset(),
+                                  oat_writer.GetSize());
+
       bool rodata_ok = oat_writer.WriteRodata(rodata);
       ASSERT_TRUE(rodata_ok);
       elf_writer->EndRoData(rodata);
@@ -139,13 +152,13 @@
       bool header_ok = oat_writer.WriteHeader(elf_writer->GetStream(), 0u, 0u, 0u);
       ASSERT_TRUE(header_ok);
 
-      elf_writer->SetBssSize(oat_writer.GetBssSize());
+      writer->UpdateOatFileHeader(/* oat_index */ 0u, oat_writer.GetOatHeader());
+
       elf_writer->WriteDynamicSection();
       elf_writer->WriteDebugInfo(oat_writer.GetMethodDebugInfo());
       elf_writer->WritePatchLocations(oat_writer.GetAbsolutePatchLocations());
 
       bool success = elf_writer->End();
-
       ASSERT_TRUE(success);
     }
   }
@@ -158,12 +171,10 @@
     std::vector<const char*> dup_image_filename(1, image_file.GetFilename().c_str());
     bool success_image = writer->Write(kInvalidFd,
                                        dup_image_filename,
-                                       kInvalidFd,
-                                       dup_oat_filename,
-                                       dup_oat_filename[0]);
+                                       dup_oat_filename);
     ASSERT_TRUE(success_image);
     bool success_fixup = ElfWriter::Fixup(dup_oat.get(),
-                                          writer->GetOatDataBegin(dup_oat_filename[0]));
+                                          writer->GetOatDataBegin(0));
     ASSERT_TRUE(success_fixup);
 
     ASSERT_EQ(dup_oat->FlushCloseOrErase(), 0) << "Could not flush and close oat file "
@@ -278,6 +289,11 @@
   TestWriteRead(ImageHeader::kStorageModeLZ4);
 }
 
+TEST_F(ImageTest, WriteReadLZ4HC) {
+  TestWriteRead(ImageHeader::kStorageModeLZ4HC);
+}
+
 TEST_F(ImageTest, ImageHeaderIsValid) {
     uint32_t image_begin = ART_BASE_ADDRESS;
     uint32_t image_size_ = 16 * KB;
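
This test also shows the bookkeeping change that runs through image_writer.cc below: dex files and image state are now keyed by a dense oat-file index (size_t) instead of the oat filename (const char*), replacing the strcmp-based matching visible in CreateImageRoots with integer comparison. A sketch of the shape of the new mapping, with a hypothetical lookup helper (the real code DCHECKs that the dex file is present rather than falling back):

    #include <cstddef>
    #include <unordered_map>

    class DexFile;  // Stand-in forward declaration.

    // Before: std::unordered_map<const DexFile*, const char*> (oat filename).
    // After:  a dense index into per-oat-file state such as ImageInfo.
    using DexToOatIndexMap = std::unordered_map<const DexFile*, size_t>;

    size_t GetOatIndex(const DexToOatIndexMap& map, const DexFile* dex_file) {
      auto it = map.find(dex_file);
      return (it != map.end()) ? it->second : 0u;  // 0u = primary oat file.
    }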
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index d50528e..871435b 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -18,6 +18,7 @@
 
 #include <sys/stat.h>
 #include <lz4.h>
+#include <lz4hc.h>
 
 #include <memory>
 #include <numeric>
@@ -162,9 +163,7 @@
 
 bool ImageWriter::Write(int image_fd,
                         const std::vector<const char*>& image_filenames,
-                        int oat_fd,
-                        const std::vector<const char*>& oat_filenames,
-                        const std::string& oat_location) {
+                        const std::vector<const char*>& oat_filenames) {
-  // If image_fd or oat_fd are not kInvalidFd then we may have empty strings in image_filenames or
-  // oat_filenames.
+  // If image_fd is not kInvalidFd then we may have an empty string in image_filenames.
   CHECK(!image_filenames.empty());
@@ -172,95 +171,13 @@
     CHECK_EQ(image_filenames.size(), 1u);
   }
   CHECK(!oat_filenames.empty());
-  if (oat_fd != kInvalidFd) {
-    CHECK_EQ(oat_filenames.size(), 1u);
-  }
   CHECK_EQ(image_filenames.size(), oat_filenames.size());
 
-  size_t oat_file_offset = 0;
-
-  for (size_t i = 0; i < oat_filenames.size(); ++i) {
-    const char* oat_filename = oat_filenames[i];
-    std::unique_ptr<File> oat_file;
-
-    if (oat_fd != -1) {
-      if (strlen(oat_filename) == 0u) {
-        oat_file.reset(new File(oat_fd, false));
-      } else {
-        oat_file.reset(new File(oat_fd, oat_filename, false));
-      }
-      int length = oat_file->GetLength();
-      if (length < 0) {
-        PLOG(ERROR) << "Oat file has negative length " << length;
-        return false;
-      } else {
-        // Leave the fd open since dex2oat still needs to write out the oat file with the fd.
-        oat_file->DisableAutoClose();
-      }
-    } else {
-      oat_file.reset(OS::OpenFileReadWrite(oat_filename));
-    }
-    if (oat_file == nullptr) {
-      PLOG(ERROR) << "Failed to open oat file " << oat_filename;
-      return false;
-    }
-    std::string error_msg;
-    oat_file_ = OatFile::OpenReadable(oat_file.get(), oat_filename, nullptr, &error_msg);
-    if (oat_file_ == nullptr) {
-      PLOG(ERROR) << "Failed to open writable oat file " << oat_filename;
-      oat_file->Erase();
-      return false;
-    }
-    Runtime::Current()->GetOatFileManager().RegisterOatFile(
-        std::unique_ptr<const OatFile>(oat_file_));
-
-    const OatHeader& oat_header = oat_file_->GetOatHeader();
-    ImageInfo& image_info = GetImageInfo(oat_filename);
-
-    size_t oat_loaded_size = 0;
-    size_t oat_data_offset = 0;
-    ElfWriter::GetOatElfInformation(oat_file.get(), &oat_loaded_size, &oat_data_offset);
-
-    DCHECK_EQ(image_info.oat_offset_, oat_file_offset);
-    oat_file_offset += oat_loaded_size;
-
-    if (i == 0) {
-      // Primary oat file, read the trampolines.
-      image_info.oat_address_offsets_[kOatAddressInterpreterToInterpreterBridge] =
-          oat_header.GetInterpreterToInterpreterBridgeOffset();
-      image_info.oat_address_offsets_[kOatAddressInterpreterToCompiledCodeBridge] =
-          oat_header.GetInterpreterToCompiledCodeBridgeOffset();
-      image_info.oat_address_offsets_[kOatAddressJNIDlsymLookup] =
-          oat_header.GetJniDlsymLookupOffset();
-      image_info.oat_address_offsets_[kOatAddressQuickGenericJNITrampoline] =
-          oat_header.GetQuickGenericJniTrampolineOffset();
-      image_info.oat_address_offsets_[kOatAddressQuickIMTConflictTrampoline] =
-          oat_header.GetQuickImtConflictTrampolineOffset();
-      image_info.oat_address_offsets_[kOatAddressQuickResolutionTrampoline] =
-          oat_header.GetQuickResolutionTrampolineOffset();
-      image_info.oat_address_offsets_[kOatAddressQuickToInterpreterBridge] =
-          oat_header.GetQuickToInterpreterBridgeOffset();
-    }
-
-
-    {
-      ScopedObjectAccess soa(Thread::Current());
-      CreateHeader(oat_loaded_size, oat_data_offset);
-      CopyAndFixupNativeData();
-    }
-
-    SetOatChecksumFromElfFile(oat_file.get());
-
-    if (oat_fd != -1) {
-      // Leave fd open for caller.
-      if (oat_file->Flush() != 0) {
-        LOG(ERROR) << "Failed to flush oat file " << oat_filename << " for " << oat_location;
-        return false;
-      }
-    } else if (oat_file->FlushCloseOrErase() != 0) {
-      LOG(ERROR) << "Failed to flush and close oat file " << oat_filename
-                 << " for " << oat_location;
-      return false;
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    for (size_t i = 0; i < oat_filenames.size(); ++i) {
+      CreateHeader(i);
+      CopyAndFixupNativeData(i);
     }
   }
 
@@ -273,8 +190,7 @@
 
   for (size_t i = 0; i < image_filenames.size(); ++i) {
     const char* image_filename = image_filenames[i];
-    const char* oat_filename = oat_filenames[i];
-    ImageInfo& image_info = GetImageInfo(oat_filename);
+    ImageInfo& image_info = GetImageInfo(i);
     std::unique_ptr<File> image_file;
     if (image_fd != kInvalidFd) {
       if (strlen(image_filename) == 0u) {
@@ -309,18 +225,28 @@
     char* image_data = reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader);
     size_t data_size;
     const char* image_data_to_write;
+    const uint64_t compress_start_time = NanoTime();
 
     CHECK_EQ(image_header->storage_mode_, image_storage_mode_);
     switch (image_storage_mode_) {
       case ImageHeader::kStorageModeLZ4: {
-        size_t compressed_max_size = LZ4_compressBound(image_data_size);
+        const size_t compressed_max_size = LZ4_compressBound(image_data_size);
         compressed_data.reset(new char[compressed_max_size]);
         data_size = LZ4_compress(
             reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
             &compressed_data[0],
             image_data_size);
-        image_data_to_write = &compressed_data[0];
-        VLOG(compiler) << "Compressed from " << image_data_size << " to " << data_size;
+
+        break;
+      }
+      case ImageHeader::kStorageModeLZ4HC: {
+        // The bound is the same as for the non-HC variant.
+        const size_t compressed_max_size = LZ4_compressBound(image_data_size);
+        compressed_data.reset(new char[compressed_max_size]);
+        data_size = LZ4_compressHC(
+            reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
+            &compressed_data[0],
+            image_data_size);
         break;
       }
       case ImageHeader::kStorageModeUncompressed: {
@@ -334,6 +260,12 @@
       }
     }
 
+    if (compressed_data != nullptr) {
+      image_data_to_write = &compressed_data[0];
+      VLOG(compiler) << "Compressed from " << image_data_size << " to " << data_size << " in "
+                     << PrettyDuration(NanoTime() - compress_start_time);
+    }
+
     // Write header first, as uncompressed.
     image_header->data_size_ = data_size;
     if (!image_file->WriteFully(image_info.image_->Begin(), sizeof(ImageHeader))) {
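
The new kStorageModeLZ4HC case trades compression time for a better ratio: LZ4_compressHC() shares LZ4_compressBound() with the fast encoder and emits a stream the ordinary LZ4 decoder understands, so only the writer side changes. A standalone sketch of the two paths using the same legacy LZ4 API this file calls (buffer handling is illustrative):

    #include <lz4.h>
    #include <lz4hc.h>
    #include <vector>

    // Compress `in` with either the fast or the high-compression LZ4 encoder;
    // both outputs decompress with the same LZ4 decoder.
    std::vector<char> CompressImageData(const char* in, int in_size, bool high_compression) {
      std::vector<char> out(LZ4_compressBound(in_size));  // Same bound for both modes.
      const int out_size = high_compression
          ? LZ4_compressHC(in, out.data(), in_size)
          : LZ4_compress(in, out.data(), in_size);
      out.resize(out_size);
      return out;
    }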
@@ -396,8 +328,8 @@
   DCHECK(object != nullptr);
   DCHECK_NE(image_objects_offset_begin_, 0u);
 
-  const char* oat_filename = GetOatFilename(object);
-  ImageInfo& image_info = GetImageInfo(oat_filename);
+  size_t oat_index = GetOatIndex(object);
+  ImageInfo& image_info = GetImageInfo(oat_index);
   size_t bin_slot_offset = image_info.bin_slot_offsets_[bin_slot.GetBin()];
   size_t new_offset = bin_slot_offset + bin_slot.GetIndex();
   DCHECK_ALIGNED(new_offset, kObjectAlignment);
@@ -417,8 +349,8 @@
   DCHECK(IsImageOffsetAssigned(object));
   LockWord lock_word = object->GetLockWord(false);
   size_t offset = lock_word.ForwardingAddress();
-  const char* oat_filename = GetOatFilename(object);
-  const ImageInfo& image_info = GetConstImageInfo(oat_filename);
+  size_t oat_index = GetOatIndex(object);
+  const ImageInfo& image_info = GetImageInfo(oat_index);
   DCHECK_LT(offset, image_info.image_end_);
   return offset;
 }
@@ -461,8 +393,8 @@
   // Set the slot size early to avoid DCHECK() failures in IsImageBinSlotAssigned()
   // when AssignImageBinSlot() assigns their indexes out or order.
   for (const DexFile* dex_file : compiler_driver_.GetDexFilesForOatFile()) {
-    auto it = dex_file_oat_filename_map_.find(dex_file);
-    DCHECK(it != dex_file_oat_filename_map_.end()) << dex_file->GetLocation();
+    auto it = dex_file_oat_index_map_.find(dex_file);
+    DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation();
     ImageInfo& image_info = GetImageInfo(it->second);
     image_info.dex_cache_array_starts_.Put(dex_file, image_info.bin_slot_sizes_[kBinDexCacheArray]);
     DexCacheArraysLayout layout(target_ptr_size_, dex_file);
@@ -481,8 +413,8 @@
     const DexFile* dex_file = dex_cache->GetDexFile();
     DexCacheArraysLayout layout(target_ptr_size_, dex_file);
     DCHECK(layout.Valid());
-    const char* oat_filename = GetOatFilenameForDexCache(dex_cache);
-    ImageInfo& image_info = GetImageInfo(oat_filename);
+    size_t oat_index = GetOatIndexForDexCache(dex_cache);
+    ImageInfo& image_info = GetImageInfo(oat_index);
     uint32_t start = image_info.dex_cache_array_starts_.Get(dex_file);
     DCHECK_EQ(dex_file->NumTypeIds() != 0u, dex_cache->GetResolvedTypes() != nullptr);
     AddDexCacheArrayRelocation(dex_cache->GetResolvedTypes(),
@@ -504,9 +436,9 @@
 void ImageWriter::AddDexCacheArrayRelocation(void* array, size_t offset, DexCache* dex_cache) {
   if (array != nullptr) {
     DCHECK(!IsInBootImage(array));
-    const char* oat_filename = GetOatFilenameForDexCache(dex_cache);
+    size_t oat_index = GetOatIndexForDexCache(dex_cache);
     native_object_relocations_.emplace(array,
-        NativeObjectRelocation { oat_filename, offset, kNativeObjectRelocationTypeDexCacheArray });
+        NativeObjectRelocation { oat_index, offset, kNativeObjectRelocationTypeDexCacheArray });
   }
 }
 
@@ -621,8 +553,8 @@
     }  // else bin = kBinRegular
   }
 
-  const char* oat_filename = GetOatFilename(object);
-  ImageInfo& image_info = GetImageInfo(oat_filename);
+  size_t oat_index = GetOatIndex(object);
+  ImageInfo& image_info = GetImageInfo(oat_index);
 
   size_t offset_delta = RoundUp(object_size, kObjectAlignment);  // 64-bit alignment
   current_offset = image_info.bin_slot_sizes_[bin];  // How many bytes the current bin is at (aligned).
@@ -658,8 +590,8 @@
     LockWord lock_word = object->GetLockWord(false);
     size_t offset = lock_word.ForwardingAddress();
     BinSlot bin_slot(offset);
-    const char* oat_filename = GetOatFilename(object);
-    const ImageInfo& image_info = GetConstImageInfo(oat_filename);
+    size_t oat_index = GetOatIndex(object);
+    const ImageInfo& image_info = GetImageInfo(oat_index);
     DCHECK_LT(bin_slot.GetIndex(), image_info.bin_slot_sizes_[bin_slot.GetBin()])
         << "bin slot offset should not exceed the size of that bin";
   }
@@ -675,16 +607,15 @@
   DCHECK_LE(offset, std::numeric_limits<uint32_t>::max());
 
   BinSlot bin_slot(static_cast<uint32_t>(offset));
-  const char* oat_filename = GetOatFilename(object);
-  const ImageInfo& image_info = GetConstImageInfo(oat_filename);
+  size_t oat_index = GetOatIndex(object);
+  const ImageInfo& image_info = GetImageInfo(oat_index);
   DCHECK_LT(bin_slot.GetIndex(), image_info.bin_slot_sizes_[bin_slot.GetBin()]);
 
   return bin_slot;
 }
 
 bool ImageWriter::AllocMemory() {
-  for (const char* oat_filename : oat_filenames_) {
-    ImageInfo& image_info = GetImageInfo(oat_filename);
+  for (ImageInfo& image_info : image_infos_) {
     ImageSection unused_sections[ImageHeader::kSectionCount];
     const size_t length = RoundUp(
         image_info.CreateImageSections(target_ptr_size_, unused_sections),
@@ -917,7 +848,7 @@
       // Copied methods may be held live by a class which was not an image class but have a
       // declaring class which is an image class. Set it to the resolution method to be safe and
       // prevent dangling pointers.
-      if (method->MightBeCopied() || !KeepClass(declaring_class)) {
+      if (method->IsCopied() || !KeepClass(declaring_class)) {
         mirror::DexCache::SetElementPtrSize(resolved_methods,
                                             i,
                                             resolution_method,
@@ -977,8 +908,7 @@
 
 mirror::String* ImageWriter::FindInternedString(mirror::String* string) {
   Thread* const self = Thread::Current();
-  for (auto& pair : image_info_map_) {
-    const ImageInfo& image_info = pair.second;
+  for (const ImageInfo& image_info : image_infos_) {
     mirror::String* const found = image_info.intern_table_->LookupStrong(self, string);
     DCHECK(image_info.intern_table_->LookupWeak(self, string) == nullptr)
         << string->ToModifiedUtf8();
@@ -1005,8 +935,8 @@
   DCHECK(obj != nullptr);
   // If it is a string, we want to intern it if it's not interned.
   if (obj->GetClass()->IsStringClass()) {
-    const char* oat_filename = GetOatFilename(obj);
-    ImageInfo& image_info = GetImageInfo(oat_filename);
+    size_t oat_index = GetOatIndex(obj);
+    ImageInfo& image_info = GetImageInfo(oat_index);
 
     // we must be an interned string that was forward referenced and already assigned
     if (IsImageBinSlotAssigned(obj)) {
@@ -1035,7 +965,7 @@
   AssignImageBinSlot(obj);
 }
 
-ObjectArray<Object>* ImageWriter::CreateImageRoots(const char* oat_filename) const {
+ObjectArray<Object>* ImageWriter::CreateImageRoots(size_t oat_index) const {
   Runtime* runtime = Runtime::Current();
   ClassLinker* class_linker = runtime->GetClassLinker();
   Thread* self = Thread::Current();
@@ -1044,10 +974,10 @@
       class_linker->FindSystemClass(self, "[Ljava/lang/Object;")));
 
   std::unordered_set<const DexFile*> image_dex_files;
-  for (auto& pair : dex_file_oat_filename_map_) {
+  for (auto& pair : dex_file_oat_index_map_) {
     const DexFile* image_dex_file = pair.first;
-    const char* image_oat_filename = pair.second;
-    if (strcmp(oat_filename, image_oat_filename) == 0) {
+    size_t image_oat_index = pair.second;
+    if (oat_index == image_oat_index) {
       image_dex_files.insert(image_dex_file);
     }
   }
@@ -1172,8 +1102,8 @@
       LengthPrefixedArray<ArtField>* fields[] = {
           as_klass->GetSFieldsPtr(), as_klass->GetIFieldsPtr(),
       };
-      const char* oat_file = GetOatFilenameForDexCache(dex_cache);
-      ImageInfo& image_info = GetImageInfo(oat_file);
+      size_t oat_index = GetOatIndexForDexCache(dex_cache);
+      ImageInfo& image_info = GetImageInfo(oat_index);
       {
         // Note: This table is only accessed from the image writer, so the lock is technically
         // unnecessary.
@@ -1191,8 +1121,11 @@
                                                   << " already forwarded";
           size_t& offset = image_info.bin_slot_sizes_[kBinArtField];
           DCHECK(!IsInBootImage(cur_fields));
-          native_object_relocations_.emplace(cur_fields,
-              NativeObjectRelocation {oat_file, offset, kNativeObjectRelocationTypeArtFieldArray });
+          native_object_relocations_.emplace(
+              cur_fields,
+              NativeObjectRelocation {
+                  oat_index, offset, kNativeObjectRelocationTypeArtFieldArray
+              });
           offset += header_size;
           // Forward individual fields so that we can quickly find where they belong.
           for (size_t i = 0, count = cur_fields->size(); i < count; ++i) {
@@ -1202,8 +1135,9 @@
             CHECK(it2 == native_object_relocations_.end()) << "Field at index=" << i
                 << " already assigned " << PrettyField(field) << " static=" << field->IsStatic();
             DCHECK(!IsInBootImage(field));
-            native_object_relocations_.emplace(field,
-                NativeObjectRelocation {oat_file, offset, kNativeObjectRelocationTypeArtField });
+            native_object_relocations_.emplace(
+                field,
+                NativeObjectRelocation { oat_index, offset, kNativeObjectRelocationTypeArtField });
             offset += sizeof(ArtField);
           }
         }
@@ -1236,13 +1170,13 @@
         DCHECK(!IsInBootImage(array));
         native_object_relocations_.emplace(array,
             NativeObjectRelocation {
-                oat_file,
+                oat_index,
                 offset,
                 any_dirty ? kNativeObjectRelocationTypeArtMethodArrayDirty
                           : kNativeObjectRelocationTypeArtMethodArrayClean });
         offset += header_size;
         for (auto& m : as_klass->GetMethods(target_ptr_size_)) {
-          AssignMethodOffset(&m, type, oat_file);
+          AssignMethodOffset(&m, type, oat_index);
         }
         (any_dirty ? dirty_methods_ : clean_methods_) += num_methods;
       }
@@ -1270,14 +1204,14 @@
 
 void ImageWriter::AssignMethodOffset(ArtMethod* method,
                                      NativeObjectRelocationType type,
-                                     const char* oat_filename) {
+                                     size_t oat_index) {
   DCHECK(!IsInBootImage(method));
   auto it = native_object_relocations_.find(method);
   CHECK(it == native_object_relocations_.end()) << "Method " << method << " already assigned "
       << PrettyMethod(method);
-  ImageInfo& image_info = GetImageInfo(oat_filename);
+  ImageInfo& image_info = GetImageInfo(oat_index);
   size_t& offset = image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(type)];
-  native_object_relocations_.emplace(method, NativeObjectRelocation { oat_filename, offset, type });
+  native_object_relocations_.emplace(method, NativeObjectRelocation { oat_index, offset, type });
   offset += ArtMethod::Size(target_ptr_size_);
 }
 
@@ -1312,9 +1246,8 @@
   Thread* const self = Thread::Current();
   StackHandleScopeCollection handles(self);
   std::vector<Handle<ObjectArray<Object>>> image_roots;
-  for (const char* oat_filename : oat_filenames_) {
-    std::string image_filename = oat_filename;
-    image_roots.push_back(handles.NewHandle(CreateImageRoots(image_filename.c_str())));
+  for (size_t i = 0, size = oat_filenames_.size(); i != size; ++i) {
+    image_roots.push_back(handles.NewHandle(CreateImageRoots(i)));
   }
 
   auto* runtime = Runtime::Current();
@@ -1340,12 +1273,12 @@
   const auto image_method_type = kNativeObjectRelocationTypeArtMethodArrayClean;
   auto it = native_object_relocations_.find(&image_method_array_);
   CHECK(it == native_object_relocations_.end());
-  ImageInfo& default_image_info = GetImageInfo(default_oat_filename_);
+  ImageInfo& default_image_info = GetImageInfo(GetDefaultOatIndex());
   size_t& offset =
       default_image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)];
   if (!compile_app_image_) {
     native_object_relocations_.emplace(&image_method_array_,
-        NativeObjectRelocation { default_oat_filename_, offset, image_method_type });
+        NativeObjectRelocation { GetDefaultOatIndex(), offset, image_method_type });
   }
   size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
   const size_t array_size = LengthPrefixedArray<ArtMethod>::ComputeSize(
@@ -1357,15 +1290,14 @@
     CHECK(m->IsRuntimeMethod());
     DCHECK_EQ(compile_app_image_, IsInBootImage(m)) << "Trampolines should be in boot image";
     if (!IsInBootImage(m)) {
-      AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean, default_oat_filename_);
+      AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean, GetDefaultOatIndex());
     }
   }
   // Calculate size of the dex cache arrays slot and prepare offsets.
   PrepareDexCacheArraySlots();
 
   // Calculate the sizes of the intern tables and class tables.
-  for (const char* oat_filename : oat_filenames_) {
-    ImageInfo& image_info = GetImageInfo(oat_filename);
+  for (ImageInfo& image_info : image_infos_) {
     // Calculate how big the intern table will be after being serialized.
     InternTable* const intern_table = image_info.intern_table_.get();
     CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
@@ -1376,8 +1308,7 @@
   }
 
   // Calculate bin slot offsets.
-  for (const char* oat_filename : oat_filenames_) {
-    ImageInfo& image_info = GetImageInfo(oat_filename);
+  for (ImageInfo& image_info : image_infos_) {
     size_t bin_offset = image_objects_offset_begin_;
     for (size_t i = 0; i != kBinSize; ++i) {
       image_info.bin_slot_offsets_[i] = bin_offset;
@@ -1397,8 +1328,7 @@
 
   // Calculate image offsets.
   size_t image_offset = 0;
-  for (const char* oat_filename : oat_filenames_) {
-    ImageInfo& image_info = GetImageInfo(oat_filename);
+  for (ImageInfo& image_info : image_infos_) {
     image_info.image_begin_ = global_image_begin_ + image_offset;
     image_info.image_offset_ = image_offset;
     ImageSection unused_sections[ImageHeader::kSectionCount];
@@ -1415,8 +1345,7 @@
   // DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_);
 
   size_t i = 0;
-  for (const char* oat_filename : oat_filenames_) {
-    ImageInfo& image_info = GetImageInfo(oat_filename);
+  for (ImageInfo& image_info : image_infos_) {
     image_info.image_roots_address_ = PointerToLowMemUInt32(GetImageAddress(image_roots[i].Get()));
     i++;
   }
@@ -1425,7 +1354,7 @@
   for (auto& pair : native_object_relocations_) {
     NativeObjectRelocation& relocation = pair.second;
     Bin bin_type = BinTypeForNativeRelocationType(relocation.type);
-    ImageInfo& image_info = GetImageInfo(relocation.oat_filename);
+    ImageInfo& image_info = GetImageInfo(relocation.oat_index);
     relocation.offset += image_info.bin_slot_offsets_[bin_type];
   }
 
@@ -1474,15 +1403,11 @@
   return cur_pos;
 }
 
-void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
-  CHECK_NE(0U, oat_loaded_size);
-  const char* oat_filename = oat_file_->GetLocation().c_str();
-  ImageInfo& image_info = GetImageInfo(oat_filename);
-  const uint8_t* oat_file_begin = GetOatFileBegin(oat_filename);
-  const uint8_t* oat_file_end = oat_file_begin + oat_loaded_size;
-  image_info.oat_data_begin_ = const_cast<uint8_t*>(oat_file_begin) + oat_data_offset;
-  const uint8_t* oat_data_end = image_info.oat_data_begin_ + oat_file_->Size();
-  image_info.oat_size_ = oat_file_->Size();
+void ImageWriter::CreateHeader(size_t oat_index) {
+  ImageInfo& image_info = GetImageInfo(oat_index);
+  const uint8_t* oat_file_begin = image_info.oat_file_begin_;
+  const uint8_t* oat_file_end = oat_file_begin + image_info.oat_loaded_size_;
+  const uint8_t* oat_data_end = image_info.oat_data_begin_ + image_info.oat_size_;
 
   // Create the image sections.
   ImageSection sections[ImageHeader::kSectionCount];
@@ -1493,7 +1418,7 @@
   auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
   *bitmap_section = ImageSection(RoundUp(image_end, kPageSize), RoundUp(bitmap_bytes, kPageSize));
   if (VLOG_IS_ON(compiler)) {
-    LOG(INFO) << "Creating header for " << oat_filename;
+    LOG(INFO) << "Creating header for " << oat_filenames_[oat_index];
     size_t idx = 0;
     for (const ImageSection& section : sections) {
       LOG(INFO) << static_cast<ImageHeader::ImageSections>(idx) << " " << section;
@@ -1522,7 +1447,7 @@
                                                image_end,
                                                sections,
                                                image_info.image_roots_address_,
-                                               oat_file_->GetOatHeader().GetChecksum(),
+                                               image_info.oat_checksum_,
                                                PointerToLowMemUInt32(oat_file_begin),
                                                PointerToLowMemUInt32(image_info.oat_data_begin_),
                                                PointerToLowMemUInt32(oat_data_end),
@@ -1541,8 +1466,8 @@
 ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) {
   auto it = native_object_relocations_.find(method);
   CHECK(it != native_object_relocations_.end()) << PrettyMethod(method) << " @ " << method;
-  const char* oat_filename = GetOatFilename(method->GetDexCache());
-  ImageInfo& image_info = GetImageInfo(oat_filename);
+  size_t oat_index = GetOatIndex(method->GetDexCache());
+  ImageInfo& image_info = GetImageInfo(oat_index);
   CHECK_GE(it->second.offset, image_info.image_end_) << "ArtMethods should be after Objects";
   return reinterpret_cast<ArtMethod*>(image_info.image_begin_ + it->second.offset);
 }
@@ -1571,14 +1496,13 @@
   ImageWriter* const image_writer_;
 };
 
-void ImageWriter::CopyAndFixupNativeData() {
-  const char* oat_filename = oat_file_->GetLocation().c_str();
-  ImageInfo& image_info = GetImageInfo(oat_filename);
+void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
+  ImageInfo& image_info = GetImageInfo(oat_index);
   // Copy ArtFields and methods to their locations and update the array for convenience.
   for (auto& pair : native_object_relocations_) {
     NativeObjectRelocation& relocation = pair.second;
     // Only work with fields and methods that are in the current oat file.
-    if (strcmp(relocation.oat_filename, oat_filename) != 0) {
+    if (relocation.oat_index != oat_index) {
       continue;
     }
     auto* dest = image_info.image_->Begin() + relocation.offset;
@@ -1624,7 +1548,7 @@
     ArtMethod* method = image_methods_[i];
     CHECK(method != nullptr);
     // Only place runtime methods in the image of the default oat file.
-    if (method->IsRuntimeMethod() && strcmp(default_oat_filename_, oat_filename) != 0) {
+    if (method->IsRuntimeMethod() && oat_index != GetDefaultOatIndex()) {
       continue;
     }
     if (!IsInBootImage(method)) {
@@ -1729,7 +1653,7 @@
         }
         UNREACHABLE();
       } else {
-        ImageInfo& image_info = GetImageInfo(it->second.oat_filename);
+        ImageInfo& image_info = GetImageInfo(it->second.oat_index);
         elem = image_info.image_begin_ + it->second.offset;
       }
     }
@@ -1742,8 +1666,8 @@
     return;
   }
   size_t offset = GetImageOffset(obj);
-  const char* oat_filename = GetOatFilename(obj);
-  ImageInfo& image_info = GetImageInfo(oat_filename);
+  size_t oat_index = GetOatIndex(obj);
+  ImageInfo& image_info = GetImageInfo(oat_index);
   auto* dst = reinterpret_cast<Object*>(image_info.image_->Begin() + offset);
   DCHECK_LT(offset, image_info.image_end_);
   const auto* src = reinterpret_cast<const uint8_t*>(obj);
@@ -1835,7 +1759,7 @@
     CHECK(it != native_object_relocations_.end()) << obj << " spaces "
         << Runtime::Current()->GetHeap()->DumpSpaces();
     const NativeObjectRelocation& relocation = it->second;
-    ImageInfo& image_info = GetImageInfo(relocation.oat_filename);
+    ImageInfo& image_info = GetImageInfo(relocation.oat_index);
     return reinterpret_cast<T*>(image_info.image_begin_ + relocation.offset);
   }
 }
@@ -1845,8 +1769,8 @@
   if (obj == nullptr || IsInBootImage(obj)) {
     return obj;
   } else {
-    const char* oat_filename = GetOatFilenameForDexCache(dex_cache);
-    ImageInfo& image_info = GetImageInfo(oat_filename);
+    size_t oat_index = GetOatIndexForDexCache(dex_cache);
+    ImageInfo& image_info = GetImageInfo(oat_index);
     return reinterpret_cast<T*>(image_info.image_->Begin() + NativeOffsetInImage(obj));
   }
 }
@@ -2044,9 +1968,19 @@
   // trampoline.
 
   // Quick entrypoint:
-  uint32_t quick_oat_code_offset = PointerToLowMemUInt32(
-      method->GetEntryPointFromQuickCompiledCodePtrSize(target_ptr_size_));
-  const uint8_t* quick_code = GetOatAddressForOffset(quick_oat_code_offset, image_info);
+  const void* quick_oat_entry_point =
+      method->GetEntryPointFromQuickCompiledCodePtrSize(target_ptr_size_);
+  const uint8_t* quick_code;
+
+  if (UNLIKELY(IsInBootImage(method->GetDeclaringClass()))) {
+    DCHECK(method->IsCopied());
+    // The code lives in another oat file (e.g. copied default methods); use the entry point as-is.
+    quick_code = reinterpret_cast<const uint8_t*>(quick_oat_entry_point);
+  } else {
+    uint32_t quick_oat_code_offset = PointerToLowMemUInt32(quick_oat_entry_point);
+    quick_code = GetOatAddressForOffset(quick_oat_code_offset, image_info);
+  }
+
   *quick_is_interpreted = false;
   if (quick_code != nullptr && (!method->IsStatic() || method->IsConstructor() ||
       method->GetDeclaringClass()->IsInitialized())) {
@@ -2129,34 +2063,6 @@
   }
 }
 
-static OatHeader* GetOatHeaderFromElf(ElfFile* elf) {
-  uint64_t data_sec_offset;
-  bool has_data_sec = elf->GetSectionOffsetAndSize(".rodata", &data_sec_offset, nullptr);
-  if (!has_data_sec) {
-    return nullptr;
-  }
-  return reinterpret_cast<OatHeader*>(elf->Begin() + data_sec_offset);
-}
-
-void ImageWriter::SetOatChecksumFromElfFile(File* elf_file) {
-  std::string error_msg;
-  std::unique_ptr<ElfFile> elf(ElfFile::Open(elf_file,
-                                             PROT_READ | PROT_WRITE,
-                                             MAP_SHARED,
-                                             &error_msg));
-  if (elf.get() == nullptr) {
-    LOG(FATAL) << "Unable open oat file: " << error_msg;
-    return;
-  }
-  OatHeader* oat_header = GetOatHeaderFromElf(elf.get());
-  CHECK(oat_header != nullptr);
-  CHECK(oat_header->IsValid());
-
-  ImageInfo& image_info = GetImageInfo(oat_file_->GetLocation().c_str());
-  ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
-  image_header->SetOatChecksum(oat_header->GetChecksum());
-}
-
 size_t ImageWriter::GetBinSizeSum(ImageWriter::ImageInfo& image_info, ImageWriter::Bin up_to) const {
   DCHECK_LE(up_to, kBinSize);
   return std::accumulate(&image_info.bin_slot_sizes_[0],
@@ -2187,19 +2093,6 @@
   return lockword_ & ~kBinMask;
 }
 
-uint8_t* ImageWriter::GetOatFileBegin(const char* oat_filename) const {
-  uintptr_t last_image_end = 0;
-  for (const char* oat_fn : oat_filenames_) {
-    const ImageInfo& image_info = GetConstImageInfo(oat_fn);
-    DCHECK(image_info.image_begin_ != nullptr);
-    uintptr_t this_end = reinterpret_cast<uintptr_t>(image_info.image_begin_) +
-        image_info.image_size_;
-    last_image_end = std::max(this_end, last_image_end);
-  }
-  const ImageInfo& image_info = GetConstImageInfo(oat_filename);
-  return reinterpret_cast<uint8_t*>(last_image_end) + image_info.oat_offset_;
-}
-
 ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocationType type) {
   switch (type) {
     case kNativeObjectRelocationTypeArtField:
@@ -2217,91 +2110,110 @@
   UNREACHABLE();
 }
 
-const char* ImageWriter::GetOatFilename(mirror::Object* obj) const {
+size_t ImageWriter::GetOatIndex(mirror::Object* obj) const {
   if (compile_app_image_) {
-    return default_oat_filename_;
+    return GetDefaultOatIndex();
   } else {
-    return GetOatFilenameForDexCache(obj->IsDexCache() ? obj->AsDexCache() :
-        obj->IsClass() ? obj->AsClass()->GetDexCache() : obj->GetClass()->GetDexCache());
+    mirror::DexCache* dex_cache =
+        obj->IsDexCache() ? obj->AsDexCache()
+                          : obj->IsClass() ? obj->AsClass()->GetDexCache()
+                                           : obj->GetClass()->GetDexCache();
+    return GetOatIndexForDexCache(dex_cache);
   }
 }
 
-const char* ImageWriter::GetOatFilenameForDexCache(mirror::DexCache* dex_cache) const {
-  if (compile_app_image_ || dex_cache == nullptr) {
-    return default_oat_filename_;
+size_t ImageWriter::GetOatIndexForDexFile(const DexFile* dex_file) const {
+  if (compile_app_image_) {
+    return GetDefaultOatIndex();
   } else {
-    auto it = dex_file_oat_filename_map_.find(dex_cache->GetDexFile());
-    DCHECK(it != dex_file_oat_filename_map_.end()) << dex_cache->GetDexFile()->GetLocation();
+    auto it = dex_file_oat_index_map_.find(dex_file);
+    DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation();
     return it->second;
   }
 }
 
-ImageWriter::ImageInfo& ImageWriter::GetImageInfo(const char* oat_filename) {
-  auto it = image_info_map_.find(oat_filename);
-  DCHECK(it != image_info_map_.end());
-  return it->second;
+size_t ImageWriter::GetOatIndexForDexCache(mirror::DexCache* dex_cache) const {
+  if (dex_cache == nullptr) {
+    return GetDefaultOatIndex();
+  } else {
+    return GetOatIndexForDexFile(dex_cache->GetDexFile());
+  }
 }
 
-const ImageWriter::ImageInfo& ImageWriter::GetConstImageInfo(const char* oat_filename) const {
-  auto it = image_info_map_.find(oat_filename);
-  DCHECK(it != image_info_map_.end());
-  return it->second;
-}
+void ImageWriter::UpdateOatFileLayout(size_t oat_index,
+                                      size_t oat_loaded_size,
+                                      size_t oat_data_offset,
+                                      size_t oat_data_size) {
+  const uint8_t* images_end = image_infos_.back().image_begin_ + image_infos_.back().image_size_;
+  DCHECK(images_end != nullptr);  // Image space must be ready.
+  for (const ImageInfo& info : image_infos_) {
+    DCHECK_LE(info.image_begin_ + info.image_size_, images_end);
+  }
 
-const ImageWriter::ImageInfo& ImageWriter::GetImageInfo(size_t index) const {
-  DCHECK_LT(index, oat_filenames_.size());
-  return GetConstImageInfo(oat_filenames_[index]);
-}
+  ImageInfo& cur_image_info = GetImageInfo(oat_index);
+  cur_image_info.oat_file_begin_ = images_end + cur_image_info.oat_offset_;
+  cur_image_info.oat_loaded_size_ = oat_loaded_size;
+  cur_image_info.oat_data_begin_ = cur_image_info.oat_file_begin_ + oat_data_offset;
+  cur_image_info.oat_size_ = oat_data_size;
 
-void ImageWriter::UpdateOatFile(File* oat_file, const char* oat_filename) {
-  DCHECK(oat_file != nullptr);
   if (compile_app_image_) {
     CHECK_EQ(oat_filenames_.size(), 1u) << "App image should have no next image.";
     return;
   }
-  ImageInfo& cur_image_info = GetImageInfo(oat_filename);
 
   // Update the oat_offset of the next image info.
-  auto it = std::find(oat_filenames_.begin(), oat_filenames_.end(), oat_filename);
-  DCHECK(it != oat_filenames_.end());
-
-  it++;
-  if (it != oat_filenames_.end()) {
-    size_t oat_loaded_size = 0;
-    size_t oat_data_offset = 0;
-    ElfWriter::GetOatElfInformation(oat_file, &oat_loaded_size, &oat_data_offset);
+  if (oat_index + 1u != oat_filenames_.size()) {
     // There is a following one.
-    ImageInfo& next_image_info = GetImageInfo(*it);
+    ImageInfo& next_image_info = GetImageInfo(oat_index + 1u);
     next_image_info.oat_offset_ = cur_image_info.oat_offset_ + oat_loaded_size;
   }
 }
 
+void ImageWriter::UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header) {
+  ImageInfo& cur_image_info = GetImageInfo(oat_index);
+  cur_image_info.oat_checksum_ = oat_header.GetChecksum();
+
+  if (oat_index == GetDefaultOatIndex()) {
+    // Primary oat file, read the trampolines.
+    cur_image_info.oat_address_offsets_[kOatAddressInterpreterToInterpreterBridge] =
+        oat_header.GetInterpreterToInterpreterBridgeOffset();
+    cur_image_info.oat_address_offsets_[kOatAddressInterpreterToCompiledCodeBridge] =
+        oat_header.GetInterpreterToCompiledCodeBridgeOffset();
+    cur_image_info.oat_address_offsets_[kOatAddressJNIDlsymLookup] =
+        oat_header.GetJniDlsymLookupOffset();
+    cur_image_info.oat_address_offsets_[kOatAddressQuickGenericJNITrampoline] =
+        oat_header.GetQuickGenericJniTrampolineOffset();
+    cur_image_info.oat_address_offsets_[kOatAddressQuickIMTConflictTrampoline] =
+        oat_header.GetQuickImtConflictTrampolineOffset();
+    cur_image_info.oat_address_offsets_[kOatAddressQuickResolutionTrampoline] =
+        oat_header.GetQuickResolutionTrampolineOffset();
+    cur_image_info.oat_address_offsets_[kOatAddressQuickToInterpreterBridge] =
+        oat_header.GetQuickToInterpreterBridgeOffset();
+  }
+}
+
 ImageWriter::ImageWriter(
     const CompilerDriver& compiler_driver,
     uintptr_t image_begin,
     bool compile_pic,
     bool compile_app_image,
     ImageHeader::StorageMode image_storage_mode,
-    const std::vector<const char*> oat_filenames,
-    const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map)
+    const std::vector<const char*>& oat_filenames,
+    const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map)
     : compiler_driver_(compiler_driver),
       global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
       image_objects_offset_begin_(0),
-      oat_file_(nullptr),
       compile_pic_(compile_pic),
       compile_app_image_(compile_app_image),
       target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
+      image_infos_(oat_filenames.size()),
       image_method_array_(ImageHeader::kImageMethodsCount),
       dirty_methods_(0u),
       clean_methods_(0u),
       image_storage_mode_(image_storage_mode),
-      dex_file_oat_filename_map_(dex_file_oat_filename_map),
       oat_filenames_(oat_filenames),
-      default_oat_filename_(oat_filenames[0]) {
+      dex_file_oat_index_map_(dex_file_oat_index_map) {
   CHECK_NE(image_begin, 0U);
-  for (const char* oat_filename : oat_filenames) {
-    image_info_map_.emplace(oat_filename, ImageInfo());
-  }
   std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
   CHECK_EQ(compile_app_image, !Runtime::Current()->GetHeap()->GetBootImageSpaces().empty())
       << "Compiling a boot image should occur iff there are no boot image spaces loaded";
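
UpdateOatFileLayout() places each oat file immediately after the image spaces, at images_end + oat_offset_, and makes the next file's oat_offset_ valid by adding the current file's loaded size. A minimal sketch of that chaining, collapsed into a single pass over illustrative stand-in structs (not the real ImageWriter API):

```cpp
#include <cstddef>
#include <vector>

// Illustrative stand-in for the layout fields ImageInfo gains in this change.
struct OatLayout {
  size_t oat_offset = 0;   // offset of this oat file from the end of the image spaces
  size_t loaded_size = 0;  // loaded size of the ELF file containing the oat data
};

// The real code updates one successor per UpdateOatFileLayout() call; this
// sketch performs all the updates at once, assuming every loaded size is known.
void ChainOatLayouts(std::vector<OatLayout>* layouts) {
  for (size_t i = 0; i + 1 < layouts->size(); ++i) {
    (*layouts)[i + 1].oat_offset = (*layouts)[i].oat_offset + (*layouts)[i].loaded_size;
  }
}
```

The absolute start of each oat file then follows as images_end + oat_offset_, exactly as computed above.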
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index ee204c5..dba9dd7 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -27,6 +27,7 @@
 #include <ostream>
 
 #include "base/bit_utils.h"
+#include "base/dchecked_vector.h"
 #include "base/length_prefixed_array.h"
 #include "base/macros.h"
 #include "driver/compiler_driver.h"
@@ -59,20 +60,19 @@
               bool compile_pic,
               bool compile_app_image,
               ImageHeader::StorageMode image_storage_mode,
-              const std::vector<const char*> oat_filenames,
-              const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map);
+              const std::vector<const char*>& oat_filenames,
+              const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map);
 
   bool PrepareImageAddressSpace();
 
   bool IsImageAddressSpaceReady() const {
-    bool ready = !image_info_map_.empty();
-    for (auto& pair : image_info_map_) {
-      const ImageInfo& image_info = pair.second;
+    DCHECK(!image_infos_.empty());
+    for (const ImageInfo& image_info : image_infos_) {
       if (image_info.image_roots_address_ == 0u) {
         return false;
       }
     }
-    return ready;
+    return true;
   }
 
   template <typename T>
@@ -80,8 +80,8 @@
     if (object == nullptr || IsInBootImage(object)) {
       return object;
     } else {
-      const char* oat_filename = GetOatFilename(object);
-      const ImageInfo& image_info = GetConstImageInfo(oat_filename);
+      size_t oat_index = GetOatIndex(object);
+      const ImageInfo& image_info = GetImageInfo(oat_index);
       return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
     }
   }
@@ -91,9 +91,9 @@
   template <typename PtrType>
   PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
       const SHARED_REQUIRES(Locks::mutator_lock_) {
-    auto oat_it = dex_file_oat_filename_map_.find(dex_file);
-    DCHECK(oat_it != dex_file_oat_filename_map_.end());
-    const ImageInfo& image_info = GetConstImageInfo(oat_it->second);
+    auto oat_it = dex_file_oat_index_map_.find(dex_file);
+    DCHECK(oat_it != dex_file_oat_index_map_.end());
+    const ImageInfo& image_info = GetImageInfo(oat_it->second);
     auto it = image_info.dex_cache_array_starts_.find(dex_file);
     DCHECK(it != image_info.dex_cache_array_starts_.end());
     return reinterpret_cast<PtrType>(
@@ -101,7 +101,13 @@
             it->second + offset);
   }
 
-  uint8_t* GetOatFileBegin(const char* oat_filename) const;
+  size_t GetOatFileOffset(size_t oat_index) const {
+    return GetImageInfo(oat_index).oat_offset_;
+  }
+
+  const uint8_t* GetOatFileBegin(size_t oat_index) const {
+    return GetImageInfo(oat_index).oat_file_begin_;
+  }
 
   // If image_fd is not kInvalidFd, then we use that for the image file. Otherwise we open
   // the names in image_filenames.
@@ -109,21 +115,32 @@
   // the names in oat_filenames.
   bool Write(int image_fd,
              const std::vector<const char*>& image_filenames,
-             int oat_fd,
-             const std::vector<const char*>& oat_filenames,
-             const std::string& oat_location)
+             const std::vector<const char*>& oat_filenames)
       REQUIRES(!Locks::mutator_lock_);
 
-  uintptr_t GetOatDataBegin(const char* oat_filename) {
-    return reinterpret_cast<uintptr_t>(GetImageInfo(oat_filename).oat_data_begin_);
+  uintptr_t GetOatDataBegin(size_t oat_index) {
+    return reinterpret_cast<uintptr_t>(GetImageInfo(oat_index).oat_data_begin_);
   }
 
-  const char* GetOatFilenameForDexCache(mirror::DexCache* dex_cache) const
+  // Get the index of the oat file containing the dex file.
+  //
+  // This "oat_index" is used to retrieve information about the the memory layout
+  // of the oat file and its associated image file, needed for link-time patching
+  // of references to the image or across oat files.
+  size_t GetOatIndexForDexFile(const DexFile* dex_file) const;
+
+  // Get the index of the oat file containing the dex file served by the dex cache.
+  size_t GetOatIndexForDexCache(mirror::DexCache* dex_cache) const
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Update the oat size for the given oat file. This will make the oat_offset for the next oat
-  // file valid.
-  void UpdateOatFile(File* oat_file, const char* oat_filename);
+  // Update the oat layout for the given oat file.
+  // This will make the oat_offset for the next oat file valid.
+  void UpdateOatFileLayout(size_t oat_index,
+                           size_t oat_loaded_size,
+                           size_t oat_data_offset,
+                           size_t oat_data_size);
+  // Update information about the oat header, i.e. checksum and trampoline offsets.
+  void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);
 
  private:
   bool AllocMemory();
@@ -247,10 +264,13 @@
     // Offset of the oat file for this image from start of oat files. This is
     // valid when the previous oat file has been written.
     size_t oat_offset_ = 0;
-    // Start of oatdata in the corresponding oat file. This is
-    // valid when the images have been layed out.
-    uint8_t* oat_data_begin_ = nullptr;
+    // Layout of the loaded ELF file containing the oat file, valid after UpdateOatFileLayout().
+    const uint8_t* oat_file_begin_ = nullptr;
+    size_t oat_loaded_size_ = 0;
+    const uint8_t* oat_data_begin_ = nullptr;
     size_t oat_size_ = 0;  // Size of the corresponding oat data.
+    // The oat header checksum, valid after UpdateOatFileHeader().
+    uint32_t oat_checksum_ = 0u;
 
     // Image bitmap which lets us know where the objects inside of the image reside.
     std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
@@ -310,8 +330,8 @@
   mirror::Object* GetLocalAddress(mirror::Object* object) const
       SHARED_REQUIRES(Locks::mutator_lock_) {
     size_t offset = GetImageOffset(object);
-    const char* oat_filename = GetOatFilename(object);
-    const ImageInfo& image_info = GetConstImageInfo(oat_filename);
+    size_t oat_index = GetOatIndex(object);
+    const ImageInfo& image_info = GetImageInfo(oat_index);
     uint8_t* dst = image_info.image_->Begin() + offset;
     return reinterpret_cast<mirror::Object*>(dst);
   }
@@ -348,9 +368,9 @@
   // Lays out where the image objects will be at runtime.
   void CalculateNewObjectOffsets()
       SHARED_REQUIRES(Locks::mutator_lock_);
-  void CreateHeader(size_t oat_loaded_size, size_t oat_data_offset)
+  void CreateHeader(size_t oat_index)
       SHARED_REQUIRES(Locks::mutator_lock_);
-  mirror::ObjectArray<mirror::Object>* CreateImageRoots(const char* oat_filename) const
+  mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
       SHARED_REQUIRES(Locks::mutator_lock_);
   void CalculateObjectBinSlots(mirror::Object* obj)
       SHARED_REQUIRES(Locks::mutator_lock_);
@@ -367,7 +387,7 @@
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Creates the contiguous image in memory and adjusts pointers.
-  void CopyAndFixupNativeData() SHARED_REQUIRES(Locks::mutator_lock_);
+  void CopyAndFixupNativeData(size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
   void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_);
   static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
       SHARED_REQUIRES(Locks::mutator_lock_);
@@ -392,9 +412,6 @@
                               bool* quick_is_interpreted)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Patches references in OatFile to expect runtime addresses.
-  void SetOatChecksumFromElfFile(File* elf_file);
-
   // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
   size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;
 
@@ -404,7 +421,7 @@
   // Assign the offset for an ArtMethod.
   void AssignMethodOffset(ArtMethod* method,
                           NativeObjectRelocationType type,
-                          const char* oat_filename)
+                          size_t oat_index)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Return true if klass is loaded by the boot class loader but not in the boot image.
@@ -443,15 +460,21 @@
   // Return true if ptr is within the boot oat file.
   bool IsInBootOatFile(const void* ptr) const;
 
-  const char* GetOatFilename(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+  // Get the index of the oat file associated with the object.
+  size_t GetOatIndex(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const char* GetDefaultOatFilename() const {
-    return default_oat_filename_;
+  // The oat index for shared data in multi-image and all data in single-image compilation.
+  size_t GetDefaultOatIndex() const {
+    return 0u;
   }
 
-  ImageInfo& GetImageInfo(const char* oat_filename);
-  const ImageInfo& GetConstImageInfo(const char* oat_filename) const;
-  const ImageInfo& GetImageInfo(size_t index) const;
+  ImageInfo& GetImageInfo(size_t oat_index) {
+    return image_infos_[oat_index];
+  }
+
+  const ImageInfo& GetImageInfo(size_t oat_index) const {
+    return image_infos_[oat_index];
+  }
 
   // Find an already strong interned string in the other images or in the boot image. Used to
   // remove duplicates in the multi image and app image case.
@@ -465,9 +488,6 @@
   // Offset from image_begin_ to where the first object is in image_.
   size_t image_objects_offset_begin_;
 
-  // oat file with code for this image
-  OatFile* oat_file_;
-
   // Pointer arrays that need to be updated. Since these are only some int and long arrays, we need
   // to keep track. These include vtable arrays, iftable arrays, and dex caches.
   std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;
@@ -483,14 +503,14 @@
   // Size of pointers on the target architecture.
   size_t target_ptr_size_;
 
-  // Mapping of oat filename to image data.
-  std::unordered_map<std::string, ImageInfo> image_info_map_;
+  // Image data indexed by the oat file index.
+  dchecked_vector<ImageInfo> image_infos_;
 
   // ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
   // have one entry per art field for convenience. ArtFields are placed right after the end of the
   // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
   struct NativeObjectRelocation {
-    const char* oat_filename;
+    size_t oat_index;
     uintptr_t offset;
     NativeObjectRelocationType type;
 
@@ -522,10 +542,11 @@
   // Which mode the image is stored as, see image.h
   const ImageHeader::StorageMode image_storage_mode_;
 
-  // Map of dex files to the oat filenames that they were compiled into.
-  const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map_;
-  const std::vector<const char*> oat_filenames_;
-  const char* default_oat_filename_;
+  // The file names of oat files.
+  const std::vector<const char*>& oat_filenames_;
+
+  // Map of dex files to the indexes of oat files that they were compiled into.
+  const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;
 
   friend class ContainsBootClassLoaderNonImageClassVisitor;
   friend class FixupClassVisitor;
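
The header diff shows the shape of the refactoring: oat filenames are no longer map keys; each dex file maps to a small integer oat index, and per-oat data sits in a dchecked_vector indexed by it. A minimal sketch of the lookup pattern with hypothetical stub types (the real classes are art::DexFile and ImageWriter::ImageInfo):

```cpp
#include <cstddef>
#include <unordered_map>
#include <vector>

struct DexFileStub {};    // stands in for art::DexFile
struct ImageInfoStub {};  // stands in for ImageWriter::ImageInfo

class OatIndexedData {
 public:
  explicit OatIndexedData(size_t oat_file_count) : image_infos_(oat_file_count) {}

  void MapDexFile(const DexFileStub* dex_file, size_t oat_index) {
    dex_file_oat_index_map_.emplace(dex_file, oat_index);
  }

  ImageInfoStub& InfoForDexFile(const DexFileStub* dex_file) {
    // One hash lookup on a pointer key, then O(1) vector indexing; the old
    // scheme hashed a filename string on every GetImageInfo() call.
    return image_infos_[dex_file_oat_index_map_.at(dex_file)];
  }

 private:
  std::vector<ImageInfoStub> image_infos_;
  std::unordered_map<const DexFileStub*, size_t> dex_file_oat_index_map_;
};
```
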
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 909d682..79a6d38 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -163,7 +163,6 @@
       /* dump_passes */ false,
       cumulative_logger_.get(),
       /* swap_fd */ -1,
-      /* dex to oat map */ nullptr,
       /* profile_compilation_info */ nullptr));
   // Disable dedupe so we can remove compiled methods.
   compiler_driver_->SetDedupeEnabled(false);
@@ -231,10 +230,10 @@
   }
 
   // Trim maps to reduce memory usage.
-  // TODO: measure how much this increases compile time.
+  // TODO: move this to an idle phase.
   {
     TimingLogger::ScopedTiming t2("TrimMaps", &logger);
-    runtime->GetArenaPool()->TrimMaps();
+    runtime->GetJitArenaPool()->TrimMaps();
   }
 
   total_time_ += NanoTime() - start_time;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 8d60be2..cf836a9 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -220,8 +220,7 @@
 
   std::string reason;
   ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
-                  LoadNativeLibrary(env_, "", class_loader_, /* is_shared_namespace */ false,
-                                    nullptr, nullptr, &reason))
+                  LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason))
       << reason;
 
   jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24);
@@ -236,8 +235,7 @@
 
   std::string reason;
   ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
-                  LoadNativeLibrary(env_, "", class_loader_, /* is_shared_namespace */ false,
-                                    nullptr, nullptr, &reason))
+                  LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason))
       << reason;
 
   jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42);
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
index 73b0fac..682b008 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.cc
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -40,6 +40,11 @@
                                                 MethodReference(nullptr, 0u),
                                                 aligned_offset);
   if (needs_thunk) {
+    // All remaining patches will be handled by this thunk.
+    DCHECK(!unprocessed_patches_.empty());
+    DCHECK_LE(aligned_offset - unprocessed_patches_.front().second, max_positive_displacement_);
+    unprocessed_patches_.clear();
+
     thunk_locations_.push_back(aligned_offset);
     offset = CompiledMethod::AlignCode(aligned_offset + thunk_code_.size(), instruction_set_);
   }
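
The new DCHECKs make the underlying invariant explicit: pending patch offsets only grow, so if the earliest unprocessed call site can reach a thunk placed at aligned_offset with a forward branch, every later pending call site can reach it too, and the whole list can be cleared. A small sketch of that reachability condition (illustrative names, not the real bookkeeping):

```cpp
#include <cstdint>

// Mirrors the DCHECK_LE above: a thunk at thunk_offset resolves all pending
// patches as long as the earliest of them is within the maximum positive
// branch displacement of the thunk.
inline bool ThunkCoversAllPendingPatches(uint32_t thunk_offset,
                                         uint32_t first_pending_patch_offset,
                                         uint32_t max_positive_displacement) {
  return thunk_offset - first_pending_patch_offset <= max_positive_displacement;
}
```
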
diff --git a/compiler/linker/arm/relative_patcher_arm_base.h b/compiler/linker/arm/relative_patcher_arm_base.h
index f80dd96..25fd35e 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.h
+++ b/compiler/linker/arm/relative_patcher_arm_base.h
@@ -27,18 +27,23 @@
 
 class ArmBaseRelativePatcher : public RelativePatcher {
  public:
-  uint32_t ReserveSpace(uint32_t offset, const CompiledMethod* compiled_method,
+  uint32_t ReserveSpace(uint32_t offset,
+                        const CompiledMethod* compiled_method,
                         MethodReference method_ref) OVERRIDE;
   uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
   uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
 
  protected:
   ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider,
-                         InstructionSet instruction_set, std::vector<uint8_t> thunk_code,
-                         uint32_t max_positive_displacement, uint32_t max_negative_displacement);
+                         InstructionSet instruction_set,
+                         std::vector<uint8_t> thunk_code,
+                         uint32_t max_positive_displacement,
+                         uint32_t max_negative_displacement);
 
-  uint32_t ReserveSpaceInternal(uint32_t offset, const CompiledMethod* compiled_method,
-                                MethodReference method_ref, uint32_t max_extra_space);
+  uint32_t ReserveSpaceInternal(uint32_t offset,
+                                const CompiledMethod* compiled_method,
+                                MethodReference method_ref,
+                                uint32_t max_extra_space);
   uint32_t CalculateDisplacement(uint32_t patch_offset, uint32_t target_offset);
 
  private:
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc
index 5f4f760..c090dff 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2.cc
@@ -28,8 +28,10 @@
                              kMaxPositiveDisplacement, kMaxNegativeDisplacement) {
 }
 
-void Thumb2RelativePatcher::PatchCall(std::vector<uint8_t>* code, uint32_t literal_offset,
-                                      uint32_t patch_offset, uint32_t target_offset) {
+void Thumb2RelativePatcher::PatchCall(std::vector<uint8_t>* code,
+                                      uint32_t literal_offset,
+                                      uint32_t patch_offset,
+                                      uint32_t target_offset) {
   DCHECK_LE(literal_offset + 4u, code->size());
   DCHECK_EQ(literal_offset & 1u, 0u);
   DCHECK_EQ(patch_offset & 1u, 0u);
diff --git a/compiler/linker/arm/relative_patcher_thumb2.h b/compiler/linker/arm/relative_patcher_thumb2.h
index 006d6fb..0d903c0 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.h
+++ b/compiler/linker/arm/relative_patcher_thumb2.h
@@ -26,10 +26,14 @@
  public:
   explicit Thumb2RelativePatcher(RelativePatcherTargetProvider* provider);
 
-  void PatchCall(std::vector<uint8_t>* code, uint32_t literal_offset,
-                 uint32_t patch_offset, uint32_t target_offset) OVERRIDE;
-  void PatchDexCacheReference(std::vector<uint8_t>* code, const LinkerPatch& patch,
-                              uint32_t patch_offset, uint32_t target_offset) OVERRIDE;
+  void PatchCall(std::vector<uint8_t>* code,
+                 uint32_t literal_offset,
+                 uint32_t patch_offset,
+                 uint32_t target_offset) OVERRIDE;
+  void PatchDexCacheReference(std::vector<uint8_t>* code,
+                              const LinkerPatch& patch,
+                              uint32_t patch_offset,
+                              uint32_t target_offset) OVERRIDE;
 
  private:
   static std::vector<uint8_t> CompileThunkCode();
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 3d4c218..a81c85c 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -131,8 +131,10 @@
   return ArmBaseRelativePatcher::WriteThunks(out, offset);
 }
 
-void Arm64RelativePatcher::PatchCall(std::vector<uint8_t>* code, uint32_t literal_offset,
-                                     uint32_t patch_offset, uint32_t target_offset) {
+void Arm64RelativePatcher::PatchCall(std::vector<uint8_t>* code,
+                                     uint32_t literal_offset,
+                                     uint32_t patch_offset,
+                                     uint32_t target_offset) {
   DCHECK_LE(literal_offset + 4u, code->size());
   DCHECK_EQ(literal_offset & 3u, 0u);
   DCHECK_EQ(patch_offset & 3u, 0u);
diff --git a/compiler/linker/arm64/relative_patcher_arm64.h b/compiler/linker/arm64/relative_patcher_arm64.h
index 2d07e75..f9b76e6 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.h
+++ b/compiler/linker/arm64/relative_patcher_arm64.h
@@ -28,14 +28,19 @@
   Arm64RelativePatcher(RelativePatcherTargetProvider* provider,
                        const Arm64InstructionSetFeatures* features);
 
-  uint32_t ReserveSpace(uint32_t offset, const CompiledMethod* compiled_method,
+  uint32_t ReserveSpace(uint32_t offset,
+                        const CompiledMethod* compiled_method,
                         MethodReference method_ref) OVERRIDE;
   uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
   uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
-  void PatchCall(std::vector<uint8_t>* code, uint32_t literal_offset,
-                 uint32_t patch_offset, uint32_t target_offset) OVERRIDE;
-  void PatchDexCacheReference(std::vector<uint8_t>* code, const LinkerPatch& patch,
-                              uint32_t patch_offset, uint32_t target_offset) OVERRIDE;
+  void PatchCall(std::vector<uint8_t>* code,
+                 uint32_t literal_offset,
+                 uint32_t patch_offset,
+                 uint32_t target_offset) OVERRIDE;
+  void PatchDexCacheReference(std::vector<uint8_t>* code,
+                              const LinkerPatch& patch,
+                              uint32_t patch_offset,
+                              uint32_t target_offset) OVERRIDE;
 
  private:
   static std::vector<uint8_t> CompileThunkCode();
diff --git a/compiler/linker/multi_oat_relative_patcher.cc b/compiler/linker/multi_oat_relative_patcher.cc
new file mode 100644
index 0000000..e9e242b
--- /dev/null
+++ b/compiler/linker/multi_oat_relative_patcher.cc
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "multi_oat_relative_patcher.h"
+
+#include "base/bit_utils.h"
+#include "base/logging.h"
+#include "globals.h"
+
+namespace art {
+namespace linker {
+
+MultiOatRelativePatcher::MultiOatRelativePatcher(InstructionSet instruction_set,
+                                                 const InstructionSetFeatures* features)
+    : method_offset_map_(),
+      relative_patcher_(
+          linker::RelativePatcher::Create(instruction_set, features, &method_offset_map_)),
+      adjustment_(0u),
+      instruction_set_(instruction_set),
+      start_size_code_alignment_(0u),
+      start_size_relative_call_thunks_(0u),
+      start_size_misc_thunks_(0u) {
+}
+
+void MultiOatRelativePatcher::StartOatFile(uint32_t adjustment) {
+  DCHECK_ALIGNED(adjustment, kPageSize);
+  adjustment_ = adjustment;
+
+  start_size_code_alignment_ = relative_patcher_->CodeAlignmentSize();
+  start_size_relative_call_thunks_ = relative_patcher_->RelativeCallThunksSize();
+  start_size_misc_thunks_ = relative_patcher_->MiscThunksSize();
+}
+
+uint32_t MultiOatRelativePatcher::CodeAlignmentSize() const {
+  DCHECK_GE(relative_patcher_->CodeAlignmentSize(), start_size_code_alignment_);
+  return relative_patcher_->CodeAlignmentSize() - start_size_code_alignment_;
+}
+
+uint32_t MultiOatRelativePatcher::RelativeCallThunksSize() const {
+  DCHECK_GE(relative_patcher_->RelativeCallThunksSize(), start_size_relative_call_thunks_);
+  return relative_patcher_->RelativeCallThunksSize() - start_size_relative_call_thunks_;
+}
+
+uint32_t MultiOatRelativePatcher::MiscThunksSize() const {
+  DCHECK_GE(relative_patcher_->MiscThunksSize(), start_size_misc_thunks_);
+  return relative_patcher_->MiscThunksSize() - start_size_misc_thunks_;
+}
+
+std::pair<bool, uint32_t> MultiOatRelativePatcher::MethodOffsetMap::FindMethodOffset(
+    MethodReference ref) {
+  auto it = map.find(ref);
+  if (it == map.end()) {
+    return std::pair<bool, uint32_t>(false, 0u);
+  } else {
+    return std::pair<bool, uint32_t>(true, it->second);
+  }
+}
+}  // namespace linker
+}  // namespace art
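
FindMethodOffset() follows the usual RelativePatcherTargetProvider contract: the bool in the returned pair carries the found/not-found signal, so any stored offset value, including 0, remains representable. A sketch of a hypothetical caller (not part of this change):

```cpp
#include <cstdint>
#include <utility>

#include "method_reference.h"
#include "relative_patcher.h"

namespace art {
namespace linker {

void ResolveTarget(RelativePatcherTargetProvider* provider, MethodReference ref) {
  std::pair<bool, uint32_t> result = provider->FindMethodOffset(ref);
  if (result.first) {
    uint32_t target_offset = result.second;  // already globally adjusted by SetOffset()
    // ... patch the call site to branch to target_offset ...
    static_cast<void>(target_offset);
  }  // else: the method has not been assigned an offset yet.
}

}  // namespace linker
}  // namespace art
```
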
diff --git a/compiler/linker/multi_oat_relative_patcher.h b/compiler/linker/multi_oat_relative_patcher.h
new file mode 100644
index 0000000..1727d52
--- /dev/null
+++ b/compiler/linker/multi_oat_relative_patcher.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_LINKER_MULTI_OAT_RELATIVE_PATCHER_H_
+#define ART_COMPILER_LINKER_MULTI_OAT_RELATIVE_PATCHER_H_
+
+#include "arch/instruction_set.h"
+#include "method_reference.h"
+#include "relative_patcher.h"
+#include "safe_map.h"
+
+namespace art {
+
+class CompiledMethod;
+class LinkerPatch;
+class InstructionSetFeatures;
+
+namespace linker {
+
+// MultiOatRelativePatcher is a helper class for handling patching across
+// any number of oat files. It provides storage for method code offsets
+// and wraps RelativePatcher calls, adjusting relative offsets according
+// to the value set by SetAdjustment().
+class MultiOatRelativePatcher FINAL {
+ public:
+  using const_iterator =
+      SafeMap<MethodReference, uint32_t, MethodReferenceComparator>::const_iterator;
+
+  MultiOatRelativePatcher(InstructionSet instruction_set, const InstructionSetFeatures* features);
+
+  // Mark the start of a new oat file (for statistics retrieval) and set the
+  // adjustment to apply to all relative offsets that are subsequently
+  // passed to the MultiOatRelativePatcher for this file.
+  //
+  // The adjustment should be the global offset of the base from which relative
+  // offsets are calculated, such as the start of .rodata for the current oat file.
+  // It must never point directly to a method's code to avoid relative offsets
+  // with value 0, because 0 is used as a missing offset indication in
+  // GetOffset() and an error indication in WriteThunks(). Additionally, it must be
+  // page-aligned, so that it does not skew alignment calculations, e.g. for arm64 ADRP.
+  void StartOatFile(uint32_t adjustment);
+
+  // Get relative offset. Returns 0 when the offset has not been set yet.
+  uint32_t GetOffset(MethodReference method_ref) {
+    auto it = method_offset_map_.map.find(method_ref);
+    return (it != method_offset_map_.map.end()) ? it->second - adjustment_ : 0u;
+  }
+
+  // Set the offset.
+  void SetOffset(MethodReference method_ref, uint32_t offset) {
+    method_offset_map_.map.Put(method_ref, offset + adjustment_);
+  }
+
+  // Wrapper around RelativePatcher::ReserveSpace(), doing offset adjustment.
+  uint32_t ReserveSpace(uint32_t offset,
+                        const CompiledMethod* compiled_method,
+                        MethodReference method_ref) {
+    offset += adjustment_;
+    offset = relative_patcher_->ReserveSpace(offset, compiled_method, method_ref);
+    offset -= adjustment_;
+    return offset;
+  }
+
+  // Wrapper around RelativePatcher::ReserveSpaceEnd(), doing offset adjustment.
+  uint32_t ReserveSpaceEnd(uint32_t offset) {
+    offset += adjustment_;
+    offset = relative_patcher_->ReserveSpaceEnd(offset);
+    offset -= adjustment_;
+    return offset;
+  }
+
+  // Wrapper around RelativePatcher::WriteThunks(), doing offset adjustment.
+  uint32_t WriteThunks(OutputStream* out, uint32_t offset) {
+    offset += adjustment_;
+    offset = relative_patcher_->WriteThunks(out, offset);
+    if (offset != 0u) {  // 0u indicates write error.
+      offset -= adjustment_;
+    }
+    return offset;
+  }
+
+  // Wrapper around RelativePatcher::PatchCall(), doing offset adjustment.
+  void PatchCall(std::vector<uint8_t>* code,
+                 uint32_t literal_offset,
+                 uint32_t patch_offset,
+                 uint32_t target_offset) {
+    patch_offset += adjustment_;
+    target_offset += adjustment_;
+    relative_patcher_->PatchCall(code, literal_offset, patch_offset, target_offset);
+  }
+
+  // Wrapper around RelativePatcher::PatchDexCacheReference(), doing offset adjustment.
+  void PatchDexCacheReference(std::vector<uint8_t>* code,
+                              const LinkerPatch& patch,
+                              uint32_t patch_offset,
+                              uint32_t target_offset) {
+    patch_offset += adjustment_;
+    target_offset += adjustment_;
+    relative_patcher_->PatchDexCacheReference(code, patch, patch_offset, target_offset);
+  }
+
+  // Wrappers around RelativePatcher for statistics retrieval.
+  uint32_t CodeAlignmentSize() const;
+  uint32_t RelativeCallThunksSize() const;
+  uint32_t MiscThunksSize() const;
+
+ private:
+  // Map method reference to assigned offset.
+  // Wrap the map in a class implementing linker::RelativePatcherTargetProvider.
+  class MethodOffsetMap : public linker::RelativePatcherTargetProvider {
+   public:
+    std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) OVERRIDE;
+    SafeMap<MethodReference, uint32_t, MethodReferenceComparator> map;
+  };
+
+  MethodOffsetMap method_offset_map_;
+  std::unique_ptr<RelativePatcher> relative_patcher_;
+  uint32_t adjustment_;
+  InstructionSet instruction_set_;
+
+  uint32_t start_size_code_alignment_;
+  uint32_t start_size_relative_call_thunks_;
+  uint32_t start_size_misc_thunks_;
+
+  friend class MultiOatRelativePatcherTest;
+
+  DISALLOW_COPY_AND_ASSIGN(MultiOatRelativePatcher);
+};
+
+}  // namespace linker
+}  // namespace art
+
+#endif  // ART_COMPILER_LINKER_MULTI_OAT_RELATIVE_PATCHER_H_
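
A hedged usage sketch to make the adjustment arithmetic concrete (the features object, dex file pointer, and all numeric offsets below are invented for illustration):

```cpp
// Assumes 'features' (const InstructionSetFeatures*) and 'dex_file'
// (const DexFile*) are valid; all offsets are made-up example values.
MultiOatRelativePatcher patcher(kRuntimeISA, features);
MethodReference ref(dex_file, /* dex_method_index */ 1u);

patcher.StartOatFile(/* adjustment */ 0x10000u);  // page-aligned base of oat file 0
patcher.SetOffset(ref, 0x100u);                   // stored internally as 0x10100u
CHECK_EQ(patcher.GetOffset(ref), 0x100u);         // 0x10100u - 0x10000u

patcher.StartOatFile(/* adjustment */ 0x50000u);  // page-aligned base of oat file 1
// The stored 0x10100u minus the new adjustment wraps around as a uint32_t,
// which the architecture-specific patchers treat as a negative displacement
// when patching a call from oat file 1 back into oat file 0.
uint32_t cross_file_offset = patcher.GetOffset(ref);
CHECK_EQ(cross_file_offset, 0x10100u - 0x50000u);
```

Note that StartOatFile() also snapshots the alignment and thunk counters, so CodeAlignmentSize(), RelativeCallThunksSize(), and MiscThunksSize() report only the current file's contribution.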
diff --git a/compiler/linker/multi_oat_relative_patcher_test.cc b/compiler/linker/multi_oat_relative_patcher_test.cc
new file mode 100644
index 0000000..792cdfe
--- /dev/null
+++ b/compiler/linker/multi_oat_relative_patcher_test.cc
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiled_method.h"
+#include "gtest/gtest.h"
+#include "multi_oat_relative_patcher.h"
+#include "vector_output_stream.h"
+
+namespace art {
+namespace linker {
+
+static const MethodReference kNullMethodRef = MethodReference(nullptr, 0u);
+
+static bool EqualRef(MethodReference lhs, MethodReference rhs) {
+  return lhs.dex_file == rhs.dex_file && lhs.dex_method_index == rhs.dex_method_index;
+}
+
+class MultiOatRelativePatcherTest : public testing::Test {
+ protected:
+  class MockPatcher : public RelativePatcher {
+   public:
+    MockPatcher() { }
+
+    uint32_t ReserveSpace(uint32_t offset,
+                          const CompiledMethod* compiled_method ATTRIBUTE_UNUSED,
+                          MethodReference method_ref) OVERRIDE {
+      last_reserve_offset_ = offset;
+      last_reserve_method_ = method_ref;
+      offset += next_reserve_adjustment_;
+      next_reserve_adjustment_ = 0u;
+      return offset;
+    }
+
+    uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE {
+      last_reserve_offset_ = offset;
+      last_reserve_method_ = kNullMethodRef;
+      offset += next_reserve_adjustment_;
+      next_reserve_adjustment_ = 0u;
+      return offset;
+    }
+
+    uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE {
+      last_write_offset_ = offset;
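+      // The bytes written below encode the thunk kind: alignment padding is
+      // written as zeros, call thunks as 'c' and misc thunks as 'm', so the
+      // Write test can identify each kind in the output.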
+      if (next_write_alignment_ != 0u) {
+        offset += next_write_alignment_;
+        bool success = WriteCodeAlignment(out, next_write_alignment_);
+        CHECK(success);
+        next_write_alignment_ = 0u;
+      }
+      if (next_write_call_thunk_ != 0u) {
+        offset += next_write_call_thunk_;
+        std::vector<uint8_t> thunk(next_write_call_thunk_, 'c');
+        bool success = WriteRelCallThunk(out, ArrayRef<const uint8_t>(thunk));
+        CHECK(success);
+        next_write_call_thunk_ = 0u;
+      }
+      if (next_write_misc_thunk_ != 0u) {
+        offset += next_write_misc_thunk_;
+        std::vector<uint8_t> thunk(next_write_misc_thunk_, 'm');
+        bool success = WriteMiscThunk(out, ArrayRef<const uint8_t>(thunk));
+        CHECK(success);
+        next_write_misc_thunk_ = 0u;
+      }
+      return offset;
+    }
+
+    void PatchCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
+                   uint32_t literal_offset,
+                   uint32_t patch_offset,
+                   uint32_t target_offset) OVERRIDE {
+      last_literal_offset_ = literal_offset;
+      last_patch_offset_ = patch_offset;
+      last_target_offset_ = target_offset;
+    }
+
+    void PatchDexCacheReference(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
+                                const LinkerPatch& patch,
+                                uint32_t patch_offset,
+                                uint32_t target_offset) OVERRIDE {
+      last_literal_offset_ = patch.LiteralOffset();
+      last_patch_offset_ = patch_offset;
+      last_target_offset_ = target_offset;
+    }
+
+    uint32_t last_reserve_offset_ = 0u;
+    MethodReference last_reserve_method_ = kNullMethodRef;
+    uint32_t next_reserve_adjustment_ = 0u;
+
+    uint32_t last_write_offset_ = 0u;
+    uint32_t next_write_alignment_ = 0u;
+    uint32_t next_write_call_thunk_ = 0u;
+    uint32_t next_write_misc_thunk_ = 0u;
+
+    uint32_t last_literal_offset_ = 0u;
+    uint32_t last_patch_offset_ = 0u;
+    uint32_t last_target_offset_ = 0u;
+  };
+
+  MultiOatRelativePatcherTest()
+      : instruction_set_features_(InstructionSetFeatures::FromCppDefines()),
+        patcher_(kRuntimeISA, instruction_set_features_.get()) {
+    std::unique_ptr<MockPatcher> mock(new MockPatcher());
+    mock_ = mock.get();
+    patcher_.relative_patcher_ = std::move(mock);
+  }
+
+  std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
+  MultiOatRelativePatcher patcher_;
+  MockPatcher* mock_;
+};
+
+TEST_F(MultiOatRelativePatcherTest, Offsets) {
+  const DexFile* dex_file = reinterpret_cast<const DexFile*>(1);
+  MethodReference ref1(dex_file, 1u);
+  MethodReference ref2(dex_file, 2u);
+  EXPECT_EQ(0u, patcher_.GetOffset(ref1));
+  EXPECT_EQ(0u, patcher_.GetOffset(ref2));
+
+  uint32_t adjustment1 = 0x1000;
+  patcher_.StartOatFile(adjustment1);
+  EXPECT_EQ(0u, patcher_.GetOffset(ref1));
+  EXPECT_EQ(0u, patcher_.GetOffset(ref2));
+
+  uint32_t off1 = 0x1234;
+  patcher_.SetOffset(ref1, off1);
+  EXPECT_EQ(off1, patcher_.GetOffset(ref1));
+  EXPECT_EQ(0u, patcher_.GetOffset(ref2));
+
+  uint32_t adjustment2 = 0x30000;
+  patcher_.StartOatFile(adjustment2);
+  EXPECT_EQ(off1 + adjustment1 - adjustment2, patcher_.GetOffset(ref1));
+  EXPECT_EQ(0u, patcher_.GetOffset(ref2));
+
+  uint32_t off2 = 0x4321;
+  patcher_.SetOffset(ref2, off2);
+  EXPECT_EQ(off1 + adjustment1 - adjustment2, patcher_.GetOffset(ref1));
+  EXPECT_EQ(off2, patcher_.GetOffset(ref2));
+
+  uint32_t adjustment3 = 0x78000;
+  patcher_.StartOatFile(adjustment3);
+  EXPECT_EQ(off1 + adjustment1 - adjustment3, patcher_.GetOffset(ref1));
+  EXPECT_EQ(off2 + adjustment2 - adjustment3, patcher_.GetOffset(ref2));
+}
+
+TEST_F(MultiOatRelativePatcherTest, OffsetsInReserve) {
+  const DexFile* dex_file = reinterpret_cast<const DexFile*>(1);
+  MethodReference ref1(dex_file, 1u);
+  MethodReference ref2(dex_file, 2u);
+  MethodReference ref3(dex_file, 3u);
+  const CompiledMethod* method = reinterpret_cast<const CompiledMethod*>(-1);
+
+  uint32_t adjustment1 = 0x1000;
+  patcher_.StartOatFile(adjustment1);
+
+  uint32_t method1_offset = 0x100;
+  uint32_t method1_offset_check = patcher_.ReserveSpace(method1_offset, method, ref1);
+  ASSERT_EQ(adjustment1 + method1_offset, mock_->last_reserve_offset_);
+  ASSERT_TRUE(EqualRef(ref1, mock_->last_reserve_method_));
+  ASSERT_EQ(method1_offset, method1_offset_check);
+
+  uint32_t method2_offset = 0x1230;
+  uint32_t method2_reserve_adjustment = 0x10;
+  mock_->next_reserve_adjustment_ = method2_reserve_adjustment;
+  uint32_t method2_offset_adjusted = patcher_.ReserveSpace(method2_offset, method, ref2);
+  ASSERT_EQ(adjustment1 + method2_offset, mock_->last_reserve_offset_);
+  ASSERT_TRUE(EqualRef(ref2, mock_->last_reserve_method_));
+  ASSERT_EQ(method2_offset + method2_reserve_adjustment, method2_offset_adjusted);
+
+  uint32_t end1_offset = 0x4320;
+  uint32_t end1_offset_check = patcher_.ReserveSpaceEnd(end1_offset);
+  ASSERT_EQ(adjustment1 + end1_offset, mock_->last_reserve_offset_);
+  ASSERT_TRUE(EqualRef(kNullMethodRef, mock_->last_reserve_method_));
+  ASSERT_EQ(end1_offset, end1_offset_check);
+
+  uint32_t adjustment2 = 0xd000;
+  patcher_.StartOatFile(adjustment2);
+
+  uint32_t method3_offset = 0xf00;
+  uint32_t method3_offset_check = patcher_.ReserveSpace(method3_offset, method, ref3);
+  ASSERT_EQ(adjustment2 + method3_offset, mock_->last_reserve_offset_);
+  ASSERT_TRUE(EqualRef(ref3, mock_->last_reserve_method_));
+  ASSERT_EQ(method3_offset, method3_offset_check);
+
+  uint32_t end2_offset = 0x2400;
+  uint32_t end2_reserve_adjustment = 0x20;
+  mock_->next_reserve_adjustment_ = end2_reserve_adjustment;
+  uint32_t end2_offset_adjusted = patcher_.ReserveSpaceEnd(end2_offset);
+  ASSERT_EQ(adjustment2 + end2_offset, mock_->last_reserve_offset_);
+  ASSERT_TRUE(EqualRef(kNullMethodRef, mock_->last_reserve_method_));
+  ASSERT_EQ(end2_offset + end2_reserve_adjustment, end2_offset_adjusted);
+}
+
+TEST_F(MultiOatRelativePatcherTest, Write) {
+  std::vector<uint8_t> output;
+  VectorOutputStream vos("output", &output);
+
+  uint32_t adjustment1 = 0x1000;
+  patcher_.StartOatFile(adjustment1);
+
+  uint32_t method1_offset = 0x100;
+  uint32_t method1_offset_check = patcher_.WriteThunks(&vos, method1_offset);
+  ASSERT_EQ(adjustment1 + method1_offset, mock_->last_write_offset_);
+  ASSERT_EQ(method1_offset, method1_offset_check);
+  vos.WriteFully("1", 1);  // Mark method1.
+
+  uint32_t method2_offset = 0x1230;
+  uint32_t method2_alignment_size = 1;
+  uint32_t method2_call_thunk_size = 2;
+  mock_->next_write_alignment_ = method2_alignment_size;
+  mock_->next_write_call_thunk_ = method2_call_thunk_size;
+  uint32_t method2_offset_adjusted = patcher_.WriteThunks(&vos, method2_offset);
+  ASSERT_EQ(adjustment1 + method2_offset, mock_->last_write_offset_);
+  ASSERT_EQ(method2_offset + method2_alignment_size + method2_call_thunk_size,
+            method2_offset_adjusted);
+  vos.WriteFully("2", 1);  // Mark method2.
+
+  EXPECT_EQ(method2_alignment_size, patcher_.CodeAlignmentSize());
+  EXPECT_EQ(method2_call_thunk_size, patcher_.RelativeCallThunksSize());
+
+  uint32_t adjustment2 = 0xd000;
+  patcher_.StartOatFile(adjustment2);
+
+  uint32_t method3_offset = 0xf00;
+  uint32_t method3_alignment_size = 2;
+  uint32_t method3_misc_thunk_size = 1;
+  mock_->next_write_alignment_ = method3_alignment_size;
+  mock_->next_write_misc_thunk_ = method3_misc_thunk_size;
+  uint32_t method3_offset_adjusted = patcher_.WriteThunks(&vos, method3_offset);
+  ASSERT_EQ(adjustment2 + method3_offset, mock_->last_write_offset_);
+  ASSERT_EQ(method3_offset + method3_alignment_size + method3_misc_thunk_size,
+            method3_offset_adjusted);
+  vos.WriteFully("3", 1);  // Mark method3.
+
+  EXPECT_EQ(method3_alignment_size, patcher_.CodeAlignmentSize());
+  EXPECT_EQ(method3_misc_thunk_size, patcher_.MiscThunksSize());
+
+  uint8_t expected_output[] = {
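+      // '1' marks method1; then one alignment byte (0) and a two-byte call
+      // thunk ("cc") precede '2'; two alignment bytes and a one-byte misc
+      // thunk ('m') precede '3'.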
+      '1',
+      0, 'c', 'c', '2',
+      0, 0, 'm', '3',
+  };
+  ASSERT_EQ(arraysize(expected_output), output.size());
+  for (size_t i = 0; i != arraysize(expected_output); ++i) {
+    ASSERT_EQ(expected_output[i], output[i]) << i;
+  }
+}
+
+TEST_F(MultiOatRelativePatcherTest, Patch) {
+  std::vector<uint8_t> code(16);
+
+  uint32_t adjustment1 = 0x1000;
+  patcher_.StartOatFile(adjustment1);
+
+  uint32_t method1_literal_offset = 4u;
+  uint32_t method1_patch_offset = 0x1234u;
+  uint32_t method1_target_offset = 0x8888u;
+  patcher_.PatchCall(&code, method1_literal_offset, method1_patch_offset, method1_target_offset);
+  ASSERT_EQ(method1_literal_offset, mock_->last_literal_offset_);
+  ASSERT_EQ(method1_patch_offset + adjustment1, mock_->last_patch_offset_);
+  ASSERT_EQ(method1_target_offset + adjustment1, mock_->last_target_offset_);
+
+  uint32_t method2_literal_offset = 12u;
+  uint32_t method2_patch_offset = 0x7654u;
+  uint32_t method2_target_offset = 0xccccu;
+  LinkerPatch method2_patch =
+      LinkerPatch::DexCacheArrayPatch(method2_literal_offset, nullptr, 0u, 1234u);
+  patcher_.PatchDexCacheReference(
+      &code, method2_patch, method2_patch_offset, method2_target_offset);
+  ASSERT_EQ(method2_literal_offset, mock_->last_literal_offset_);
+  ASSERT_EQ(method2_patch_offset + adjustment1, mock_->last_patch_offset_);
+  ASSERT_EQ(method2_target_offset + adjustment1, mock_->last_target_offset_);
+
+  uint32_t adjustment2 = 0xd000;
+  patcher_.StartOatFile(adjustment2);
+
+  uint32_t method3_literal_offset = 8u;
+  uint32_t method3_patch_offset = 0x108u;
+  uint32_t method3_target_offset = 0x200u;
+  patcher_.PatchCall(&code, method3_literal_offset, method3_patch_offset, method3_target_offset);
+  ASSERT_EQ(method3_literal_offset, mock_->last_literal_offset_);
+  ASSERT_EQ(method3_patch_offset + adjustment2, mock_->last_patch_offset_);
+  ASSERT_EQ(method3_target_offset + adjustment2, mock_->last_target_offset_);
+}
+
+}  // namespace linker
+}  // namespace art
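The Offsets test above pins down the adjustment contract: an offset recorded under one oat file's adjustment reads back shifted by the difference in adjustments once the next oat file is started (hence the expected values like off1 + adjustment1 - adjustment2). A minimal sketch of GetOffset()/SetOffset() consistent with that arithmetic, assuming offsets are stored as absolute (adjusted) values in the method offset map:

    void MultiOatRelativePatcher::SetOffset(MethodReference method_ref, uint32_t offset) {
      // Store the absolute offset; adjustment_ was set by StartOatFile().
      method_offset_map_.map.Put(method_ref, offset + adjustment_);
    }

    uint32_t MultiOatRelativePatcher::GetOffset(MethodReference method_ref) {
      // Convert back to an offset relative to the current oat file; 0u if unset.
      auto it = method_offset_map_.map.find(method_ref);
      return (it != method_offset_map_.map.end()) ? it->second - adjustment_ : 0u;
    }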
diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc
index 82702dc..6727c17 100644
--- a/compiler/linker/relative_patcher.cc
+++ b/compiler/linker/relative_patcher.cc
@@ -34,7 +34,8 @@
 namespace linker {
 
 std::unique_ptr<RelativePatcher> RelativePatcher::Create(
-    InstructionSet instruction_set, const InstructionSetFeatures* features,
+    InstructionSet instruction_set,
+    const InstructionSetFeatures* features,
     RelativePatcherTargetProvider* provider) {
   class RelativePatcherNone FINAL : public RelativePatcher {
    public:
diff --git a/compiler/linker/relative_patcher.h b/compiler/linker/relative_patcher.h
index 8a9f3f8..ba37451 100644
--- a/compiler/linker/relative_patcher.h
+++ b/compiler/linker/relative_patcher.h
@@ -83,23 +83,31 @@
   }
 
   // Reserve space for thunks if needed before a method, return adjusted offset.
-  virtual uint32_t ReserveSpace(uint32_t offset, const CompiledMethod* compiled_method,
+  virtual uint32_t ReserveSpace(uint32_t offset,
+                                const CompiledMethod* compiled_method,
                                 MethodReference method_ref) = 0;
 
   // Reserve space for thunks if needed after the last method, return adjusted offset.
+  // The caller may use this method to preemptively force thunk space reservation and
+  // then resume reservation for more methods. This is useful when there is a gap in
+  // the .text segment, for example when going to the next oat file for multi-image.
   virtual uint32_t ReserveSpaceEnd(uint32_t offset) = 0;
 
-  // Write relative call thunks if needed, return adjusted offset.
+  // Write relative call thunks if needed and return the adjusted offset; 0u means write failure.
   virtual uint32_t WriteThunks(OutputStream* out, uint32_t offset) = 0;
 
   // Patch method code. The input displacement is relative to the patched location,
   // the patcher may need to adjust it if the correct base is different.
-  virtual void PatchCall(std::vector<uint8_t>* code, uint32_t literal_offset,
-                         uint32_t patch_offset, uint32_t target_offset) = 0;
+  virtual void PatchCall(std::vector<uint8_t>* code,
+                         uint32_t literal_offset,
+                         uint32_t patch_offset,
+                         uint32_t target_offset) = 0;
 
   // Patch a reference to a dex cache location.
-  virtual void PatchDexCacheReference(std::vector<uint8_t>* code, const LinkerPatch& patch,
-                                      uint32_t patch_offset, uint32_t target_offset) = 0;
+  virtual void PatchDexCacheReference(std::vector<uint8_t>* code,
+                                      const LinkerPatch& patch,
+                                      uint32_t patch_offset,
+                                      uint32_t target_offset) = 0;
 
  protected:
   RelativePatcher()
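A hedged sketch of the reservation protocol the new ReserveSpaceEnd() comment describes; the variable names (next_oat_text_begin and friends) are illustrative, not from this change:

    // Reserve thunk space while laying out one oat file's methods ...
    offset = patcher->ReserveSpace(offset, compiled_method, method_ref);
    // ... flush pending thunk reservations before the gap in .text ...
    offset = patcher->ReserveSpaceEnd(offset);
    // ... then resume with the next oat file's methods at its own offsets.
    offset = patcher->ReserveSpace(next_oat_text_begin, next_method, next_ref);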
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index bf8e786..704135a 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -44,10 +44,22 @@
       : compiler_options_(),
         verification_results_(&compiler_options_),
         inliner_map_(),
-        driver_(&compiler_options_, &verification_results_, &inliner_map_,
-                Compiler::kQuick, instruction_set, nullptr,
-                false, nullptr, nullptr, nullptr, 1u,
-                false, false, nullptr, -1, nullptr, nullptr),
+        driver_(&compiler_options_,
+                &verification_results_,
+                &inliner_map_,
+                Compiler::kQuick,
+                instruction_set,
+                /* instruction_set_features */ nullptr,
+                /* boot_image */ false,
+                /* image_classes */ nullptr,
+                /* compiled_classes */ nullptr,
+                /* compiled_methods */ nullptr,
+                /* thread_count */ 1u,
+                /* dump_stats */ false,
+                /* dump_passes */ false,
+                /* timer */ nullptr,
+                /* swap_fd */ -1,
+                /* profile_compilation_info */ nullptr),
         error_msg_(),
         instruction_set_(instruction_set),
         features_(InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg_)),
@@ -138,8 +150,10 @@
                                 offset + patch.LiteralOffset(), target_offset);
           } else if (patch.Type() == kLinkerPatchDexCacheArray) {
             uint32_t target_offset = dex_cache_arrays_begin_ + patch.TargetDexCacheElementOffset();
-            patcher_->PatchDexCacheReference(&patched_code_, patch,
-                                             offset + patch.LiteralOffset(), target_offset);
+            patcher_->PatchDexCacheReference(&patched_code_,
+                                             patch,
+                                             offset + patch.LiteralOffset(),
+                                             target_offset);
           } else {
             LOG(FATAL) << "Bad patch type.";
           }
diff --git a/compiler/linker/x86/relative_patcher_x86.h b/compiler/linker/x86/relative_patcher_x86.h
index 0c881f0..ddc244c 100644
--- a/compiler/linker/x86/relative_patcher_x86.h
+++ b/compiler/linker/x86/relative_patcher_x86.h
@@ -26,8 +26,10 @@
  public:
   X86RelativePatcher() { }
 
-  void PatchDexCacheReference(std::vector<uint8_t>* code, const LinkerPatch& patch,
-                              uint32_t patch_offset, uint32_t target_offset) OVERRIDE;
+  void PatchDexCacheReference(std::vector<uint8_t>* code,
+                              const LinkerPatch& patch,
+                              uint32_t patch_offset,
+                              uint32_t target_offset) OVERRIDE;
 };
 
 }  // namespace linker
diff --git a/compiler/linker/x86/relative_patcher_x86_base.cc b/compiler/linker/x86/relative_patcher_x86_base.cc
index bc285a7..bf3a648 100644
--- a/compiler/linker/x86/relative_patcher_x86_base.cc
+++ b/compiler/linker/x86/relative_patcher_x86_base.cc
@@ -34,8 +34,10 @@
   return offset;  // No thunks added; no limit on relative call distance.
 }
 
-void X86BaseRelativePatcher::PatchCall(std::vector<uint8_t>* code, uint32_t literal_offset,
-                                       uint32_t patch_offset, uint32_t target_offset) {
+void X86BaseRelativePatcher::PatchCall(std::vector<uint8_t>* code,
+                                       uint32_t literal_offset,
+                                       uint32_t patch_offset,
+                                       uint32_t target_offset) {
   DCHECK_LE(literal_offset + 4u, code->size());
   // Unsigned arithmetic with its well-defined overflow behavior is just fine here.
   uint32_t displacement = target_offset - patch_offset;
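A worked example of the well-defined unsigned overflow in X86BaseRelativePatcher::PatchCall(): for a backward call, the subtraction simply wraps to the two's-complement encoding of the negative displacement.

    uint32_t patch_offset = 0x2000u;
    uint32_t target_offset = 0x1000u;
    uint32_t displacement = target_offset - patch_offset;  // 0xFFFFF000u == -4096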
diff --git a/compiler/linker/x86/relative_patcher_x86_base.h b/compiler/linker/x86/relative_patcher_x86_base.h
index 9200709..ca83a72 100644
--- a/compiler/linker/x86/relative_patcher_x86_base.h
+++ b/compiler/linker/x86/relative_patcher_x86_base.h
@@ -29,8 +29,10 @@
                         MethodReference method_ref) OVERRIDE;
   uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
   uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
-  void PatchCall(std::vector<uint8_t>* code, uint32_t literal_offset,
-                 uint32_t patch_offset, uint32_t target_offset) OVERRIDE;
+  void PatchCall(std::vector<uint8_t>* code,
+                 uint32_t literal_offset,
+                 uint32_t patch_offset,
+                 uint32_t target_offset) OVERRIDE;
 
  protected:
   X86BaseRelativePatcher() { }
diff --git a/compiler/linker/x86_64/relative_patcher_x86_64.cc b/compiler/linker/x86_64/relative_patcher_x86_64.cc
index 598f3ac..e571f50 100644
--- a/compiler/linker/x86_64/relative_patcher_x86_64.cc
+++ b/compiler/linker/x86_64/relative_patcher_x86_64.cc
@@ -23,7 +23,8 @@
 
 void X86_64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code,
                                                    const LinkerPatch& patch,
-                                                   uint32_t patch_offset, uint32_t target_offset) {
+                                                   uint32_t patch_offset,
+                                                   uint32_t target_offset) {
   DCHECK_LE(patch.LiteralOffset() + 4u, code->size());
   // Unsigned arithmetic with its well-defined overflow behavior is just fine here.
   uint32_t displacement = target_offset - patch_offset;
diff --git a/compiler/linker/x86_64/relative_patcher_x86_64.h b/compiler/linker/x86_64/relative_patcher_x86_64.h
index af687b4..feecb3a 100644
--- a/compiler/linker/x86_64/relative_patcher_x86_64.h
+++ b/compiler/linker/x86_64/relative_patcher_x86_64.h
@@ -26,8 +26,10 @@
  public:
   X86_64RelativePatcher() { }
 
-  void PatchDexCacheReference(std::vector<uint8_t>* code, const LinkerPatch& patch,
-                              uint32_t patch_offset, uint32_t target_offset) OVERRIDE;
+  void PatchDexCacheReference(std::vector<uint8_t>* code,
+                              const LinkerPatch& patch,
+                              uint32_t patch_offset,
+                              uint32_t target_offset) OVERRIDE;
 };
 
 }  // namespace linker
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index d3b404a..14fd105 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -31,6 +31,7 @@
 #include "elf_writer.h"
 #include "elf_writer_quick.h"
 #include "entrypoints/quick/quick_entrypoints.h"
+#include "linker/multi_oat_relative_patcher.h"
 #include "linker/vector_output_stream.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
@@ -111,17 +112,16 @@
                                               compiler_kind,
                                               insn_set,
                                               insn_features_.get(),
-                                              false,
-                                              nullptr,
-                                              nullptr,
-                                              nullptr,
-                                              2,
-                                              true,
-                                              true,
+                                              /* boot_image */ false,
+                                              /* image_classes */ nullptr,
+                                              /* compiled_classes */ nullptr,
+                                              /* compiled_methods */ nullptr,
+                                              /* thread_count */ 2,
+                                              /* dump_stats */ true,
+                                              /* dump_passes */ true,
                                               timer_.get(),
-                                              -1,
-                                              nullptr,
-                                              nullptr));
+                                              /* swap_fd */ -1,
+                                              /* profile_compilation_info */ nullptr));
   }
 
   bool WriteElf(File* file,
@@ -200,7 +200,13 @@
       ScopedObjectAccess soa(Thread::Current());
       class_linker->RegisterDexFile(*dex_file, runtime->GetLinearAlloc());
     }
-    oat_writer.PrepareLayout(compiler_driver_.get(), nullptr, dex_files);
+    linker::MultiOatRelativePatcher patcher(compiler_driver_->GetInstructionSet(),
+                                            instruction_set_features_.get());
+    oat_writer.PrepareLayout(compiler_driver_.get(), nullptr, dex_files, &patcher);
+    size_t rodata_size = oat_writer.GetOatHeader().GetExecutableOffset();
+    size_t text_size = oat_writer.GetSize() - rodata_size;
+    elf_writer->SetLoadedSectionSizes(rodata_size, text_size, oat_writer.GetBssSize());
+
     if (!oat_writer.WriteRodata(rodata)) {
       return false;
     }
@@ -216,7 +222,6 @@
       return false;
     }
 
-    elf_writer->SetBssSize(oat_writer.GetBssSize());
     elf_writer->WriteDynamicSection();
     elf_writer->WriteDebugInfo(oat_writer.GetMethodDebugInfo());
     elf_writer->WritePatchLocations(oat_writer.GetAbsolutePatchLocations());
@@ -416,7 +421,7 @@
     // TODO We should also check copied methods in this test.
     for (auto& m : klass->GetDeclaredVirtualMethods(pointer_size)) {
       if (!klass->IsInterface()) {
-        EXPECT_FALSE(m.MightBeCopied());
+        EXPECT_FALSE(m.IsCopied());
       }
       CheckMethod(&m, oat_class.GetOatMethod(method_index), dex_file);
       ++method_index;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 47dcfd5..c60b02a 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -38,8 +38,8 @@
 #include "gc/space/space.h"
 #include "handle_scope-inl.h"
 #include "image_writer.h"
+#include "linker/multi_oat_relative_patcher.h"
 #include "linker/output_stream.h"
-#include "linker/relative_patcher.h"
 #include "mirror/array.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache-inl.h"
@@ -292,7 +292,8 @@
     size_oat_class_status_(0),
     size_oat_class_method_bitmaps_(0),
     size_oat_class_method_offsets_(0),
-    method_offset_map_() {
+    relative_patcher_(nullptr),
+    absolute_patch_locations_() {
 }
 
 bool OatWriter::AddDexFileSource(const char* filename,
@@ -438,21 +439,21 @@
 
 void OatWriter::PrepareLayout(const CompilerDriver* compiler,
                               ImageWriter* image_writer,
-                              const std::vector<const DexFile*>& dex_files) {
+                              const std::vector<const DexFile*>& dex_files,
+                              linker::MultiOatRelativePatcher* relative_patcher) {
   CHECK(write_state_ == WriteState::kPrepareLayout);
 
-  dex_files_ = &dex_files;
-
   compiler_driver_ = compiler;
   image_writer_ = image_writer;
+  dex_files_ = &dex_files;
+  relative_patcher_ = relative_patcher;
+  SetMultiOatRelativePatcherAdjustment();
+
   if (compiling_boot_image_) {
     CHECK(image_writer_ != nullptr);
   }
   InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
   CHECK_EQ(instruction_set, oat_header_->GetInstructionSet());
-  const InstructionSetFeatures* features = compiler_driver_->GetInstructionSetFeatures();
-  relative_patcher_ = linker::RelativePatcher::Create(instruction_set, features,
-                                                      &method_offset_map_);
 
   uint32_t offset = size_;
   {
@@ -727,13 +728,11 @@
       // Deduplicate code arrays if we are not producing debuggable code.
       bool deduped = false;
       MethodReference method_ref(dex_file_, it.GetMemberIndex());
-      auto method_lb = writer_->method_offset_map_.map.lower_bound(method_ref);
       if (debuggable_) {
-        if (method_lb != writer_->method_offset_map_.map.end() &&
-            !writer_->method_offset_map_.map.key_comp()(method_ref, method_lb->first)) {
+        quick_code_offset = writer_->relative_patcher_->GetOffset(method_ref);
+        if (quick_code_offset != 0u) {
           // Duplicate methods, we want the same code for both of them so that the oat writer puts
           // the same code in both ArtMethods so that we do not get different oat code at runtime.
-          quick_code_offset = method_lb->second;
           deduped = true;
         } else {
           quick_code_offset = NewQuickCodeOffset(compiled_method, it, thumb_offset);
@@ -750,14 +749,14 @@
       }
 
       if (code_size != 0) {
-        if (method_lb != writer_->method_offset_map_.map.end() &&
-            !writer_->method_offset_map_.map.key_comp()(method_ref, method_lb->first)) {
+        if (writer_->relative_patcher_->GetOffset(method_ref) != 0u) {
           // TODO: Should this be a hard failure?
           LOG(WARNING) << "Multiple definitions of "
               << PrettyMethod(method_ref.dex_method_index, *method_ref.dex_file)
-              << " offsets " << method_lb->second << " " << quick_code_offset;
+              << " offsets " << writer_->relative_patcher_->GetOffset(method_ref)
+              << " " << quick_code_offset;
         } else {
-          writer_->method_offset_map_.map.PutBefore(method_lb, method_ref, quick_code_offset);
+          writer_->relative_patcher_->SetOffset(method_ref, quick_code_offset);
         }
       }
 
@@ -1106,27 +1105,29 @@
           patched_code_.assign(quick_code.begin(), quick_code.end());
           quick_code = ArrayRef<const uint8_t>(patched_code_);
           for (const LinkerPatch& patch : compiled_method->GetPatches()) {
+            uint32_t literal_offset = patch.LiteralOffset();
             if (patch.Type() == kLinkerPatchCallRelative) {
               // NOTE: Relative calls across oat files are not supported.
               uint32_t target_offset = GetTargetOffset(patch);
-              uint32_t literal_offset = patch.LiteralOffset();
-              writer_->relative_patcher_->PatchCall(&patched_code_, literal_offset,
-                                                     offset_ + literal_offset, target_offset);
+              writer_->relative_patcher_->PatchCall(&patched_code_,
+                                                    literal_offset,
+                                                    offset_ + literal_offset,
+                                                    target_offset);
             } else if (patch.Type() == kLinkerPatchDexCacheArray) {
               uint32_t target_offset = GetDexCacheOffset(patch);
-              uint32_t literal_offset = patch.LiteralOffset();
-              writer_->relative_patcher_->PatchDexCacheReference(&patched_code_, patch,
+              writer_->relative_patcher_->PatchDexCacheReference(&patched_code_,
+                                                                 patch,
                                                                  offset_ + literal_offset,
                                                                  target_offset);
             } else if (patch.Type() == kLinkerPatchCall) {
               uint32_t target_offset = GetTargetOffset(patch);
-              PatchCodeAddress(&patched_code_, patch.LiteralOffset(), target_offset);
+              PatchCodeAddress(&patched_code_, literal_offset, target_offset);
             } else if (patch.Type() == kLinkerPatchMethod) {
               ArtMethod* method = GetTargetMethod(patch);
-              PatchMethodAddress(&patched_code_, patch.LiteralOffset(), method);
+              PatchMethodAddress(&patched_code_, literal_offset, method);
             } else if (patch.Type() == kLinkerPatchType) {
               mirror::Class* type = GetTargetType(patch);
-              PatchObjectAddress(&patched_code_, patch.LiteralOffset(), type);
+              PatchObjectAddress(&patched_code_, literal_offset, type);
             }
           }
         }
@@ -1172,16 +1173,16 @@
   }
 
   uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
-    auto target_it = writer_->method_offset_map_.map.find(patch.TargetMethod());
-    uint32_t target_offset =
-        (target_it != writer_->method_offset_map_.map.end()) ? target_it->second : 0u;
-    // If there's no compiled code, point to the correct trampoline.
+    uint32_t target_offset = writer_->relative_patcher_->GetOffset(patch.TargetMethod());
+    // If there's no new compiled code, either we're compiling an app and the target method
+    // is in the boot image, or we need to point to the correct trampoline.
     if (UNLIKELY(target_offset == 0)) {
       ArtMethod* target = GetTargetMethod(patch);
       DCHECK(target != nullptr);
       size_t size = GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet());
       const void* oat_code_offset = target->GetEntryPointFromQuickCompiledCodePtrSize(size);
       if (oat_code_offset != 0) {
+        DCHECK(!writer_->HasBootImage());
         DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(oat_code_offset));
         DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(oat_code_offset));
         DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickGenericJniStub(oat_code_offset));
@@ -1206,11 +1207,10 @@
 
   uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
     if (writer_->HasBootImage()) {
-      auto* element = writer_->image_writer_->GetDexCacheArrayElementImageAddress<const uint8_t*>(
-              patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
-      const char* oat_filename = writer_->image_writer_->GetOatFilenameForDexCache(dex_cache_);
-      const uint8_t* oat_data =
-          writer_->image_writer_->GetOatFileBegin(oat_filename) + file_offset_;
+      uintptr_t element = writer_->image_writer_->GetDexCacheArrayElementImageAddress<uintptr_t>(
+          patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
+      size_t oat_index = writer_->image_writer_->GetOatIndexForDexCache(dex_cache_);
+      uintptr_t oat_data = writer_->image_writer_->GetOatDataBegin(oat_index);
       return element - oat_data;
     } else {
       size_t start = writer_->dex_cache_arrays_offsets_.Get(patch.TargetDexCacheDexFile());
@@ -1270,9 +1270,13 @@
       SHARED_REQUIRES(Locks::mutator_lock_) {
     uint32_t address = target_offset;
     if (writer_->HasBootImage()) {
-      const char* oat_filename = writer_->image_writer_->GetOatFilenameForDexCache(dex_cache_);
-      address = PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin(oat_filename) +
-                                      writer_->oat_data_offset_ + target_offset);
+      size_t oat_index = writer_->image_writer_->GetOatIndexForDexCache(dex_cache_);
+      // TODO: Clean up offset types.
+      // The target_offset must be treated as signed for cross-oat patching.
+      const void* target = reinterpret_cast<const void*>(
+          writer_->image_writer_->GetOatDataBegin(oat_index) +
+          static_cast<int32_t>(target_offset));
+      address = PointerToLowMemUInt32(target);
     }
     DCHECK_LE(offset + 4, code->size());
     uint8_t* data = &(*code)[offset];
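To illustrate the cross-oat signedness note above with made-up numbers: when the target lies in an earlier oat file, target_offset carries the bit pattern of a negative value, and only sign extension keeps the pointer arithmetic correct on 64-bit hosts.

    const uint8_t* oat_data_begin = reinterpret_cast<const uint8_t*>(0x71000000u);
    uint32_t target_offset = 0xFFFFF000u;  // -4096: target in a previous oat file.
    // static_cast<int32_t> sign-extends: 0x71000000 - 0x1000 = 0x70FFF000.
    // Zero-extending instead would add ~4 GiB and point far past the image.
    const void* target = oat_data_begin + static_cast<int32_t>(target_offset);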
@@ -1540,6 +1544,8 @@
 bool OatWriter::WriteCode(OutputStream* out) {
   CHECK(write_state_ == WriteState::kWriteText);
 
+  SetMultiOatRelativePatcherAdjustment();
+
   const size_t file_offset = oat_data_offset_;
   size_t relative_offset = oat_header_->GetExecutableOffset();
   DCHECK_OFFSET();
@@ -1781,7 +1787,7 @@
   return relative_offset;
 }
 
-bool OatWriter::GetOatDataOffset(OutputStream* out) {
+bool OatWriter::RecordOatDataOffset(OutputStream* out) {
   // Get the elf file offset of the oat file.
   const off_t raw_file_offset = out->Seek(0, kSeekCurrent);
   if (raw_file_offset == static_cast<off_t>(-1)) {
@@ -1833,7 +1839,7 @@
   TimingLogger::ScopedTiming split("WriteDexFiles", timings_);
 
   // Get the elf file offset of the oat file.
-  if (!GetOatDataOffset(rodata)) {
+  if (!RecordOatDataOffset(rodata)) {
     return false;
   }
 
@@ -2261,12 +2267,15 @@
   return out->WriteFully(data, size);
 }
 
-std::pair<bool, uint32_t> OatWriter::MethodOffsetMap::FindMethodOffset(MethodReference ref) {
-  auto it = map.find(ref);
-  if (it == map.end()) {
-    return std::pair<bool, uint32_t>(false, 0u);
-  } else {
-    return std::pair<bool, uint32_t>(true, it->second);
+void OatWriter::SetMultiOatRelativePatcherAdjustment() {
+  DCHECK(dex_files_ != nullptr);
+  DCHECK(relative_patcher_ != nullptr);
+  DCHECK_NE(oat_data_offset_, 0u);
+  if (image_writer_ != nullptr && !dex_files_->empty()) {
+    // The oat data begin address may not be initialized yet, but the oat file offset is known.
+    size_t oat_index = image_writer_->GetOatIndexForDexFile(dex_files_->front());
+    size_t elf_file_offset = image_writer_->GetOatFileOffset(oat_index);
+    relative_patcher_->StartOatFile(elf_file_offset + oat_data_offset_);
   }
 }
 
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 5a55fc6..74aab4e 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -47,6 +47,10 @@
 struct MethodDebugInfo;
 }  // namespace debug
 
+namespace linker {
+class MultiOatRelativePatcher;
+}  // namespace linker
+
 // OatHeader         variable length with count of D OatDexFiles
 //
 // OatDexFile[0]     one variable sized OatDexFile with offsets to Dex and OatClasses
@@ -153,7 +157,8 @@
   // Prepare layout of remaining data.
   void PrepareLayout(const CompilerDriver* compiler,
                      ImageWriter* image_writer,
-                     const std::vector<const DexFile*>& dex_files);
+                     const std::vector<const DexFile*>& dex_files,
+                     linker::MultiOatRelativePatcher* relative_patcher);
   // Write the rest of .rodata section (ClassOffsets[], OatClass[], maps).
   bool WriteRodata(OutputStream* out);
   // Write the code to the .text section.
@@ -187,6 +192,10 @@
     return bss_size_;
   }
 
+  size_t GetOatDataOffset() const {
+    return oat_data_offset_;
+  }
+
   ArrayRef<const uintptr_t> GetAbsolutePatchLocations() const {
     return ArrayRef<const uintptr_t>(absolute_patch_locations_);
   }
@@ -249,7 +258,7 @@
   size_t WriteCode(OutputStream* out, const size_t file_offset, size_t relative_offset);
   size_t WriteCodeDexFiles(OutputStream* out, const size_t file_offset, size_t relative_offset);
 
-  bool GetOatDataOffset(OutputStream* out);
+  bool RecordOatDataOffset(OutputStream* out);
   bool ReadDexFileHeader(File* file, OatDexFile* oat_dex_file);
   bool ValidateDexFileHeader(const uint8_t* raw_header, const char* location);
   bool WriteDexFiles(OutputStream* rodata, File* file);
@@ -268,6 +277,7 @@
                              const std::vector<std::unique_ptr<const DexFile>>& opened_dex_files);
   bool WriteCodeAlignment(OutputStream* out, uint32_t aligned_code_delta);
   bool WriteData(OutputStream* out, const void* data, size_t size);
+  void SetMultiOatRelativePatcherAdjustment();
 
   enum class WriteState {
     kAddingDexFileSources,
@@ -358,20 +368,12 @@
   uint32_t size_oat_class_method_bitmaps_;
   uint32_t size_oat_class_method_offsets_;
 
-  std::unique_ptr<linker::RelativePatcher> relative_patcher_;
+  // The helper for processing relative patches is external so that we can patch across oat files.
+  linker::MultiOatRelativePatcher* relative_patcher_;
 
   // The locations of absolute patches relative to the start of the executable section.
   dchecked_vector<uintptr_t> absolute_patch_locations_;
 
-  // Map method reference to assigned offset.
-  // Wrap the map in a class implementing linker::RelativePatcherTargetProvider.
-  class MethodOffsetMap FINAL : public linker::RelativePatcherTargetProvider {
-   public:
-    std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) OVERRIDE;
-    SafeMap<MethodReference, uint32_t, MethodReferenceComparator> map;
-  };
-  MethodOffsetMap method_offset_map_;
-
   DISALLOW_COPY_AND_ASSIGN(OatWriter);
 };
 
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index ba1b168..f2929bc 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -67,20 +67,28 @@
   static bool IsAddOrSubAConstant(HInstruction* instruction,
                                   /* out */ HInstruction** left_instruction,
                                   /* out */ int32_t* right_constant) {
-    if (instruction->IsAdd() || instruction->IsSub()) {
+    HInstruction* left_so_far = nullptr;
+    int32_t right_so_far = 0;
+    while (instruction->IsAdd() || instruction->IsSub()) {
       HBinaryOperation* bin_op = instruction->AsBinaryOperation();
       HInstruction* left = bin_op->GetLeft();
       HInstruction* right = bin_op->GetRight();
       if (right->IsIntConstant()) {
-        *left_instruction = left;
-        int32_t c = right->AsIntConstant()->GetValue();
-        *right_constant = instruction->IsAdd() ? c : -c;
-        return true;
+        int32_t v = right->AsIntConstant()->GetValue();
+        int32_t c = instruction->IsAdd() ? v : -v;
+        if (!WouldAddOverflowOrUnderflow(right_so_far, c)) {
+          instruction = left;
+          left_so_far = left;
+          right_so_far += c;
+          continue;
+        }
       }
+      break;
     }
-    *left_instruction = nullptr;
-    *right_constant = 0;
-    return false;
+    // Return the result: either false (with "null+0") or true (with "instr+constant").
+    *left_instruction = left_so_far;
+    *right_constant = right_so_far;
+    return left_so_far != nullptr;
   }
 
   // Expresses any instruction as a value bound.
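WouldAddOverflowOrUnderflow() is called by the reworked IsAddOrSubAConstant() above but defined elsewhere in this file; a conservative sketch of such a guard (an assumption, not necessarily the actual helper):

    #include <limits>

    // True if lhs + rhs would overflow or underflow int32_t.
    static bool WouldAddOverflowOrUnderflow(int32_t lhs, int32_t rhs) {
      int64_t sum = static_cast<int64_t>(lhs) + static_cast<int64_t>(rhs);
      return sum < std::numeric_limits<int32_t>::min() ||
             sum > std::numeric_limits<int32_t>::max();
    }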
@@ -525,6 +533,8 @@
         first_index_bounds_check_map_(
             std::less<int>(),
             graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+        dynamic_bce_standby_(
+            graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
         early_exit_loop_(
             std::less<uint32_t>(),
             graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
@@ -545,6 +555,13 @@
   }
 
   void Finish() {
+    // Retry dynamic bce candidates on standby that are still in the graph.
+    for (HBoundsCheck* bounds_check : dynamic_bce_standby_) {
+      if (bounds_check->IsInBlock()) {
+        TryDynamicBCE(bounds_check);
+      }
+    }
+
     // Preserve SSA structure which may have been broken by adding one or more
     // new taken-test structures (see TransformLoopForDeoptimizationIfNeeded()).
     InsertPhiNodes();
@@ -553,6 +570,7 @@
     early_exit_loop_.clear();
     taken_test_loop_.clear();
     finite_loop_.clear();
+    dynamic_bce_standby_.clear();
   }
 
  private:
@@ -1293,7 +1311,7 @@
     if (DynamicBCESeemsProfitable(loop, instruction->GetBlock()) &&
         induction_range_.CanGenerateCode(
             instruction, index, &needs_finite_test, &needs_taken_test) &&
-        CanHandleInfiniteLoop(loop, index, needs_finite_test) &&
+        CanHandleInfiniteLoop(loop, instruction, index, needs_finite_test) &&
         CanHandleLength(loop, length, needs_taken_test)) {  // do this test last (may code gen)
       HInstruction* lower = nullptr;
       HInstruction* upper = nullptr;
@@ -1425,7 +1443,7 @@
    * ensure the loop is finite.
    */
   bool CanHandleInfiniteLoop(
-      HLoopInformation* loop, HInstruction* index, bool needs_infinite_test) {
+      HLoopInformation* loop, HBoundsCheck* check, HInstruction* index, bool needs_infinite_test) {
     if (needs_infinite_test) {
       // If we already forced the loop to be finite, allow directly.
       const uint32_t loop_id = loop->GetHeader()->GetBlockId();
@@ -1447,6 +1465,9 @@
           }
         }
       }
+      // If the bounds check made it this far, it is worthwhile to check later
+      // whether the loop was forced finite by another candidate.
+      dynamic_bce_standby_.push_back(check);
       return false;
     }
     return true;
@@ -1668,6 +1689,9 @@
   // in a block that checks an index against that HArrayLength.
   ArenaSafeMap<int, HBoundsCheck*> first_index_bounds_check_map_;
 
+  // Standby list for dynamic bce.
+  ArenaVector<HBoundsCheck*> dynamic_bce_standby_;
+
   // Early-exit loop bookkeeping.
   ArenaSafeMap<uint32_t, bool> early_exit_loop_;
 
@@ -1703,21 +1727,18 @@
   // that value dominated by that instruction fits in that range. Range of that
   // value can be narrowed further down in the dominator tree.
   BCEVisitor visitor(graph_, side_effects_, induction_analysis_);
-  HBasicBlock* last_visited_block = nullptr;
   for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
     HBasicBlock* current = it.Current();
-    if (current == last_visited_block) {
-      // We may insert blocks into the reverse post order list when processing
-      // a loop header. Don't process it again.
-      DCHECK(current->IsLoopHeader());
-      continue;
-    }
     if (visitor.IsAddedBlock(current)) {
       // Skip added blocks. Their effects are already taken care of.
       continue;
     }
     visitor.VisitBasicBlock(current);
-    last_visited_block = current;
+    // Skip forward to the current block in case new basic blocks were inserted
+    // (which always appear earlier in reverse post order) to avoid visiting the
+    // same basic block twice.
+    for ( ; !it.Done() && it.Current() != current; it.Advance()) {
+    }
   }
 
   // Perform cleanup.
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 05e1356..57660c2 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -282,7 +282,7 @@
         // Found a predecessor not covered by the same TryItem. Insert entering
         // boundary block.
         HTryBoundary* try_entry =
-            new (arena_) HTryBoundary(HTryBoundary::kEntry, try_block->GetDexPc());
+            new (arena_) HTryBoundary(HTryBoundary::BoundaryKind::kEntry, try_block->GetDexPc());
         try_block->CreateImmediateDominator()->AddInstruction(try_entry);
         LinkToCatchBlocks(try_entry, code_item, entry.second);
         break;
@@ -316,7 +316,7 @@
 
       // Insert TryBoundary and link to catch blocks.
       HTryBoundary* try_exit =
-          new (arena_) HTryBoundary(HTryBoundary::kExit, successor->GetDexPc());
+          new (arena_) HTryBoundary(HTryBoundary::BoundaryKind::kExit, successor->GetDexPc());
       graph_->SplitEdge(try_block, successor)->AddInstruction(try_exit);
       LinkToCatchBlocks(try_exit, code_item, entry.second);
     }
@@ -368,7 +368,6 @@
   if (native_debuggable) {
     const uint32_t num_instructions = code_item.insns_size_in_code_units_;
     native_debug_info_locations = new (arena_) ArenaBitVector (arena_, num_instructions, false);
-    native_debug_info_locations->ClearAllBits();
     FindNativeDebugInfoLocations(code_item, native_debug_info_locations);
   }
 
@@ -443,23 +442,15 @@
     }
   };
   dex_file_->DecodeDebugPositionInfo(&code_item, Callback::Position, locations);
-  // Add native debug info at the start of every basic block.
-  for (uint32_t pc = 0; pc < code_item.insns_size_in_code_units_; pc++) {
-    if (FindBlockStartingAt(pc) != nullptr) {
-      locations->SetBit(pc);
-    }
-  }
   // Instruction-specific tweaks.
   const Instruction* const begin = Instruction::At(code_item.insns_);
   const Instruction* const end = begin->RelativeAt(code_item.insns_size_in_code_units_);
   for (const Instruction* inst = begin; inst < end; inst = inst->Next()) {
     switch (inst->Opcode()) {
-      case Instruction::MOVE_EXCEPTION:
-      case Instruction::MOVE_RESULT:
-      case Instruction::MOVE_RESULT_WIDE:
-      case Instruction::MOVE_RESULT_OBJECT: {
-        // The compiler checks that there are no instructions before those.
-        // So generate HNativeDebugInfo after them instead.
+      case Instruction::MOVE_EXCEPTION: {
+        // Stop in the native debugger after the exception has been moved.
+        // The compiler also expects the move at the start of the basic block, so
+        // we do not want to interfere by inserting native-debug-info before it.
         locations->ClearBit(inst->GetDexPc(code_item.insns_));
         const Instruction* next = inst->Next();
         if (next < end) {
@@ -2852,7 +2843,7 @@
     case Instruction::MONITOR_ENTER: {
       current_block_->AddInstruction(new (arena_) HMonitorOperation(
           LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot, dex_pc),
-          HMonitorOperation::kEnter,
+          HMonitorOperation::OperationKind::kEnter,
           dex_pc));
       break;
     }
@@ -2860,7 +2851,7 @@
     case Instruction::MONITOR_EXIT: {
       current_block_->AddInstruction(new (arena_) HMonitorOperation(
           LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot, dex_pc),
-          HMonitorOperation::kExit,
+          HMonitorOperation::OperationKind::kExit,
           dex_pc));
       break;
     }
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c2c8ccf..967d156 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -195,6 +195,8 @@
     if (disasm_info_ != nullptr) {
       code_start = GetAssembler()->CodeSize();
     }
+    // Record the dex pc at the start of the slow path (required for Java line number mapping).
+    MaybeRecordNativeDebugInfo(nullptr /* instruction */, slow_path->GetDexPc());
     slow_path->EmitNativeCode(this);
     if (disasm_info_ != nullptr) {
       disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
@@ -226,6 +228,10 @@
     // errors where we reference that label.
     if (block->IsSingleJump()) continue;
     Bind(block);
+    // This ensures that we have correct native line mapping for all native instructions.
+    // It is necessary to make stepping over a statement work. Otherwise, any initial
+    // instructions (e.g. moves) would be assumed to be the start of the next statement.
+    MaybeRecordNativeDebugInfo(nullptr /* instruction */, block->GetDexPc());
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* current = it.Current();
       DisassemblyScope disassembly_scope(current, *this);
@@ -733,7 +739,8 @@
   uint32_t native_pc = GetAssembler()->CodeSize();
 
   if (instruction == nullptr) {
-    // For stack overflow checks.
+    // For stack overflow checks and native-debug-info entries without dex register
+    // mapping (i.e. start of basic block or start of slow path).
     stack_map_stream_.BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
     stack_map_stream_.EndStackMapEntry();
     return;
@@ -808,6 +815,16 @@
   return count > 0 && stack_map_stream_.GetStackMap(count - 1).native_pc_offset == pc;
 }
 
+void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction, uint32_t dex_pc) {
+  if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
+    if (HasStackMapAtCurrentPc()) {
+      // Ensure that we do not collide with the stack map of the previous instruction.
+      GenerateNop();
+    }
+    RecordPcInfo(instruction, dex_pc);
+  }
+}
+
 void CodeGenerator::RecordCatchBlockInfo() {
   ArenaAllocator* arena = graph_->GetArena();
 
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 49c193e..9297fc9 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -69,7 +69,7 @@
 
 class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
  public:
-  SlowPathCode() {
+  explicit SlowPathCode(HInstruction* instruction) : instruction_(instruction) {
     for (size_t i = 0; i < kMaximumNumberOfExpectedRegisters; ++i) {
       saved_core_stack_offsets_[i] = kRegisterNotSaved;
       saved_fpu_stack_offsets_[i] = kRegisterNotSaved;
@@ -106,9 +106,15 @@
   Label* GetEntryLabel() { return &entry_label_; }
   Label* GetExitLabel() { return &exit_label_; }
 
+  uint32_t GetDexPc() const {
+    return instruction_ != nullptr ? instruction_->GetDexPc() : kNoDexPc;
+  }
+
  protected:
   static constexpr size_t kMaximumNumberOfExpectedRegisters = 32;
   static constexpr uint32_t kRegisterNotSaved = -1;
+  // The instruction for which this slow path is emitted.
+  HInstruction* instruction_;
   uint32_t saved_core_stack_offsets_[kMaximumNumberOfExpectedRegisters];
   uint32_t saved_fpu_stack_offsets_[kMaximumNumberOfExpectedRegisters];
 
@@ -267,6 +273,8 @@
   void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
   // Check whether we have already recorded mapping at this PC.
   bool HasStackMapAtCurrentPc();
+  // Record extra stack maps if we support native debugging.
+  void MaybeRecordNativeDebugInfo(HInstruction* instruction, uint32_t dex_pc);
 
   bool CanMoveNullCheckToUser(HNullCheck* null_check);
   void MaybeRecordImplicitNullCheck(HInstruction* instruction);
@@ -440,6 +448,8 @@
   // Copy the result of a call into the given target.
   virtual void MoveFromReturnRegister(Location trg, Primitive::Type type) = 0;
 
+  virtual void GenerateNop() = 0;
+
  protected:
   // Method patch info used for recording locations of required linker patches and
   // target methods. The target method can be used for various purposes, whether for
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 87f52c6..aa9b01f 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -64,7 +64,7 @@
 
 class NullCheckSlowPathARM : public SlowPathCode {
  public:
-  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}
+  explicit NullCheckSlowPathARM(HNullCheck* instruction) : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -83,13 +83,12 @@
   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM"; }
 
  private:
-  HNullCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
 };
 
 class DivZeroCheckSlowPathARM : public SlowPathCode {
  public:
-  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}
+  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -108,14 +107,13 @@
   const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM"; }
 
  private:
-  HDivZeroCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
 };
 
 class SuspendCheckSlowPathARM : public SlowPathCode {
  public:
   SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
-      : instruction_(instruction), successor_(successor) {}
+      : SlowPathCode(instruction), successor_(successor) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -144,7 +142,6 @@
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM"; }
 
  private:
-  HSuspendCheck* const instruction_;
   // If not null, the block to branch to after the suspend check.
   HBasicBlock* const successor_;
 
@@ -157,7 +154,7 @@
 class BoundsCheckSlowPathARM : public SlowPathCode {
  public:
   explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction)
-      : instruction_(instruction) {}
+      : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -188,8 +185,6 @@
   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM"; }
 
  private:
-  HBoundsCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
 };
 
@@ -199,7 +194,7 @@
                        HInstruction* at,
                        uint32_t dex_pc,
                        bool do_clinit)
-      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+      : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }
 
@@ -253,7 +248,7 @@
 
 class LoadStringSlowPathARM : public SlowPathCode {
  public:
-  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}
+  explicit LoadStringSlowPathARM(HLoadString* instruction) : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -264,7 +259,8 @@
     SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
-    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    __ LoadImmediate(calling_convention.GetRegisterAt(0), string_index);
     arm_codegen->InvokeRuntime(
         QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -277,15 +273,13 @@
   const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM"; }
 
  private:
-  HLoadString* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
 };
 
 class TypeCheckSlowPathARM : public SlowPathCode {
  public:
   TypeCheckSlowPathARM(HInstruction* instruction, bool is_fatal)
-      : instruction_(instruction), is_fatal_(is_fatal) {}
+      : SlowPathCode(instruction), is_fatal_(is_fatal) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -340,7 +334,6 @@
   bool IsFatal() const OVERRIDE { return is_fatal_; }
 
  private:
-  HInstruction* const instruction_;
   const bool is_fatal_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
@@ -349,7 +342,7 @@
 class DeoptimizationSlowPathARM : public SlowPathCode {
  public:
   explicit DeoptimizationSlowPathARM(HDeoptimize* instruction)
-    : instruction_(instruction) {}
+    : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -365,13 +358,12 @@
   const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }
 
  private:
-  HDeoptimize* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
 };
 
 class ArraySetSlowPathARM : public SlowPathCode {
  public:
-  explicit ArraySetSlowPathARM(HInstruction* instruction) : instruction_(instruction) {}
+  explicit ArraySetSlowPathARM(HInstruction* instruction) : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -410,8 +402,6 @@
   const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM"; }
 
  private:
-  HInstruction* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM);
 };
 
@@ -419,7 +409,7 @@
 class ReadBarrierMarkSlowPathARM : public SlowPathCode {
  public:
   ReadBarrierMarkSlowPathARM(HInstruction* instruction, Location out, Location obj)
-      : instruction_(instruction), out_(out), obj_(obj) {
+      : SlowPathCode(instruction), out_(out), obj_(obj) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -458,7 +448,6 @@
   }
 
  private:
-  HInstruction* const instruction_;
   const Location out_;
   const Location obj_;
 
@@ -474,7 +463,7 @@
                                          Location obj,
                                          uint32_t offset,
                                          Location index)
-      : instruction_(instruction),
+      : SlowPathCode(instruction),
         out_(out),
         ref_(ref),
         obj_(obj),
@@ -629,7 +618,6 @@
     UNREACHABLE();
   }
 
-  HInstruction* const instruction_;
   const Location out_;
   const Location ref_;
   const Location obj_;
@@ -646,7 +634,7 @@
 class ReadBarrierForRootSlowPathARM : public SlowPathCode {
  public:
   ReadBarrierForRootSlowPathARM(HInstruction* instruction, Location out, Location root)
-      : instruction_(instruction), out_(out), root_(root) {
+      : SlowPathCode(instruction), out_(out), root_(root) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -679,7 +667,6 @@
   const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARM"; }
 
  private:
-  HInstruction* const instruction_;
   const Location out_;
   const Location root_;
 
@@ -1557,11 +1544,11 @@
 }
 
 void InstructionCodeGeneratorARM::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  if (codegen_->HasStackMapAtCurrentPc()) {
-    // Ensure that we do not collide with the stack map of the previous instruction.
-    __ nop();
-  }
-  codegen_->RecordPcInfo(info, info->GetDexPc());
+  codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorARM::GenerateNop() {
+  __ nop();
 }
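
NOTE: The per-backend VisitNativeDebugInfo bodies collapse into a shared helper;
only the nop emission stays backend-specific through the new virtual GenerateNop().
The helper itself lands in the common CodeGenerator and is not shown in this
excerpt; its presumed body simply factors out the deleted code:

    // Sketch of the shared helper (the real one belongs in code_generator.cc):
    void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
                                                   uint32_t dex_pc) {
      if (HasStackMapAtCurrentPc()) {
        // Ensure that we do not collide with the stack map of the previous
        // instruction.
        GenerateNop();
      }
      RecordPcInfo(instruction, dex_pc);
    }

The ARM64, MIPS, MIPS64, x86 and x86-64 generators receive the same treatment below.
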
 
 void LocationsBuilderARM::HandleCondition(HCondition* cond) {
@@ -6426,6 +6413,33 @@
   return DeduplicateMethodLiteral(target_method, &call_patches_);
 }
 
+void LocationsBuilderARM::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
+  locations->SetInAt(HMultiplyAccumulate::kInputAccumulatorIndex,
+                     Location::RequiresRegister());
+  locations->SetInAt(HMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
+  locations->SetInAt(HMultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
+  LocationSummary* locations = instr->GetLocations();
+  Register res = locations->Out().AsRegister<Register>();
+  Register accumulator =
+      locations->InAt(HMultiplyAccumulate::kInputAccumulatorIndex).AsRegister<Register>();
+  Register mul_left =
+      locations->InAt(HMultiplyAccumulate::kInputMulLeftIndex).AsRegister<Register>();
+  Register mul_right =
+      locations->InAt(HMultiplyAccumulate::kInputMulRightIndex).AsRegister<Register>();
+
+  if (instr->GetOpKind() == HInstruction::kAdd) {
+    __ mla(res, mul_left, mul_right, accumulator);
+  } else {
+    __ mls(res, mul_left, mul_right, accumulator);
+  }
+}
+
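
NOTE: The two instructions selected here fold a multiply and an add/subtract into a
single operation. Scalar semantics, as a sketch:

    #include <cstdint>
    // mla res, a, b, acc  =>  res = acc + a * b   (multiply-accumulate)
    // mls res, a, b, acc  =>  res = acc - a * b   (multiply-subtract)
    int32_t mla(int32_t acc, int32_t a, int32_t b) { return acc + a * b; }
    int32_t mls(int32_t acc, int32_t a, int32_t b) { return acc - a * b; }

The HMultiplyAccumulate nodes are presumably formed by the new
instruction_simplifier_arm.cc / instruction_simplifier_shared.cc passes added to
the build above.
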
 void LocationsBuilderARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
   // Nothing to do, this should be removed during prepare for register allocator.
   LOG(FATAL) << "Unreachable";
@@ -6580,7 +6594,7 @@
 void InstructionCodeGeneratorARM::VisitClassTableGet(HClassTableGet* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   uint32_t method_offset = 0;
-  if (instruction->GetTableKind() == HClassTableGet::kVTable) {
+  if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kArmPointerSize).SizeValue();
   } else {
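
NOTE: The qualified HClassTableGet::TableKind::kVTable spelling, repeated in every
backend in this change, indicates that TableKind became a scoped enum in nodes.h
(the declaration itself is not part of this excerpt). Presumed shape, with the
second enumerator an assumption:

    // In HClassTableGet (nodes.h); an unscoped 'enum TableKind' before this change.
    enum class TableKind {
      kVTable,
      kIMTable,
    };

Scoped enumerators no longer leak into the enclosing class scope, hence the longer
qualification at each use site.
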
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index cfd7a3b..06e7c00 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -159,6 +159,7 @@
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
+  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
@@ -197,6 +198,7 @@
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
+  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
@@ -510,6 +512,8 @@
   // artReadBarrierForRootSlow.
   void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
 
+  void GenerateNop();
+
  private:
   // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
   // and GenerateArrayLoadWithBakerReadBarrier.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 435ae5e..985dc05 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -219,7 +219,7 @@
 
 class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : instruction_(instruction) {}
+  explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : SlowPathCodeARM64(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -246,14 +246,12 @@
   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; }
 
  private:
-  HBoundsCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
 };
 
 class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}
+  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : SlowPathCodeARM64(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -272,7 +270,6 @@
   const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM64"; }
 
  private:
-  HDivZeroCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
 };
 
@@ -282,7 +279,7 @@
                          HInstruction* at,
                          uint32_t dex_pc,
                          bool do_clinit)
-      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+      : SlowPathCodeARM64(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }
 
@@ -337,7 +334,7 @@
 
 class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}
+  explicit LoadStringSlowPathARM64(HLoadString* instruction) : SlowPathCodeARM64(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -348,7 +345,8 @@
     SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
-    __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    __ Mov(calling_convention.GetRegisterAt(0).W(), string_index);
     arm64_codegen->InvokeRuntime(
         QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -362,14 +360,12 @@
   const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
 
  private:
-  HLoadString* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
 };
 
 class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}
+  explicit NullCheckSlowPathARM64(HNullCheck* instr) : SlowPathCodeARM64(instr) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -388,15 +384,13 @@
   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; }
 
  private:
-  HNullCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
 };
 
 class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
   SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor)
-      : instruction_(instruction), successor_(successor) {}
+      : SlowPathCodeARM64(instruction), successor_(successor) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -425,7 +419,6 @@
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM64"; }
 
  private:
-  HSuspendCheck* const instruction_;
   // If not null, the block to branch to after the suspend check.
   HBasicBlock* const successor_;
 
@@ -438,7 +431,7 @@
 class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
   TypeCheckSlowPathARM64(HInstruction* instruction, bool is_fatal)
-      : instruction_(instruction), is_fatal_(is_fatal) {}
+      : SlowPathCodeARM64(instruction), is_fatal_(is_fatal) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -487,7 +480,6 @@
   bool IsFatal() const { return is_fatal_; }
 
  private:
-  HInstruction* const instruction_;
   const bool is_fatal_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
@@ -496,7 +488,7 @@
 class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
  public:
   explicit DeoptimizationSlowPathARM64(HDeoptimize* instruction)
-      : instruction_(instruction) {}
+      : SlowPathCodeARM64(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -512,13 +504,12 @@
   const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }
 
  private:
-  HDeoptimize* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
 };
 
 class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  explicit ArraySetSlowPathARM64(HInstruction* instruction) : instruction_(instruction) {}
+  explicit ArraySetSlowPathARM64(HInstruction* instruction) : SlowPathCodeARM64(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -557,8 +548,6 @@
   const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM64"; }
 
  private:
-  HInstruction* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM64);
 };
 
@@ -588,7 +577,7 @@
 class ReadBarrierMarkSlowPathARM64 : public SlowPathCodeARM64 {
  public:
   ReadBarrierMarkSlowPathARM64(HInstruction* instruction, Location out, Location obj)
-      : instruction_(instruction), out_(out), obj_(obj) {
+      : SlowPathCodeARM64(instruction), out_(out), obj_(obj) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -627,7 +616,6 @@
   }
 
  private:
-  HInstruction* const instruction_;
   const Location out_;
   const Location obj_;
 
@@ -643,7 +631,7 @@
                                            Location obj,
                                            uint32_t offset,
                                            Location index)
-      : instruction_(instruction),
+      : SlowPathCodeARM64(instruction),
         out_(out),
         ref_(ref),
         obj_(obj),
@@ -804,7 +792,6 @@
     UNREACHABLE();
   }
 
-  HInstruction* const instruction_;
   const Location out_;
   const Location ref_;
   const Location obj_;
@@ -821,7 +808,7 @@
 class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 {
  public:
   ReadBarrierForRootSlowPathARM64(HInstruction* instruction, Location out, Location root)
-      : instruction_(instruction), out_(out), root_(root) {
+      : SlowPathCodeARM64(instruction), out_(out), root_(root) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -865,7 +852,6 @@
   const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARM64"; }
 
  private:
-  HInstruction* const instruction_;
   const Location out_;
   const Location root_;
 
@@ -1876,6 +1862,36 @@
   HandleBinaryOp(instruction);
 }
 
+void LocationsBuilderARM64::VisitArm64BitwiseNegatedRight(HArm64BitwiseNegatedRight* instr) {
+  DCHECK(Primitive::IsIntegralType(instr->GetType())) << instr->GetType();
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+  locations->SetInAt(0, Location::RequiresRegister());
+  // There are no immediate variants of the negated bitwise instructions on AArch64.
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM64::VisitArm64BitwiseNegatedRight(
+    HArm64BitwiseNegatedRight* instr) {
+  Register dst = OutputRegister(instr);
+  Register lhs = InputRegisterAt(instr, 0);
+  Register rhs = InputRegisterAt(instr, 1);
+
+  switch (instr->GetOpKind()) {
+    case HInstruction::kAnd:
+      __ Bic(dst, lhs, rhs);
+      break;
+    case HInstruction::kOr:
+      __ Orn(dst, lhs, rhs);
+      break;
+    case HInstruction::kXor:
+      __ Eon(dst, lhs, rhs);
+      break;
+    default:
+      LOG(FATAL) << "Unreachable";
+  }
+}
+
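
NOTE: All three forms fold the negation of the right operand into a single
instruction. Scalar semantics, as a sketch:

    #include <cstdint>
    // Bic dst, lhs, rhs  =>  dst = lhs & ~rhs   (AND NOT)
    // Orn dst, lhs, rhs  =>  dst = lhs | ~rhs   (OR NOT)
    // Eon dst, lhs, rhs  =>  dst = lhs ^ ~rhs   (XOR NOT)
    int64_t bic(int64_t lhs, int64_t rhs) { return lhs & ~rhs; }
    int64_t orn(int64_t lhs, int64_t rhs) { return lhs | ~rhs; }
    int64_t eon(int64_t lhs, int64_t rhs) { return lhs ^ ~rhs; }

The missing immediate encoding for these forms is why the location builder above
requires a register for input 1.
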
 void LocationsBuilderARM64::VisitArm64DataProcWithShifterOp(
     HArm64DataProcWithShifterOp* instruction) {
   DCHECK(instruction->GetType() == Primitive::kPrimInt ||
@@ -1973,21 +1989,27 @@
          Operand(InputOperandAt(instruction, 1)));
 }
 
-void LocationsBuilderARM64::VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instr) {
+void LocationsBuilderARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
-  locations->SetInAt(HArm64MultiplyAccumulate::kInputAccumulatorIndex,
-                     Location::RequiresRegister());
-  locations->SetInAt(HArm64MultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
-  locations->SetInAt(HArm64MultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister());
+  HInstruction* accumulator = instr->InputAt(HMultiplyAccumulate::kInputAccumulatorIndex);
+  if (instr->GetOpKind() == HInstruction::kSub &&
+      accumulator->IsConstant() &&
+      accumulator->AsConstant()->IsZero()) {
+    // Don't allocate a register for the accumulator; Mneg does not use one.
+  } else {
+    locations->SetInAt(HMultiplyAccumulate::kInputAccumulatorIndex,
+                       Location::RequiresRegister());
+  }
+  locations->SetInAt(HMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
+  locations->SetInAt(HMultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister());
   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
 }
 
-void InstructionCodeGeneratorARM64::VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instr) {
+void InstructionCodeGeneratorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
   Register res = OutputRegister(instr);
-  Register accumulator = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputAccumulatorIndex);
-  Register mul_left = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputMulLeftIndex);
-  Register mul_right = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputMulRightIndex);
+  Register mul_left = InputRegisterAt(instr, HMultiplyAccumulate::kInputMulLeftIndex);
+  Register mul_right = InputRegisterAt(instr, HMultiplyAccumulate::kInputMulRightIndex);
 
   // Avoid emitting code that could trigger Cortex A53's erratum 835769.
   // This fixup should be carried out for all multiply-accumulate instructions:
@@ -2007,10 +2029,17 @@
   }
 
   if (instr->GetOpKind() == HInstruction::kAdd) {
+    Register accumulator = InputRegisterAt(instr, HMultiplyAccumulate::kInputAccumulatorIndex);
     __ Madd(res, mul_left, mul_right, accumulator);
   } else {
     DCHECK(instr->GetOpKind() == HInstruction::kSub);
-    __ Msub(res, mul_left, mul_right, accumulator);
+    HInstruction* accum_instr = instr->InputAt(HMultiplyAccumulate::kInputAccumulatorIndex);
+    if (accum_instr->IsConstant() && accum_instr->AsConstant()->IsZero()) {
+      __ Mneg(res, mul_left, mul_right);
+    } else {
+      Register accumulator = InputRegisterAt(instr, HMultiplyAccumulate::kInputAccumulatorIndex);
+      __ Msub(res, mul_left, mul_right, accumulator);
+    }
   }
 }
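
NOTE: When the accumulator is the constant zero, acc - a * b degenerates to a
negated multiply, so the location builder above skips the accumulator register and
the generator emits Mneg instead of Msub:

    #include <cstdint>
    // Mneg res, a, b  is the alias of  Msub res, a, b, xzr:
    int64_t mneg(int64_t a, int64_t b) { return -(a * b); }

This frees one register in the common 0 - a * b pattern.
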
 
@@ -3057,11 +3086,11 @@
 }
 
 void InstructionCodeGeneratorARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  if (codegen_->HasStackMapAtCurrentPc()) {
-    // Ensure that we do not collide with the stack map of the previous instruction.
-    __ Nop();
-  }
-  codegen_->RecordPcInfo(info, info->GetDexPc());
+  codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorARM64::GenerateNop() {
+  __ Nop();
 }
 
 void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
@@ -5031,7 +5060,7 @@
 void InstructionCodeGeneratorARM64::VisitClassTableGet(HClassTableGet* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   uint32_t method_offset = 0;
-  if (instruction->GetTableKind() == HClassTableGet::kVTable) {
+  if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kArm64PointerSize).SizeValue();
   } else {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 360488e..10f1e7f 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -66,7 +66,8 @@
 
 class SlowPathCodeARM64 : public SlowPathCode {
  public:
-  SlowPathCodeARM64() : entry_label_(), exit_label_() {}
+  explicit SlowPathCodeARM64(HInstruction* instruction)
+      : SlowPathCode(instruction), entry_label_(), exit_label_() {}
 
   vixl::Label* GetEntryLabel() { return &entry_label_; }
   vixl::Label* GetExitLabel() { return &exit_label_; }
@@ -195,6 +196,7 @@
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
+  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
@@ -284,6 +286,7 @@
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
+  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
@@ -532,6 +535,8 @@
   // artReadBarrierForRootSlow.
   void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
 
+  void GenerateNop();
+
  private:
   // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
   // and GenerateArrayLoadWithBakerReadBarrier.
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index c500ea4..f3c12ef 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -39,9 +39,6 @@
 static constexpr int kCurrentMethodStackOffset = 0;
 static constexpr Register kMethodRegisterArgument = A0;
 
-// We need extra temporary/scratch registers (in addition to AT) in some cases.
-static constexpr FRegister FTMP = F8;
-
 Location MipsReturnLocation(Primitive::Type return_type) {
   switch (return_type) {
     case Primitive::kPrimBoolean:
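
NOTE: Both MIPS backends delete their private FTMP definition with no replacement
visible in this excerpt; presumably the constant was hoisted into a shared
architecture header so the assembler can use it as well. The new location is an
assumption, not shown in this diff:

    // Assumed new home, e.g. an arch-level MIPS registers/constants header:
    // extra temporary/scratch FP register (in addition to AT).
    static constexpr FRegister FTMP = F8;
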
@@ -149,7 +146,7 @@
 
 class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
  public:
-  explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : instruction_(instruction) {}
+  explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : SlowPathCodeMIPS(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -181,14 +178,12 @@
   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; }
 
  private:
-  HBoundsCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS);
 };
 
 class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
  public:
-  explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : instruction_(instruction) {}
+  explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : SlowPathCodeMIPS(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
@@ -210,7 +205,6 @@
   const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; }
 
  private:
-  HDivZeroCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS);
 };
 
@@ -220,7 +214,7 @@
                         HInstruction* at,
                         uint32_t dex_pc,
                         bool do_clinit)
-      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+      : SlowPathCodeMIPS(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }
 
@@ -279,7 +273,7 @@
 
 class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
  public:
-  explicit LoadStringSlowPathMIPS(HLoadString* instruction) : instruction_(instruction) {}
+  explicit LoadStringSlowPathMIPS(HLoadString* instruction) : SlowPathCodeMIPS(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -290,7 +284,8 @@
     SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
-    __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    __ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
     mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                 instruction_,
                                 instruction_->GetDexPc(),
@@ -309,14 +304,12 @@
   const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }
 
  private:
-  HLoadString* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
 };
 
 class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
  public:
-  explicit NullCheckSlowPathMIPS(HNullCheck* instr) : instruction_(instr) {}
+  explicit NullCheckSlowPathMIPS(HNullCheck* instr) : SlowPathCodeMIPS(instr) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
@@ -338,15 +331,13 @@
   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; }
 
  private:
-  HNullCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
 };
 
 class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
  public:
   SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
-      : instruction_(instruction), successor_(successor) {}
+      : SlowPathCodeMIPS(instruction), successor_(successor) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
@@ -374,7 +365,6 @@
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }
 
  private:
-  HSuspendCheck* const instruction_;
   // If not null, the block to branch to after the suspend check.
   HBasicBlock* const successor_;
 
@@ -386,7 +376,7 @@
 
 class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
  public:
-  explicit TypeCheckSlowPathMIPS(HInstruction* instruction) : instruction_(instruction) {}
+  explicit TypeCheckSlowPathMIPS(HInstruction* instruction) : SlowPathCodeMIPS(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -437,15 +427,13 @@
   const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }
 
  private:
-  HInstruction* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS);
 };
 
 class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
  public:
   explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction)
-    : instruction_(instruction) {}
+    : SlowPathCodeMIPS(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
@@ -462,7 +450,6 @@
   const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }
 
  private:
-  HDeoptimize* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
 };
 
@@ -3407,11 +3394,11 @@
 }
 
 void InstructionCodeGeneratorMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  if (codegen_->HasStackMapAtCurrentPc()) {
-    // Ensure that we do not collide with the stack map of the previous instruction.
-    __ Nop();
-  }
-  codegen_->RecordPcInfo(info, info->GetDexPc());
+  codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorMIPS::GenerateNop() {
+  __ Nop();
 }
 
 void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
@@ -5258,7 +5245,7 @@
 void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   uint32_t method_offset = 0;
-  if (instruction->GetTableKind() == HClassTableGet::kVTable) {
+  if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kMipsPointerSize).SizeValue();
   } else {
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index dd0641c..605c794 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -152,7 +152,8 @@
 
 class SlowPathCodeMIPS : public SlowPathCode {
  public:
-  SlowPathCodeMIPS() : entry_label_(), exit_label_() {}
+  explicit SlowPathCodeMIPS(HInstruction* instruction)
+      : SlowPathCode(instruction), entry_label_(), exit_label_() {}
 
   MipsLabel* GetEntryLabel() { return &entry_label_; }
   MipsLabel* GetExitLabel() { return &exit_label_; }
@@ -360,6 +361,8 @@
     UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
   }
 
+  void GenerateNop();
+
  private:
   // Labels for each block that will be compiled.
   MipsLabel* block_labels_;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index e3a44f1..c2b84b4 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -37,9 +37,6 @@
 static constexpr int kCurrentMethodStackOffset = 0;
 static constexpr GpuRegister kMethodRegisterArgument = A0;
 
-// We need extra temporary/scratch registers (in addition to AT) in some cases.
-static constexpr FpuRegister FTMP = F8;
-
 Location Mips64ReturnLocation(Primitive::Type return_type) {
   switch (return_type) {
     case Primitive::kPrimBoolean:
@@ -110,7 +107,7 @@
 
 class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : instruction_(instruction) {}
+  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : SlowPathCodeMIPS64(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -141,14 +138,12 @@
   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }
 
  private:
-  HBoundsCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
 };
 
 class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : instruction_(instruction) {}
+  explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : SlowPathCodeMIPS64(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
@@ -169,7 +164,6 @@
   const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }
 
  private:
-  HDivZeroCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
 };
 
@@ -179,7 +173,7 @@
                           HInstruction* at,
                           uint32_t dex_pc,
                           bool do_clinit)
-      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+      : SlowPathCodeMIPS64(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }
 
@@ -234,7 +228,7 @@
 
 class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : instruction_(instruction) {}
+  explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : SlowPathCodeMIPS64(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -245,7 +239,8 @@
     SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
-    __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    __ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
     mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                   instruction_,
                                   instruction_->GetDexPc(),
@@ -263,14 +258,12 @@
   const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }
 
  private:
-  HLoadString* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
 };
 
 class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : instruction_(instr) {}
+  explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : SlowPathCodeMIPS64(instr) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
@@ -291,15 +284,13 @@
   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }
 
  private:
-  HNullCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
 };
 
 class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
   SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
-      : instruction_(instruction), successor_(successor) {}
+      : SlowPathCodeMIPS64(instruction), successor_(successor) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
@@ -326,7 +317,6 @@
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }
 
  private:
-  HSuspendCheck* const instruction_;
   // If not null, the block to branch to after the suspend check.
   HBasicBlock* const successor_;
 
@@ -338,7 +328,7 @@
 
 class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : instruction_(instruction) {}
+  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : SlowPathCodeMIPS64(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -384,15 +374,13 @@
   const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }
 
  private:
-  HInstruction* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
 };
 
 class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
   explicit DeoptimizationSlowPathMIPS64(HDeoptimize* instruction)
-    : instruction_(instruction) {}
+    : SlowPathCodeMIPS64(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
@@ -408,7 +396,6 @@
   const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }
 
  private:
-  HDeoptimize* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
 };
 
@@ -2732,11 +2719,11 @@
 }
 
 void InstructionCodeGeneratorMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  if (codegen_->HasStackMapAtCurrentPc()) {
-    // Ensure that we do not collide with the stack map of the previous instruction.
-    __ Nop();
-  }
-  codegen_->RecordPcInfo(info, info->GetDexPc());
+  codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorMIPS64::GenerateNop() {
+  __ Nop();
 }
 
 void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index eb7315a..ba9eaff 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -152,7 +152,8 @@
 
 class SlowPathCodeMIPS64 : public SlowPathCode {
  public:
-  SlowPathCodeMIPS64() : entry_label_(), exit_label_() {}
+  explicit SlowPathCodeMIPS64(HInstruction* instruction)
+      : SlowPathCode(instruction), entry_label_(), exit_label_() {}
 
   Mips64Label* GetEntryLabel() { return &entry_label_; }
   Mips64Label* GetExitLabel() { return &exit_label_; }
@@ -352,6 +353,8 @@
     UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64";
   }
 
+  void GenerateNop();
+
  private:
   // Labels for each block that will be compiled.
   Mips64Label* block_labels_;  // Indexed by block id.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f032f51..6b4a18c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -52,7 +52,7 @@
 
 class NullCheckSlowPathX86 : public SlowPathCode {
  public:
-  explicit NullCheckSlowPathX86(HNullCheck* instruction) : instruction_(instruction) {}
+  explicit NullCheckSlowPathX86(HNullCheck* instruction) : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
@@ -73,13 +73,12 @@
   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86"; }
 
  private:
-  HNullCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86);
 };
 
 class DivZeroCheckSlowPathX86 : public SlowPathCode {
  public:
-  explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : instruction_(instruction) {}
+  explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
@@ -100,13 +99,13 @@
   const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86"; }
 
  private:
-  HDivZeroCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86);
 };
 
 class DivRemMinusOneSlowPathX86 : public SlowPathCode {
  public:
-  DivRemMinusOneSlowPathX86(Register reg, bool is_div) : reg_(reg), is_div_(is_div) {}
+  DivRemMinusOneSlowPathX86(HInstruction* instruction, Register reg, bool is_div)
+      : SlowPathCode(instruction), reg_(reg), is_div_(is_div) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
@@ -128,7 +127,7 @@
 
 class BoundsCheckSlowPathX86 : public SlowPathCode {
  public:
-  explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : instruction_(instruction) {}
+  explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -160,15 +159,13 @@
   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86"; }
 
  private:
-  HBoundsCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
 };
 
 class SuspendCheckSlowPathX86 : public SlowPathCode {
  public:
   SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
-      : instruction_(instruction), successor_(successor) {}
+      : SlowPathCode(instruction), successor_(successor) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
@@ -199,7 +196,6 @@
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86"; }
 
  private:
-  HSuspendCheck* const instruction_;
   HBasicBlock* const successor_;
   Label return_label_;
 
@@ -208,7 +204,7 @@
 
 class LoadStringSlowPathX86 : public SlowPathCode {
  public:
-  explicit LoadStringSlowPathX86(HLoadString* instruction) : instruction_(instruction) {}
+  explicit LoadStringSlowPathX86(HLoadString* instruction) : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -219,7 +215,8 @@
     SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
-    __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction_->GetStringIndex()));
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    __ movl(calling_convention.GetRegisterAt(0), Immediate(string_index));
     x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                instruction_,
                                instruction_->GetDexPc(),
@@ -234,8 +231,6 @@
   const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86"; }
 
  private:
-  HLoadString* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86);
 };
 
@@ -245,7 +240,7 @@
                        HInstruction* at,
                        uint32_t dex_pc,
                        bool do_clinit)
-      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+      : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }
 
@@ -299,7 +294,7 @@
 class TypeCheckSlowPathX86 : public SlowPathCode {
  public:
   TypeCheckSlowPathX86(HInstruction* instruction, bool is_fatal)
-      : instruction_(instruction), is_fatal_(is_fatal) {}
+      : SlowPathCode(instruction), is_fatal_(is_fatal) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -356,7 +351,6 @@
   bool IsFatal() const OVERRIDE { return is_fatal_; }
 
  private:
-  HInstruction* const instruction_;
   const bool is_fatal_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
@@ -365,7 +359,7 @@
 class DeoptimizationSlowPathX86 : public SlowPathCode {
  public:
   explicit DeoptimizationSlowPathX86(HDeoptimize* instruction)
-    : instruction_(instruction) {}
+    : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
@@ -381,13 +375,12 @@
   const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86"; }
 
  private:
-  HDeoptimize* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86);
 };
 
 class ArraySetSlowPathX86 : public SlowPathCode {
  public:
-  explicit ArraySetSlowPathX86(HInstruction* instruction) : instruction_(instruction) {}
+  explicit ArraySetSlowPathX86(HInstruction* instruction) : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -426,8 +419,6 @@
   const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86"; }
 
  private:
-  HInstruction* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86);
 };
 
@@ -435,7 +426,7 @@
 class ReadBarrierMarkSlowPathX86 : public SlowPathCode {
  public:
   ReadBarrierMarkSlowPathX86(HInstruction* instruction, Location out, Location obj)
-      : instruction_(instruction), out_(out), obj_(obj) {
+      : SlowPathCode(instruction), out_(out), obj_(obj) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -474,7 +465,6 @@
   }
 
  private:
-  HInstruction* const instruction_;
   const Location out_;
   const Location obj_;
 
@@ -490,7 +480,7 @@
                                          Location obj,
                                          uint32_t offset,
                                          Location index)
-      : instruction_(instruction),
+      : SlowPathCode(instruction),
         out_(out),
         ref_(ref),
         obj_(obj),
@@ -645,7 +635,6 @@
     UNREACHABLE();
   }
 
-  HInstruction* const instruction_;
   const Location out_;
   const Location ref_;
   const Location obj_;
@@ -662,7 +651,7 @@
 class ReadBarrierForRootSlowPathX86 : public SlowPathCode {
  public:
   ReadBarrierForRootSlowPathX86(HInstruction* instruction, Location out, Location root)
-      : instruction_(instruction), out_(out), root_(root) {
+      : SlowPathCode(instruction), out_(out), root_(root) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -695,7 +684,6 @@
   const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86"; }
 
  private:
-  HInstruction* const instruction_;
   const Location out_;
   const Location root_;
 
@@ -1649,11 +1637,11 @@
 }
 
 void InstructionCodeGeneratorX86::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  if (codegen_->HasStackMapAtCurrentPc()) {
-    // Ensure that we do not collide with the stack map of the previous instruction.
-    __ nop();
-  }
-  codegen_->RecordPcInfo(info, info->GetDexPc());
+  codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorX86::GenerateNop() {
+  __ nop();
 }
 
 void LocationsBuilderX86::VisitLocal(HLocal* local) {
@@ -3453,9 +3441,8 @@
           GenerateDivRemWithAnyConstant(instruction);
         }
       } else {
-        SlowPathCode* slow_path =
-          new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86(out.AsRegister<Register>(),
-              is_div);
+        SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86(
+            instruction, out.AsRegister<Register>(), is_div);
         codegen_->AddSlowPath(slow_path);
 
         Register second_reg = second.AsRegister<Register>();
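
NOTE: The two div/rem-minus-one slow paths (here and in the x86-64 file below)
previously stored no instruction at all; with the field now mandatory in the base,
they gain the triggering HDiv/HRem as a constructor argument. That lets generic
slow-path machinery reach per-instruction data, e.g. (GetInstruction() as the
accessor name is an assumption):

    // Generic code can now ask any slow path for its dex pc (sketch):
    uint32_t dex_pc = slow_path->GetInstruction()->GetDexPc();
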
@@ -4140,7 +4127,7 @@
 void InstructionCodeGeneratorX86::VisitClassTableGet(HClassTableGet* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   uint32_t method_offset = 0;
-  if (instruction->GetTableKind() == HClassTableGet::kVTable) {
+  if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kX86PointerSize).SizeValue();
   } else {
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 63e9b2f..0795f3b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -540,6 +540,7 @@
     }
   }
 
+  void GenerateNop();
 
  private:
   // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f3c40b1..c132663 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -56,7 +56,7 @@
 
 class NullCheckSlowPathX86_64 : public SlowPathCode {
  public:
-  explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : instruction_(instruction) {}
+  explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
@@ -77,13 +77,12 @@
   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86_64"; }
 
  private:
-  HNullCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64);
 };
 
 class DivZeroCheckSlowPathX86_64 : public SlowPathCode {
  public:
-  explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : instruction_(instruction) {}
+  explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
@@ -104,14 +103,13 @@
   const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86_64"; }
 
  private:
-  HDivZeroCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86_64);
 };
 
 class DivRemMinusOneSlowPathX86_64 : public SlowPathCode {
  public:
-  DivRemMinusOneSlowPathX86_64(Register reg, Primitive::Type type, bool is_div)
-      : cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {}
+  DivRemMinusOneSlowPathX86_64(HInstruction* at, Register reg, Primitive::Type type, bool is_div)
+      : SlowPathCode(at), cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
@@ -145,7 +143,7 @@
 class SuspendCheckSlowPathX86_64 : public SlowPathCode {
  public:
   SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor)
-      : instruction_(instruction), successor_(successor) {}
+      : SlowPathCode(instruction), successor_(successor) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
@@ -176,7 +174,6 @@
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86_64"; }
 
  private:
-  HSuspendCheck* const instruction_;
   HBasicBlock* const successor_;
   Label return_label_;
 
@@ -186,7 +183,7 @@
 class BoundsCheckSlowPathX86_64 : public SlowPathCode {
  public:
   explicit BoundsCheckSlowPathX86_64(HBoundsCheck* instruction)
-    : instruction_(instruction) {}
+    : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -218,8 +215,6 @@
   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86_64"; }
 
  private:
-  HBoundsCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
 };
 
@@ -229,7 +224,7 @@
                           HInstruction* at,
                           uint32_t dex_pc,
                           bool do_clinit)
-      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+      : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }
 
@@ -286,7 +281,7 @@
 
 class LoadStringSlowPathX86_64 : public SlowPathCode {
  public:
-  explicit LoadStringSlowPathX86_64(HLoadString* instruction) : instruction_(instruction) {}
+  explicit LoadStringSlowPathX86_64(HLoadString* instruction) : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -297,8 +292,8 @@
     SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
-    __ movl(CpuRegister(calling_convention.GetRegisterAt(0)),
-            Immediate(instruction_->GetStringIndex()));
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(string_index));
     x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                   instruction_,
                                   instruction_->GetDexPc(),
@@ -312,15 +307,13 @@
   const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86_64"; }
 
  private:
-  HLoadString* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64);
 };
 
 class TypeCheckSlowPathX86_64 : public SlowPathCode {
  public:
   TypeCheckSlowPathX86_64(HInstruction* instruction, bool is_fatal)
-      : instruction_(instruction), is_fatal_(is_fatal) {}
+      : SlowPathCode(instruction), is_fatal_(is_fatal) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -379,7 +372,6 @@
   bool IsFatal() const OVERRIDE { return is_fatal_; }
 
  private:
-  HInstruction* const instruction_;
   const bool is_fatal_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
@@ -388,7 +380,7 @@
 class DeoptimizationSlowPathX86_64 : public SlowPathCode {
  public:
   explicit DeoptimizationSlowPathX86_64(HDeoptimize* instruction)
-      : instruction_(instruction) {}
+      : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
@@ -404,13 +396,12 @@
   const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; }
 
  private:
-  HDeoptimize* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86_64);
 };
 
 class ArraySetSlowPathX86_64 : public SlowPathCode {
  public:
-  explicit ArraySetSlowPathX86_64(HInstruction* instruction) : instruction_(instruction) {}
+  explicit ArraySetSlowPathX86_64(HInstruction* instruction) : SlowPathCode(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -449,8 +440,6 @@
   const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86_64"; }
 
  private:
-  HInstruction* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64);
 };
 
@@ -458,7 +447,7 @@
 class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode {
  public:
   ReadBarrierMarkSlowPathX86_64(HInstruction* instruction, Location out, Location obj)
-      : instruction_(instruction), out_(out), obj_(obj) {
+      : SlowPathCode(instruction), out_(out), obj_(obj) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -497,7 +486,6 @@
   }
 
  private:
-  HInstruction* const instruction_;
   const Location out_;
   const Location obj_;
 
@@ -513,7 +501,7 @@
                                             Location obj,
                                             uint32_t offset,
                                             Location index)
-      : instruction_(instruction),
+      : SlowPathCode(instruction),
         out_(out),
         ref_(ref),
         obj_(obj),
@@ -667,7 +655,6 @@
     UNREACHABLE();
   }
 
-  HInstruction* const instruction_;
   const Location out_;
   const Location ref_;
   const Location obj_;
@@ -684,7 +671,7 @@
 class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode {
  public:
   ReadBarrierForRootSlowPathX86_64(HInstruction* instruction, Location out, Location root)
-      : instruction_(instruction), out_(out), root_(root) {
+      : SlowPathCode(instruction), out_(out), root_(root) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -716,7 +703,6 @@
   const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86_64"; }
 
  private:
-  HInstruction* const instruction_;
   const Location out_;
   const Location root_;
 
@@ -1632,11 +1618,11 @@
 }
 
 void InstructionCodeGeneratorX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  if (codegen_->HasStackMapAtCurrentPc()) {
-    // Ensure that we do not collide with the stack map of the previous instruction.
-    __ nop();
-  }
-  codegen_->RecordPcInfo(info, info->GetDexPc());
+  codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorX86_64::GenerateNop() {
+  __ nop();
 }
 
 void LocationsBuilderX86_64::VisitLocal(HLocal* local) {
@@ -3546,7 +3532,7 @@
   } else {
     SlowPathCode* slow_path =
         new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86_64(
-            out.AsRegister(), type, is_div);
+            instruction, out.AsRegister(), type, is_div);
     codegen_->AddSlowPath(slow_path);
 
     CpuRegister second_reg = second.AsRegister<CpuRegister>();
@@ -4012,7 +3998,7 @@
 void InstructionCodeGeneratorX86_64::VisitClassTableGet(HClassTableGet* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   uint32_t method_offset = 0;
-  if (instruction->GetTableKind() == HClassTableGet::kVTable) {
+  if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kX86_64PointerSize).SizeValue();
   } else {
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 97f6f84..b3d27e1 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -513,6 +513,8 @@
     }
   }
 
+  void GenerateNop();
+
  private:
   // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
   // and GenerateArrayLoadWithBakerReadBarrier.
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index c0263e4..b9638f2 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -436,17 +436,23 @@
     StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit");
   }
 
+#if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
+  void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) OVERRIDE {
+    StartAttributeStream("kind") << instruction->GetOpKind();
+  }
+#endif
+
 #ifdef ART_ENABLE_CODEGEN_arm64
+  void VisitArm64BitwiseNegatedRight(HArm64BitwiseNegatedRight* instruction) OVERRIDE {
+    StartAttributeStream("kind") << instruction->GetOpKind();
+  }
+
   void VisitArm64DataProcWithShifterOp(HArm64DataProcWithShifterOp* instruction) OVERRIDE {
     StartAttributeStream("kind") << instruction->GetInstrKind() << "+" << instruction->GetOpKind();
     if (HArm64DataProcWithShifterOp::IsShiftOp(instruction->GetOpKind())) {
       StartAttributeStream("shift") << instruction->GetShiftAmount();
     }
   }
-
-  void VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instruction) OVERRIDE {
-    StartAttributeStream("kind") << instruction->GetOpKind();
-  }
 #endif
 
   bool IsPass(const char* name) {
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 37f2d79..82a898a 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -379,7 +379,7 @@
                                                                          Primitive::Type type) {
   // Transfer over a shift left: treat shift by restricted constant as equivalent multiplication.
   int64_t value = -1;
-  if (a != nullptr && IsIntAndGet(b, &value)) {
+  if (a != nullptr && IsExact(b, &value)) {
     // Obtain the constant needed for the multiplication. This yields an existing instruction
     // if the constant is already there. Otherwise, this has a side effect on the HIR.
     // The restriction on the shift factor avoids generating a negative constant
@@ -546,14 +546,17 @@
     // Analyze condition with induction at left-hand-side (e.g. i < U).
     InductionInfo* lower_expr = a->op_b;
     InductionInfo* upper_expr = b;
-    InductionInfo* stride = a->op_a;
+    InductionInfo* stride_expr = a->op_a;
+    // Constant stride?
     int64_t stride_value = 0;
-    if (!IsIntAndGet(stride, &stride_value)) {
+    if (!IsExact(stride_expr, &stride_value)) {
       return;
     }
-    // Rewrite condition i != U into i < U or i > U if end condition is reached exactly.
-    if (cmp == kCondNE && ((stride_value == +1 && IsTaken(lower_expr, upper_expr, kCondLT)) ||
-                           (stride_value == -1 && IsTaken(lower_expr, upper_expr, kCondGT)))) {
+    // Rewrite condition i != U into the strict end condition i < U or i > U if the end
+    // condition is reached exactly (tested by verifying that the loop has a unit stride
+    // and that the non-strict condition would always be taken).
+    if (cmp == kCondNE && ((stride_value == +1 && IsTaken(lower_expr, upper_expr, kCondLE)) ||
+                           (stride_value == -1 && IsTaken(lower_expr, upper_expr, kCondGE)))) {
       cmp = stride_value > 0 ? kCondLT : kCondGT;
     }
     // Normalize a linear loop control with a nonzero stride:
@@ -561,7 +564,7 @@
     //   stride < 0, either i > U or i >= U
     if ((stride_value > 0 && (cmp == kCondLT || cmp == kCondLE)) ||
         (stride_value < 0 && (cmp == kCondGT || cmp == kCondGE))) {
-      VisitTripCount(loop, lower_expr, upper_expr, stride, stride_value, type, cmp);
+      VisitTripCount(loop, lower_expr, upper_expr, stride_expr, stride_value, type, cmp);
     }
   }
 }
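
Editor's note: a standalone sketch (not part of this patch) of why the guard above is the non-strict comparison. With unit stride and lower <= upper, the i != U form and the rewritten strict form run the same number of iterations, including the zero-iteration case lower == upper that a strict kCondLT guard would reject; if lower > upper, the i != U loop would not terminate at all.

    #include <cassert>

    // Unit-stride loop with the original inequality test (exact hit required).
    static int TripCountNe(int lower, int upper) {
      int count = 0;
      for (int i = lower; i != upper; ++i) count++;
      return count;
    }

    // The rewritten strict test.
    static int TripCountLt(int lower, int upper) {
      int count = 0;
      for (int i = lower; i < upper; ++i) count++;
      return count;
    }

    int main() {
      // Whenever the non-strict condition lower <= upper holds on entry,
      // both loop forms agree on the trip count.
      assert(TripCountNe(2, 7) == TripCountLt(2, 7));
      assert(TripCountNe(5, 5) == TripCountLt(5, 5));  // zero iterations
      return 0;
    }
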
@@ -569,7 +572,7 @@
 void HInductionVarAnalysis::VisitTripCount(HLoopInformation* loop,
                                            InductionInfo* lower_expr,
                                            InductionInfo* upper_expr,
-                                           InductionInfo* stride,
+                                           InductionInfo* stride_expr,
                                            int64_t stride_value,
                                            Primitive::Type type,
                                            IfCondition cmp) {
@@ -612,9 +615,10 @@
       trip_count = CreateInvariantOp(kAdd, trip_count, CreateConstant(1, type));
     }
     // Compensate for stride.
-    trip_count = CreateInvariantOp(kAdd, trip_count, stride);
+    trip_count = CreateInvariantOp(kAdd, trip_count, stride_expr);
   }
-  trip_count = CreateInvariantOp(kDiv, CreateInvariantOp(kSub, trip_count, lower_expr), stride);
+  trip_count = CreateInvariantOp(
+      kDiv, CreateInvariantOp(kSub, trip_count, lower_expr), stride_expr);
   // Assign the trip-count expression to the loop control. Clients that use the information
   // should be aware that the expression is only valid under the conditions listed above.
   InductionOp tcKind = kTripCountInBodyUnsafe;  // needs both tests
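
Editor's note: the kAdd/kSub/kDiv chain above appears to build the classic closed form TC = (U + S - L) / S for the inclusive loop for (i = L; i <= U; i += S) with S > 0, after first adjusting a strict bound by one. A minimal check of that formula, as an illustration only:

    #include <cassert>

    // Count iterations of: for (i = L; i <= U; i += S), S > 0.
    static long TripCount(long L, long U, long S) {
      long count = 0;
      for (long i = L; i <= U; i += S) count++;
      return count;
    }

    int main() {
      assert(TripCount(0, 9, 1) == (9 + 1 - 0) / 1);    // 10 iterations
      assert(TripCount(0, 9, 2) == (9 + 2 - 0) / 2);    // i = 0,2,4,6,8 -> 5
      assert(TripCount(3, 17, 4) == (17 + 4 - 3) / 4);  // i = 3,7,11,15 -> 4
      return 0;
    }
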
@@ -644,14 +648,25 @@
                                     IfCondition cmp) {
   int64_t lower_value;
   int64_t upper_value;
-  if (IsIntAndGet(lower_expr, &lower_value) && IsIntAndGet(upper_expr, &upper_value)) {
-    switch (cmp) {
-      case kCondLT: return lower_value <  upper_value;
-      case kCondLE: return lower_value <= upper_value;
-      case kCondGT: return lower_value >  upper_value;
-      case kCondGE: return lower_value >= upper_value;
-      default:      LOG(FATAL) << "CONDITION UNREACHABLE";
-    }
+  switch (cmp) {
+    case kCondLT:
+      return IsAtMost(lower_expr, &lower_value)
+          && IsAtLeast(upper_expr, &upper_value)
+          && lower_value < upper_value;
+    case kCondLE:
+      return IsAtMost(lower_expr, &lower_value)
+          && IsAtLeast(upper_expr, &upper_value)
+          && lower_value <= upper_value;
+    case kCondGT:
+      return IsAtLeast(lower_expr, &lower_value)
+          && IsAtMost(upper_expr, &upper_value)
+          && lower_value > upper_value;
+    case kCondGE:
+      return IsAtLeast(lower_expr, &lower_value)
+          && IsAtMost(upper_expr, &upper_value)
+          && lower_value >= upper_value;
+    default:
+      LOG(FATAL) << "CONDITION UNREACHABLE";
   }
   return false;  // not certain, may be untaken
 }
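
Editor's note: the reworked IsTaken no longer needs exact constants. A strict test such as i < U is certainly taken when an upper bound on the left side (IsAtMost) is still below a lower bound on the right side (IsAtLeast). A small sketch of that reasoning, with a hypothetical Range struct standing in for the analysis results:

    #include <cassert>
    #include <cstdint>

    struct Range { int64_t min; int64_t max; };  // conservative bounds

    // "Certainly taken" for kCondLT: every possible left value lies below
    // every possible right value.
    static bool CertainlyTakenLT(Range lower, Range upper) {
      return lower.max < upper.min;
    }

    int main() {
      assert(CertainlyTakenLT({0, 3}, {5, 9}));   // all of [0,3] < all of [5,9]
      assert(!CertainlyTakenLT({0, 7}, {5, 9}));  // 7 < 5 fails: not certain
      return 0;
    }
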
@@ -660,25 +675,23 @@
                                      int64_t stride_value,
                                      Primitive::Type type,
                                      IfCondition cmp) {
-  const int64_t min = type == Primitive::kPrimInt
-      ? std::numeric_limits<int32_t>::min()
-      : std::numeric_limits<int64_t>::min();
-  const int64_t max = type == Primitive::kPrimInt
-        ? std::numeric_limits<int32_t>::max()
-        : std::numeric_limits<int64_t>::max();
+  const int64_t min = type == Primitive::kPrimInt ? std::numeric_limits<int32_t>::min()
+                                                  : std::numeric_limits<int64_t>::min();
+  const int64_t max = type == Primitive::kPrimInt ? std::numeric_limits<int32_t>::max()
+                                                  : std::numeric_limits<int64_t>::max();
   // Some rules under which it is certain at compile-time that the loop is finite.
   int64_t value;
   switch (cmp) {
     case kCondLT:
       return stride_value == 1 ||
-          (IsIntAndGet(upper_expr, &value) && value <= (max - stride_value + 1));
+          (IsAtMost(upper_expr, &value) && value <= (max - stride_value + 1));
     case kCondLE:
-      return (IsIntAndGet(upper_expr, &value) && value <= (max - stride_value));
+      return (IsAtMost(upper_expr, &value) && value <= (max - stride_value));
     case kCondGT:
       return stride_value == -1 ||
-          (IsIntAndGet(upper_expr, &value) && value >= (min - stride_value - 1));
+          (IsAtLeast(upper_expr, &value) && value >= (min - stride_value - 1));
     case kCondGE:
-      return (IsIntAndGet(upper_expr, &value) && value >= (min - stride_value));
+      return (IsAtLeast(upper_expr, &value) && value >= (min - stride_value));
     default:
       LOG(FATAL) << "CONDITION UNREACHABLE";
   }
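
Editor's note: the IsFinite rules guard against the increment stepping over the bound and wrapping around. For kCondLE with stride S > 0, the loop is only provably finite when U <= max - S, so that i += S can never overflow before the exit test fails. A hedged sketch of just that rule:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Finiteness guard for: for (i = L; i <= U; i += S), S > 0.
    static bool FiniteCondLE(int64_t upper, int64_t stride, int64_t max) {
      return upper <= max - stride;
    }

    int main() {
      const int64_t max = std::numeric_limits<int32_t>::max();
      assert(FiniteCondLE(1000, 2, max));      // ordinary loop: finite
      assert(!FiniteCondLE(max - 1, 2, max));  // i += 2 may step past U and wrap
      return 0;
    }
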
@@ -733,7 +746,7 @@
   // More exhaustive simplifications are done by later phases once induction nodes are
   // translated back into HIR code (e.g. by loop optimizations or BCE).
   int64_t value = -1;
-  if (IsIntAndGet(a, &value)) {
+  if (IsExact(a, &value)) {
     if (value == 0) {
       // Simplify 0 + b = b, 0 * b = 0.
       if (op == kAdd) {
@@ -750,7 +763,7 @@
       }
     }
   }
-  if (IsIntAndGet(b, &value)) {
+  if (IsExact(b, &value)) {
     if (value == 0) {
       // Simplify a + 0 = a, a - 0 = a, a * 0 = 0, -0 = 0.
       if (op == kAdd || op == kSub) {
@@ -784,29 +797,16 @@
   return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr);
 }
 
-bool HInductionVarAnalysis::IsIntAndGet(InductionInfo* info, int64_t* value) {
-  if (info != nullptr && info->induction_class == kInvariant) {
-    // A direct constant fetch.
-    if (info->operation == kFetch) {
-      DCHECK(info->fetch);
-      if (info->fetch->IsIntConstant()) {
-        *value = info->fetch->AsIntConstant()->GetValue();
-        return true;
-      } else if (info->fetch->IsLongConstant()) {
-        *value = info->fetch->AsLongConstant()->GetValue();
-        return true;
-      }
-    }
-    // Use range analysis to resolve compound values.
-    InductionVarRange range(this);
-    int32_t min_val = 0;
-    int32_t max_val = 0;
-    if (range.IsConstantRange(info, &min_val, &max_val) && min_val == max_val) {
-      *value = min_val;
-      return true;
-    }
-  }
-  return false;
+bool HInductionVarAnalysis::IsExact(InductionInfo* info, int64_t* value) {
+  return InductionVarRange(this).IsConstant(info, InductionVarRange::kExact, value);
+}
+
+bool HInductionVarAnalysis::IsAtMost(InductionInfo* info, int64_t* value) {
+  return InductionVarRange(this).IsConstant(info, InductionVarRange::kAtMost, value);
+}
+
+bool HInductionVarAnalysis::IsAtLeast(InductionInfo* info, int64_t* value) {
+  return InductionVarRange(this).IsConstant(info, InductionVarRange::kAtLeast, value);
 }
 
 bool HInductionVarAnalysis::InductionEqual(InductionInfo* info1,
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 84d5d82..94d2646 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -189,7 +189,9 @@
   InductionInfo* CreateSimplifiedInvariant(InductionOp op, InductionInfo* a, InductionInfo* b);
 
   // Constants.
-  bool IsIntAndGet(InductionInfo* info, int64_t* value);
+  bool IsExact(InductionInfo* info, /*out*/ int64_t* value);
+  bool IsAtMost(InductionInfo* info, /*out*/ int64_t* value);
+  bool IsAtLeast(InductionInfo* info, /*out*/ int64_t* value);
 
   // Helpers.
   static bool InductionEqual(InductionInfo* info1, InductionInfo* info2);
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 9566c29..f9b6910 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -45,17 +45,14 @@
   return c2 != 0 && CanLongValueFitIntoInt(static_cast<int64_t>(c1) / static_cast<int64_t>(c2));
 }
 
-/** Returns true for 32/64-bit integral constant. */
-static bool IsIntAndGet(HInstruction* instruction, int32_t* value) {
+/** Returns true for 32/64-bit constant instruction. */
+static bool IsIntAndGet(HInstruction* instruction, int64_t* value) {
   if (instruction->IsIntConstant()) {
     *value = instruction->AsIntConstant()->GetValue();
     return true;
   } else if (instruction->IsLongConstant()) {
-    const int64_t c = instruction->AsLongConstant()->GetValue();
-    if (CanLongValueFitIntoInt(c)) {
-      *value = static_cast<int32_t>(c);
-      return true;
-    }
+    *value = instruction->AsLongConstant()->GetValue();
+    return true;
   }
   return false;
 }
@@ -65,8 +62,9 @@
  * because length >= 0 is true. This makes it more likely the bound is useful to clients.
  */
 static InductionVarRange::Value SimplifyMax(InductionVarRange::Value v) {
-  int32_t value;
-  if (v.a_constant > 1 &&
+  int64_t value;
+  if (v.is_known &&
+      v.a_constant > 1 &&
       v.instruction->IsDiv() &&
       v.instruction->InputAt(0)->IsArrayLength() &&
       IsIntAndGet(v.instruction->InputAt(1), &value) && v.a_constant == value) {
@@ -75,6 +73,16 @@
   return v;
 }
 
+/** Helper method to test for a constant value. */
+static bool IsConstantValue(InductionVarRange::Value v) {
+  return v.is_known && v.a_constant == 0;
+}
+
+/** Helper method to test for same constant value. */
+static bool IsSameConstantValue(InductionVarRange::Value v1, InductionVarRange::Value v2) {
+  return IsConstantValue(v1) && IsConstantValue(v2) && v1.b_constant == v2.b_constant;
+}
+
 /** Helper method to insert an instruction. */
 static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) {
   DCHECK(block != nullptr);
@@ -99,29 +107,45 @@
                                           /*out*/Value* max_val,
                                           /*out*/bool* needs_finite_test) {
   HLoopInformation* loop = context->GetBlock()->GetLoopInformation();  // closest enveloping loop
-  if (loop != nullptr) {
-    // Set up loop information.
-    HBasicBlock* header = loop->GetHeader();
-    bool in_body = context->GetBlock() != header;
-    HInductionVarAnalysis::InductionInfo* info =
-        induction_analysis_->LookupInfo(loop, instruction);
-    HInductionVarAnalysis::InductionInfo* trip =
-        induction_analysis_->LookupInfo(loop, header->GetLastInstruction());
-    // Find range.
-    *min_val = GetVal(info, trip, in_body, /* is_min */ true);
-    *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false));
-    *needs_finite_test = NeedsTripCount(info) && IsUnsafeTripCount(trip);
-    return true;
+  if (loop == nullptr) {
+    return false;  // no loop
   }
-  return false;  // Nothing known
+  HInductionVarAnalysis::InductionInfo* info = induction_analysis_->LookupInfo(loop, instruction);
+  if (info == nullptr) {
+    return false;  // no induction information
+  }
+  // Set up loop information.
+  HBasicBlock* header = loop->GetHeader();
+  bool in_body = context->GetBlock() != header;
+  HInductionVarAnalysis::InductionInfo* trip =
+      induction_analysis_->LookupInfo(loop, header->GetLastInstruction());
+  // Find range.
+  *min_val = GetVal(info, trip, in_body, /* is_min */ true);
+  *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false));
+  *needs_finite_test = NeedsTripCount(info) && IsUnsafeTripCount(trip);
+  return true;
 }
 
-bool InductionVarRange::RefineOuter(/*in-out*/Value* min_val, /*in-out*/Value* max_val) const {
-  Value v1 = RefineOuter(*min_val, /* is_min */ true);
-  Value v2 = RefineOuter(*max_val, /* is_min */ false);
-  if (v1.instruction != min_val->instruction || v2.instruction != max_val->instruction) {
-    *min_val = v1;
-    *max_val = v2;
+bool InductionVarRange::RefineOuter(/*in-out*/ Value* min_val,
+                                    /*in-out*/ Value* max_val) const {
+  Value v1_min = RefineOuter(*min_val, /* is_min */ true);
+  Value v2_max = RefineOuter(*max_val, /* is_min */ false);
+  // The refined range is safe if both sides refine the same instruction. Otherwise, since two
+  // different ranges are combined, the new refined range is safe to pass back to the client if
+  // the extremes of the computed ranges ensure no arithmetic wrap-around anomalies occur.
+  if (min_val->instruction != max_val->instruction) {
+    Value v1_max = RefineOuter(*min_val, /* is_min */ false);
+    Value v2_min = RefineOuter(*max_val, /* is_min */ true);
+    if (!IsConstantValue(v1_max) ||
+        !IsConstantValue(v2_min) ||
+        v1_max.b_constant > v2_min.b_constant) {
+      return false;
+    }
+  }
+  // Did something change?
+  if (v1_min.instruction != min_val->instruction || v2_max.instruction != max_val->instruction) {
+    *min_val = v1_min;
+    *max_val = v2_max;
     return true;
   }
   return false;
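
Editor's note: a compact illustration of the cross-check above (the Bound struct is hypothetical, not ART code). When the two refined extremes come from different instructions, the pair is only accepted if a constant upper bound on the first lies at or below a constant lower bound on the second, which rules out crossed, wrap-around-prone ranges.

    #include <cassert>
    #include <cstdint>

    struct Bound { bool is_constant; int64_t value; };

    // Accept the combined range only for ordered constant extremes.
    static bool SafeToCombine(Bound v1_max, Bound v2_min) {
      return v1_max.is_constant && v2_min.is_constant && v1_max.value <= v2_min.value;
    }

    int main() {
      assert(SafeToCombine({true, 10}, {true, 20}));   // [., 10] before [20, .]
      assert(!SafeToCombine({true, 30}, {true, 20}));  // crossed ranges: reject
      assert(!SafeToCombine({false, 0}, {true, 20}));  // unknown extreme: reject
      return 0;
    }
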
@@ -164,6 +188,46 @@
 // Private class methods.
 //
 
+bool InductionVarRange::IsConstant(HInductionVarAnalysis::InductionInfo* info,
+                                   ConstantRequest request,
+                                   /*out*/ int64_t* value) const {
+  if (info != nullptr) {
+    // A direct 32-bit or 64-bit constant fetch. This immediately satisfies
+    // any of the three requests (kExact, kAtMost, and kAtLeast).
+    if (info->induction_class == HInductionVarAnalysis::kInvariant &&
+        info->operation == HInductionVarAnalysis::kFetch) {
+      if (IsIntAndGet(info->fetch, value)) {
+        return true;
+      }
+    }
+    // Try range analysis while traversing outward on loops.
+    bool in_body = true;  // no known trip count
+    Value v_min = GetVal(info, nullptr, in_body, /* is_min */ true);
+    Value v_max = GetVal(info, nullptr, in_body, /* is_min */ false);
+    do {
+      // Make sure *both* extremes are known to avoid arithmetic wrap-around anomalies.
+      if (IsConstantValue(v_min) &&
+          IsConstantValue(v_max) &&
+          v_min.b_constant <= v_max.b_constant) {
+        if ((request == kExact && v_min.b_constant == v_max.b_constant) || request == kAtMost) {
+          *value = v_max.b_constant;
+          return true;
+        } else if (request == kAtLeast) {
+          *value = v_min.b_constant;
+          return true;
+        }
+      }
+    } while (RefineOuter(&v_min, &v_max));
+    // Exploit the fact that array length + c >= c for c <= 0 to avoid wrap-around anomalies
+    // (e.g. array length == maxint and c == 1 would yield minint).
+    if (request == kAtLeast) {
+      if (v_min.a_constant == 1 && v_min.b_constant <= 0 && v_min.instruction->IsArrayLength()) {
+        *value = v_min.b_constant;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
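
Editor's note: the three request kinds reduce to picking an extreme of the computed range once both extremes are known. A behavioral mirror of that contract (an illustration only; the Range struct here is hypothetical, not ART code):

    #include <cassert>
    #include <cstdint>

    enum ConstantRequest { kExact, kAtMost, kAtLeast };
    struct Range { int64_t min; int64_t max; bool known; };

    // kExact requires a degenerate range; kAtMost yields the upper extreme,
    // kAtLeast the lower one. Both extremes must be known first.
    static bool IsConstant(Range r, ConstantRequest request, int64_t* value) {
      if (!r.known || r.min > r.max) return false;
      if (request == kExact && r.min != r.max) return false;
      *value = (request == kAtLeast) ? r.min : r.max;
      return true;
    }

    int main() {
      int64_t v;
      assert(IsConstant({11, 22, true}, kAtMost, &v) && v == 22);
      assert(IsConstant({11, 22, true}, kAtLeast, &v) && v == 11);
      assert(!IsConstant({11, 22, true}, kExact, &v));
      assert(IsConstant({7, 7, true}, kExact, &v) && v == 7);
      return 0;
    }

This matches the expectations encoded in the new IsConstant unit test further below.
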
 bool InductionVarRange::NeedsTripCount(HInductionVarAnalysis::InductionInfo* info) const {
   if (info != nullptr) {
     if (info->induction_class == HInductionVarAnalysis::kLinear) {
@@ -206,12 +270,10 @@
   if (trip != nullptr) {
     HInductionVarAnalysis::InductionInfo* trip_expr = trip->op_a;
     if (trip_expr->operation == HInductionVarAnalysis::kSub) {
-      int32_t min_value = 0;
-      int32_t stride_value = 0;
-      if (IsConstantRange(info->op_a, &min_value, &stride_value) && min_value == stride_value) {
+      int64_t stride_value = 0;
+      if (IsConstant(info->op_a, kExact, &stride_value)) {
         if (!is_min && stride_value == 1) {
-          // Test original trip's negative operand (trip_expr->op_b) against
-          // the offset of the linear induction.
+          // Test original trip's negative operand (trip_expr->op_b) against offset of induction.
           if (HInductionVarAnalysis::InductionEqual(trip_expr->op_b, info->op_b)) {
             // Analyze cancelled trip with just the positive operand (trip_expr->op_a).
             HInductionVarAnalysis::InductionInfo cancelled_trip(
@@ -219,8 +281,7 @@
             return GetVal(&cancelled_trip, trip, in_body, is_min);
           }
         } else if (is_min && stride_value == -1) {
-          // Test original trip's positive operand (trip_expr->op_a) against
-          // the offset of the linear induction.
+          // Test original trip's positive operand (trip_expr->op_a) against offset of induction.
           if (HInductionVarAnalysis::InductionEqual(trip_expr->op_a, info->op_b)) {
             // Analyze cancelled trip with just the negative operand (trip_expr->op_b).
             HInductionVarAnalysis::InductionInfo neg(
@@ -248,14 +309,16 @@
                                                      bool is_min) const {
   // Detect constants and chase the fetch a bit deeper into the HIR tree, so that it becomes
   // more likely range analysis will compare the same instructions as terminal nodes.
-  int32_t value;
-  if (IsIntAndGet(instruction, &value)) {
-    return Value(value);
+  int64_t value;
+  if (IsIntAndGet(instruction, &value) && CanLongValueFitIntoInt(value))  {
+    return Value(static_cast<int32_t>(value));
   } else if (instruction->IsAdd()) {
-    if (IsIntAndGet(instruction->InputAt(0), &value)) {
-      return AddValue(Value(value), GetFetch(instruction->InputAt(1), trip, in_body, is_min));
-    } else if (IsIntAndGet(instruction->InputAt(1), &value)) {
-      return AddValue(GetFetch(instruction->InputAt(0), trip, in_body, is_min), Value(value));
+    if (IsIntAndGet(instruction->InputAt(0), &value) && CanLongValueFitIntoInt(value)) {
+      return AddValue(Value(static_cast<int32_t>(value)),
+                      GetFetch(instruction->InputAt(1), trip, in_body, is_min));
+    } else if (IsIntAndGet(instruction->InputAt(1), &value) && CanLongValueFitIntoInt(value)) {
+      return AddValue(GetFetch(instruction->InputAt(0), trip, in_body, is_min),
+                      Value(static_cast<int32_t>(value)));
     }
   } else if (instruction->IsArrayLength() && instruction->InputAt(0)->IsNewArray()) {
     return GetFetch(instruction->InputAt(0)->InputAt(0), trip, in_body, is_min);
@@ -331,29 +394,30 @@
   Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
   Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
   Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
-  // Try to refine certain failure.
-  if (v1_min.a_constant && v1_max.a_constant) {
-    v1_min = RefineOuter(v1_min, /* is_min */ true);
-    v1_max = RefineOuter(v1_max, /* is_min */ false);
+  // Try to refine first operand.
+  if (!IsConstantValue(v1_min) && !IsConstantValue(v1_max)) {
+    RefineOuter(&v1_min, &v1_max);
   }
-  // Positive or negative range?
-  if (v1_min.is_known && v1_min.a_constant == 0 && v1_min.b_constant >= 0) {
-    // Positive range vs. positive or negative range.
-    if (v2_min.is_known && v2_min.a_constant == 0 && v2_min.b_constant >= 0) {
-      return is_min ? MulValue(v1_min, v2_min)
-                    : MulValue(v1_max, v2_max);
-    } else if (v2_max.is_known && v2_max.a_constant == 0 && v2_max.b_constant <= 0) {
-      return is_min ? MulValue(v1_max, v2_min)
-                    : MulValue(v1_min, v2_max);
+  // Constant times range.
+  if (IsSameConstantValue(v1_min, v1_max)) {
+    return MulRangeAndConstant(v2_min, v2_max, v1_min, is_min);
+  } else if (IsSameConstantValue(v2_min, v2_max)) {
+    return MulRangeAndConstant(v1_min, v1_max, v2_min, is_min);
+  }
+  // Positive range vs. positive or negative range.
+  if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
+    if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
+      return is_min ? MulValue(v1_min, v2_min) : MulValue(v1_max, v2_max);
+    } else if (IsConstantValue(v2_max) && v2_max.b_constant <= 0) {
+      return is_min ? MulValue(v1_max, v2_min) : MulValue(v1_min, v2_max);
     }
-  } else if (v1_max.is_known && v1_max.a_constant == 0 && v1_max.b_constant <= 0) {
-    // Negative range vs. positive or negative range.
-    if (v2_min.is_known && v2_min.a_constant == 0 && v2_min.b_constant >= 0) {
-      return is_min ? MulValue(v1_min, v2_max)
-                    : MulValue(v1_max, v2_min);
-    } else if (v2_max.is_known && v2_max.a_constant == 0 && v2_max.b_constant <= 0) {
-      return is_min ? MulValue(v1_max, v2_max)
-                    : MulValue(v1_min, v2_min);
+  }
+  // Negative range vs. positive or negative range.
+  if (IsConstantValue(v1_max) && v1_max.b_constant <= 0) {
+    if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
+      return is_min ? MulValue(v1_min, v2_max) : MulValue(v1_max, v2_min);
+    } else if (IsConstantValue(v2_max) && v2_max.b_constant <= 0) {
+      return is_min ? MulValue(v1_max, v2_max) : MulValue(v1_min, v2_min);
     }
   }
   return Value();
@@ -368,43 +432,41 @@
   Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
   Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
   Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
-  // Positive or negative range?
-  if (v1_min.is_known && v1_min.a_constant == 0 && v1_min.b_constant >= 0) {
-    // Positive range vs. positive or negative range.
-    if (v2_min.is_known && v2_min.a_constant == 0 && v2_min.b_constant >= 0) {
-      return is_min ? DivValue(v1_min, v2_max)
-                    : DivValue(v1_max, v2_min);
-    } else if (v2_max.is_known && v2_max.a_constant == 0 && v2_max.b_constant <= 0) {
-      return is_min ? DivValue(v1_max, v2_max)
-                    : DivValue(v1_min, v2_min);
+  // Range divided by constant.
+  if (IsSameConstantValue(v2_min, v2_max)) {
+    return DivRangeAndConstant(v1_min, v1_max, v2_min, is_min);
+  }
+  // Positive range vs. positive or negative range.
+  if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
+    if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
+      return is_min ? DivValue(v1_min, v2_max) : DivValue(v1_max, v2_min);
+    } else if (IsConstantValue(v2_max) && v2_max.b_constant <= 0) {
+      return is_min ? DivValue(v1_max, v2_max) : DivValue(v1_min, v2_min);
     }
-  } else if (v1_max.is_known && v1_max.a_constant == 0 && v1_max.b_constant <= 0) {
-    // Negative range vs. positive or negative range.
-    if (v2_min.is_known && v2_min.a_constant == 0 && v2_min.b_constant >= 0) {
-      return is_min ? DivValue(v1_min, v2_min)
-                    : DivValue(v1_max, v2_max);
-    } else if (v2_max.is_known && v2_max.a_constant == 0 && v2_max.b_constant <= 0) {
-      return is_min ? DivValue(v1_max, v2_min)
-                    : DivValue(v1_min, v2_max);
+  }
+  // Negative range vs. positive or negative range.
+  if (IsConstantValue(v1_max) && v1_max.b_constant <= 0) {
+    if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
+      return is_min ? DivValue(v1_min, v2_min) : DivValue(v1_max, v2_max);
+    } else if (IsConstantValue(v2_max) && v2_max.b_constant <= 0) {
+      return is_min ? DivValue(v1_max, v2_min) : DivValue(v1_min, v2_max);
     }
   }
   return Value();
 }
 
-bool InductionVarRange::IsConstantRange(HInductionVarAnalysis::InductionInfo* info,
-                                        int32_t *min_value,
-                                        int32_t *max_value) const {
-  bool in_body = true;  // no known trip count
-  Value v_min = GetVal(info, nullptr, in_body, /* is_min */ true);
-  Value v_max = GetVal(info, nullptr, in_body, /* is_min */ false);
-  do {
-    if (v_min.is_known && v_min.a_constant == 0 && v_max.is_known && v_max.a_constant == 0) {
-      *min_value = v_min.b_constant;
-      *max_value = v_max.b_constant;
-      return true;
-    }
-  } while (RefineOuter(&v_min, &v_max));
-  return false;
+InductionVarRange::Value InductionVarRange::MulRangeAndConstant(Value v_min,
+                                                                Value v_max,
+                                                                Value c,
+                                                                bool is_min) const {
+  return is_min == (c.b_constant >= 0) ? MulValue(v_min, c) : MulValue(v_max, c);
+}
+
+InductionVarRange::Value InductionVarRange::DivRangeAndConstant(Value v_min,
+                                                                Value v_max,
+                                                                Value c,
+                                                                bool is_min) const {
+  return is_min == (c.b_constant >= 0) ? DivValue(v_min, c) : DivValue(v_max, c);
 }
 
 InductionVarRange::Value InductionVarRange::AddValue(Value v1, Value v2) const {
@@ -471,22 +533,25 @@
 }
 
 InductionVarRange::Value InductionVarRange::RefineOuter(Value v, bool is_min) const {
-  if (v.instruction != nullptr) {
-    HLoopInformation* loop =
-        v.instruction->GetBlock()->GetLoopInformation();  // closest enveloping loop
-    if (loop != nullptr) {
-      // Set up loop information.
-      bool in_body = true;  // use is always in body of outer loop
-      HInductionVarAnalysis::InductionInfo* info =
-          induction_analysis_->LookupInfo(loop, v.instruction);
-      HInductionVarAnalysis::InductionInfo* trip =
-          induction_analysis_->LookupInfo(loop, loop->GetHeader()->GetLastInstruction());
-      // Try to refine "a x instruction + b" with outer loop range information on instruction.
-      return AddValue(MulValue(Value(v.a_constant), GetVal(info, trip, in_body, is_min)),
-                      Value(v.b_constant));
-    }
+  if (v.instruction == nullptr) {
+    return v;  // nothing to refine
   }
-  return v;
+  HLoopInformation* loop =
+      v.instruction->GetBlock()->GetLoopInformation();  // closest enveloping loop
+  if (loop == nullptr) {
+    return v;  // no loop
+  }
+  HInductionVarAnalysis::InductionInfo* info = induction_analysis_->LookupInfo(loop, v.instruction);
+  if (info == nullptr) {
+    return v;  // no induction information
+  }
+  // Set up loop information.
+  HBasicBlock* header = loop->GetHeader();
+  bool in_body = true;  // a use in an inner loop is always in the body of the outer loop
+  HInductionVarAnalysis::InductionInfo* trip =
+      induction_analysis_->LookupInfo(loop, header->GetLastInstruction());
+  // Try to refine "a x instruction + b" with outer loop range information on instruction.
+  return AddValue(MulValue(Value(v.a_constant), GetVal(info, trip, in_body, is_min)),
+                  Value(v.b_constant));
 }
 
 bool InductionVarRange::GenerateCode(HInstruction* context,
@@ -499,44 +564,45 @@
                                      /*out*/bool* needs_finite_test,
                                      /*out*/bool* needs_taken_test) const {
   HLoopInformation* loop = context->GetBlock()->GetLoopInformation();  // closest enveloping loop
-  if (loop != nullptr) {
-    // Set up loop information.
-    HBasicBlock* header = loop->GetHeader();
-    bool in_body = context->GetBlock() != header;
-    HInductionVarAnalysis::InductionInfo* info =
-        induction_analysis_->LookupInfo(loop, instruction);
-    if (info == nullptr) {
-      return false;  // nothing to analyze
-    }
-    HInductionVarAnalysis::InductionInfo* trip =
-        induction_analysis_->LookupInfo(loop, header->GetLastInstruction());
-    // Determine what tests are needed. A finite test is needed if the evaluation code uses the
-    // trip-count and the loop maybe unsafe (because in such cases, the index could "overshoot"
-    // the computed range). A taken test is needed for any unknown trip-count, even if evaluation
-    // code does not use the trip-count explicitly (since there could be an implicit relation
-    // between e.g. an invariant subscript and a not-taken condition).
-    *needs_finite_test = NeedsTripCount(info) && IsUnsafeTripCount(trip);
-    *needs_taken_test = IsBodyTripCount(trip);
-    // Code generation for taken test: generate the code when requested or otherwise analyze
-    // if code generation is feasible when taken test is needed.
-    if (taken_test != nullptr) {
-      return GenerateCode(
-          trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min */ false);
-    } else if (*needs_taken_test) {
-      if (!GenerateCode(
-          trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min */ false)) {
-        return false;
-      }
-    }
-    // Code generation for lower and upper.
-    return
-        // Success on lower if invariant (not set), or code can be generated.
-        ((info->induction_class == HInductionVarAnalysis::kInvariant) ||
-            GenerateCode(info, trip, graph, block, lower, in_body, /* is_min */ true)) &&
-        // And success on upper.
-        GenerateCode(info, trip, graph, block, upper, in_body, /* is_min */ false);
+  if (loop == nullptr) {
+    return false;  // no loop
   }
-  return false;
+  HInductionVarAnalysis::InductionInfo* info = induction_analysis_->LookupInfo(loop, instruction);
+  if (info == nullptr) {
+    return false;  // no induction information
+  }
+  // Set up loop information.
+  HBasicBlock* header = loop->GetHeader();
+  bool in_body = context->GetBlock() != header;
+  HInductionVarAnalysis::InductionInfo* trip =
+      induction_analysis_->LookupInfo(loop, header->GetLastInstruction());
+  if (trip == nullptr) {
+    return false;  // codegen relies on trip count
+  }
+  // Determine what tests are needed. A finite test is needed if the evaluation code uses the
+  // trip-count and the loop may be unsafe (because in such cases, the index could "overshoot"
+  // the computed range). A taken test is needed for any unknown trip-count, even if evaluation
+  // code does not use the trip-count explicitly (since there could be an implicit relation
+  // between e.g. an invariant subscript and a not-taken condition).
+  *needs_finite_test = NeedsTripCount(info) && IsUnsafeTripCount(trip);
+  *needs_taken_test = IsBodyTripCount(trip);
+  // Code generation for taken test: generate the code when requested or otherwise analyze
+  // if code generation is feasible when taken test is needed.
+  if (taken_test != nullptr) {
+    return GenerateCode(trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min */ false);
+  } else if (*needs_taken_test) {
+    if (!GenerateCode(
+        trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min */ false)) {
+      return false;
+    }
+  }
+  // Code generation for lower and upper.
+  return
+      // Success on lower if invariant (not set), or code can be generated.
+      ((info->induction_class == HInductionVarAnalysis::kInvariant) ||
+          GenerateCode(info, trip, graph, block, lower, in_body, /* is_min */ true)) &&
+      // And success on upper.
+      GenerateCode(info, trip, graph, block, upper, in_body, /* is_min */ false);
 }
 
 bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
@@ -639,9 +705,8 @@
       case HInductionVarAnalysis::kLinear: {
         // Linear induction a * i + b, for normalized 0 <= i < TC. Restrict to unit stride only
         // to avoid arithmetic wrap-around situations that are hard to guard against.
-        int32_t min_value = 0;
-        int32_t stride_value = 0;
-        if (IsConstantRange(info->op_a, &min_value, &stride_value) && min_value == stride_value) {
+        int64_t stride_value = 0;
+        if (IsConstant(info->op_a, kExact, &stride_value)) {
           if (stride_value == 1 || stride_value == -1) {
             const bool is_min_a = stride_value == 1 ? is_min : !is_min;
             if (GenerateCode(trip,       trip, graph, block, &opa, in_body, is_min_a) &&
@@ -666,7 +731,7 @@
         // Wrap-around and periodic inductions are restricted to constants only, so that extreme
         // values are easy to test at runtime without complications of arithmetic wrap-around.
         Value extreme = GetVal(info, trip, in_body, is_min);
-        if (extreme.is_known && extreme.a_constant == 0) {
+        if (IsConstantValue(extreme)) {
           if (graph != nullptr) {
             *result = graph->GetIntConstant(extreme.b_constant);
           }
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 3cb7b4b..0af4156 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -69,7 +69,8 @@
                          /*out*/ bool* needs_finite_test);
 
   /** Refines the values with induction of next outer loop. Returns true on change. */
-  bool RefineOuter(/*in-out*/Value* min_val, /*in-out*/Value* max_val) const;
+  bool RefineOuter(/*in-out*/ Value* min_val,
+                   /*in-out*/ Value* max_val) const;
 
   /**
    * Returns true if range analysis is able to generate code for the lower and upper
@@ -116,6 +117,23 @@
                          /*out*/ HInstruction** taken_test);
 
  private:
+  /**
+   * Enum used in IsConstant() requests.
+   */
+  enum ConstantRequest {
+    kExact,
+    kAtMost,
+    kAtLeast
+  };
+
+  /**
+   * Returns true if exact or upper/lower bound on the given induction
+   * information is known as a 64-bit constant, which is returned in value.
+   */
+  bool IsConstant(HInductionVarAnalysis::InductionInfo* info,
+                  ConstantRequest request,
+                  /*out*/ int64_t* value) const;
+
   bool NeedsTripCount(HInductionVarAnalysis::InductionInfo* info) const;
   bool IsBodyTripCount(HInductionVarAnalysis::InductionInfo* trip) const;
   bool IsUnsafeTripCount(HInductionVarAnalysis::InductionInfo* trip) const;
@@ -143,9 +161,8 @@
                bool in_body,
                bool is_min) const;
 
-  bool IsConstantRange(HInductionVarAnalysis::InductionInfo* info,
-                       int32_t *min_value,
-                       int32_t *max_value) const;
+  Value MulRangeAndConstant(Value v1, Value v2, Value c, bool is_min) const;
+  Value DivRangeAndConstant(Value v1, Value v2, Value c, bool is_min) const;
 
   Value AddValue(Value v1, Value v2) const;
   Value SubValue(Value v1, Value v2) const;
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 55a654e..c5c33bd 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -215,10 +215,16 @@
     return range_.GetDiv(info1, info2, nullptr, /* in_body */ true, is_min);
   }
 
-  bool IsConstantRange(HInductionVarAnalysis::InductionInfo* info,
-                       int32_t* min_value,
-                       int32_t* max_value) {
-    return range_.IsConstantRange(info, min_value, max_value);
+  bool IsExact(HInductionVarAnalysis::InductionInfo* info, int64_t* value) {
+    return range_.IsConstant(info, InductionVarRange::kExact, value);
+  }
+
+  bool IsAtMost(HInductionVarAnalysis::InductionInfo* info, int64_t* value) {
+    return range_.IsConstant(info, InductionVarRange::kAtMost, value);
+  }
+
+  bool IsAtLeast(HInductionVarAnalysis::InductionInfo* info, int64_t* value) {
+    return range_.IsConstant(info, InductionVarRange::kAtLeast, value);
   }
 
   Value AddValue(Value v1, Value v2) { return range_.AddValue(v1, v2); }
@@ -249,6 +255,34 @@
 // Tests on private methods.
 //
 
+TEST_F(InductionVarRangeTest, IsConstant) {
+  int64_t value;
+  // Constant.
+  EXPECT_TRUE(IsExact(CreateConst(12345), &value));
+  EXPECT_EQ(12345, value);
+  EXPECT_TRUE(IsAtMost(CreateConst(12345), &value));
+  EXPECT_EQ(12345, value);
+  EXPECT_TRUE(IsAtLeast(CreateConst(12345), &value));
+  EXPECT_EQ(12345, value);
+  // Constant trivial range.
+  EXPECT_TRUE(IsExact(CreateRange(111, 111), &value));
+  EXPECT_EQ(111, value);
+  EXPECT_TRUE(IsAtMost(CreateRange(111, 111), &value));
+  EXPECT_EQ(111, value);
+  EXPECT_TRUE(IsAtLeast(CreateRange(111, 111), &value));
+  EXPECT_EQ(111, value);
+  // Constant non-trivial range.
+  EXPECT_FALSE(IsExact(CreateRange(11, 22), &value));
+  EXPECT_TRUE(IsAtMost(CreateRange(11, 22), &value));
+  EXPECT_EQ(22, value);
+  EXPECT_TRUE(IsAtLeast(CreateRange(11, 22), &value));
+  EXPECT_EQ(11, value);
+  // Symbolic.
+  EXPECT_FALSE(IsExact(CreateFetch(x_), &value));
+  EXPECT_FALSE(IsAtMost(CreateFetch(x_), &value));
+  EXPECT_FALSE(IsAtLeast(CreateFetch(x_), &value));
+}
+
 TEST_F(InductionVarRangeTest, TripCountProperties) {
   EXPECT_FALSE(NeedsTripCount(nullptr));
   EXPECT_FALSE(NeedsTripCount(CreateConst(1)));
@@ -367,6 +401,10 @@
 }
 
 TEST_F(InductionVarRangeTest, GetMulMin) {
+  ExpectEqual(Value(-14), GetMul(CreateConst(2), CreateRange(-7, 8), true));
+  ExpectEqual(Value(-16), GetMul(CreateConst(-2), CreateRange(-7, 8), true));
+  ExpectEqual(Value(-14), GetMul(CreateRange(-7, 8), CreateConst(2), true));
+  ExpectEqual(Value(-16), GetMul(CreateRange(-7, 8), CreateConst(-2), true));
   ExpectEqual(Value(6), GetMul(CreateRange(2, 10), CreateRange(3, 5), true));
   ExpectEqual(Value(-50), GetMul(CreateRange(2, 10), CreateRange(-5, -3), true));
   ExpectEqual(Value(), GetMul(CreateRange(2, 10), CreateRange(-1, 1), true));
@@ -379,6 +417,10 @@
 }
 
 TEST_F(InductionVarRangeTest, GetMulMax) {
+  ExpectEqual(Value(16), GetMul(CreateConst(2), CreateRange(-7, 8), false));
+  ExpectEqual(Value(14), GetMul(CreateConst(-2), CreateRange(-7, 8), false));
+  ExpectEqual(Value(16), GetMul(CreateRange(-7, 8), CreateConst(2), false));
+  ExpectEqual(Value(14), GetMul(CreateRange(-7, 8), CreateConst(-2), false));
   ExpectEqual(Value(50), GetMul(CreateRange(2, 10), CreateRange(3, 5), false));
   ExpectEqual(Value(-6), GetMul(CreateRange(2, 10), CreateRange(-5, -3), false));
   ExpectEqual(Value(), GetMul(CreateRange(2, 10), CreateRange(-1, 1), false));
@@ -391,6 +433,8 @@
 }
 
 TEST_F(InductionVarRangeTest, GetDivMin) {
+  ExpectEqual(Value(-5), GetDiv(CreateRange(-10, 20), CreateConst(2), true));
+  ExpectEqual(Value(-10), GetDiv(CreateRange(-10, 20), CreateConst(-2), true));
   ExpectEqual(Value(10), GetDiv(CreateRange(40, 1000), CreateRange(2, 4), true));
   ExpectEqual(Value(-500), GetDiv(CreateRange(40, 1000), CreateRange(-4, -2), true));
   ExpectEqual(Value(), GetDiv(CreateRange(40, 1000), CreateRange(-1, 1), true));
@@ -403,6 +447,8 @@
 }
 
 TEST_F(InductionVarRangeTest, GetDivMax) {
+  ExpectEqual(Value(10), GetDiv(CreateRange(-10, 20), CreateConst(2), false));
+  ExpectEqual(Value(5), GetDiv(CreateRange(-10, 20), CreateConst(-2), false));
   ExpectEqual(Value(500), GetDiv(CreateRange(40, 1000), CreateRange(2, 4), false));
   ExpectEqual(Value(-10), GetDiv(CreateRange(40, 1000), CreateRange(-4, -2), false));
   ExpectEqual(Value(), GetDiv(CreateRange(40, 1000), CreateRange(-1, 1), false));
@@ -414,18 +460,6 @@
   ExpectEqual(Value(), GetDiv(CreateRange(-1, 1), CreateRange(-1, 1), false));
 }
 
-TEST_F(InductionVarRangeTest, IsConstantRange) {
-  int32_t min_value;
-  int32_t max_value;
-  ASSERT_TRUE(IsConstantRange(CreateConst(12345), &min_value, &max_value));
-  EXPECT_EQ(12345, min_value);
-  EXPECT_EQ(12345, max_value);
-  ASSERT_TRUE(IsConstantRange(CreateRange(1, 2), &min_value, &max_value));
-  EXPECT_EQ(1, min_value);
-  EXPECT_EQ(2, max_value);
-  EXPECT_FALSE(IsConstantRange(CreateFetch(x_), &min_value, &max_value));
-}
-
 TEST_F(InductionVarRangeTest, AddValue) {
   ExpectEqual(Value(110), AddValue(Value(10), Value(100)));
   ExpectEqual(Value(-5), AddValue(Value(x_, 1, -4), Value(x_, -1, -1)));
@@ -459,6 +493,24 @@
   ExpectEqual(Value(), MulValue(Value(90000), Value(-90000)));  // unsafe
 }
 
+TEST_F(InductionVarRangeTest, MulValueSpecial) {
+  const int32_t min_value = std::numeric_limits<int32_t>::min();
+  const int32_t max_value = std::numeric_limits<int32_t>::max();
+
+  // Unsafe.
+  ExpectEqual(Value(), MulValue(Value(min_value), Value(min_value)));
+  ExpectEqual(Value(), MulValue(Value(min_value), Value(-1)));
+  ExpectEqual(Value(), MulValue(Value(min_value), Value(max_value)));
+  ExpectEqual(Value(), MulValue(Value(max_value), Value(max_value)));
+
+  // Safe.
+  ExpectEqual(Value(min_value), MulValue(Value(min_value), Value(1)));
+  ExpectEqual(Value(max_value), MulValue(Value(max_value), Value(1)));
+  ExpectEqual(Value(-max_value), MulValue(Value(max_value), Value(-1)));
+  ExpectEqual(Value(-1), MulValue(Value(1), Value(-1)));
+  ExpectEqual(Value(1), MulValue(Value(-1), Value(-1)));
+}
+
 TEST_F(InductionVarRangeTest, DivValue) {
   ExpectEqual(Value(25), DivValue(Value(100), Value(4)));
   ExpectEqual(Value(), DivValue(Value(x_, 1, -4), Value(x_, 1, -1)));
@@ -468,6 +520,23 @@
   ExpectEqual(Value(), DivValue(Value(1), Value(0)));  // unsafe
 }
 
+TEST_F(InductionVarRangeTest, DivValueSpecial) {
+  const int32_t min_value = std::numeric_limits<int32_t>::min();
+  const int32_t max_value = std::numeric_limits<int32_t>::max();
+
+  // Unsafe.
+  ExpectEqual(Value(), DivValue(Value(min_value), Value(-1)));
+
+  // Safe.
+  ExpectEqual(Value(1), DivValue(Value(min_value), Value(min_value)));
+  ExpectEqual(Value(1), DivValue(Value(max_value), Value(max_value)));
+  ExpectEqual(Value(min_value), DivValue(Value(min_value), Value(1)));
+  ExpectEqual(Value(max_value), DivValue(Value(max_value), Value(1)));
+  ExpectEqual(Value(-max_value), DivValue(Value(max_value), Value(-1)));
+  ExpectEqual(Value(-1), DivValue(Value(1), Value(-1)));
+  ExpectEqual(Value(1), DivValue(Value(-1), Value(-1)));
+}
+
 TEST_F(InductionVarRangeTest, MinValue) {
   ExpectEqual(Value(10), MinValue(Value(10), Value(100)));
   ExpectEqual(Value(x_, 1, -4), MinValue(Value(x_, 1, -4), Value(x_, 1, -1)));
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 02a1acc..3e3719e 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -562,31 +562,16 @@
   graph_->reverse_post_order_[++index] = otherwise;
   graph_->reverse_post_order_[++index] = merge;
 
-  // Set the loop information of the newly created blocks.
-  HLoopInformation* loop_info = cursor_block->GetLoopInformation();
-  if (loop_info != nullptr) {
-    then->SetLoopInformation(cursor_block->GetLoopInformation());
-    merge->SetLoopInformation(cursor_block->GetLoopInformation());
-    otherwise->SetLoopInformation(cursor_block->GetLoopInformation());
-    for (HLoopInformationOutwardIterator loop_it(*cursor_block);
-         !loop_it.Done();
-         loop_it.Advance()) {
-      loop_it.Current()->Add(then);
-      loop_it.Current()->Add(merge);
-      loop_it.Current()->Add(otherwise);
-    }
-    // In case the original invoke location was a back edge, we need to update
-    // the loop to now have the merge block as a back edge.
-    if (loop_info->IsBackEdge(*original_invoke_block)) {
-      loop_info->RemoveBackEdge(original_invoke_block);
-      loop_info->AddBackEdge(merge);
-    }
-  }
 
-  // Set the try/catch information of the newly created blocks.
-  then->SetTryCatchInformation(cursor_block->GetTryCatchInformation());
-  merge->SetTryCatchInformation(cursor_block->GetTryCatchInformation());
-  otherwise->SetTryCatchInformation(cursor_block->GetTryCatchInformation());
+  graph_->UpdateLoopAndTryInformationOfNewBlock(
+      then, original_invoke_block, /* replace_if_back_edge */ false);
+  graph_->UpdateLoopAndTryInformationOfNewBlock(
+      otherwise, original_invoke_block, /* replace_if_back_edge */ false);
+
+  // In case the original invoke location was a back edge, we need to update
+  // the loop to now have the merge block as a back edge.
+  graph_->UpdateLoopAndTryInformationOfNewBlock(
+      merge, original_invoke_block, /* replace_if_back_edge */ true);
 }
 
 bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
@@ -603,6 +588,10 @@
 
   DCHECK(resolved_method != nullptr);
   ArtMethod* actual_method = nullptr;
+  size_t method_index = invoke_instruction->IsInvokeVirtual()
+      ? invoke_instruction->AsInvokeVirtual()->GetVTableIndex()
+      : invoke_instruction->AsInvokeInterface()->GetImtIndex();
+
   // Check whether we are actually calling the same method among
   // the different types seen.
   for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
@@ -611,13 +600,18 @@
     }
     ArtMethod* new_method = nullptr;
     if (invoke_instruction->IsInvokeInterface()) {
-      new_method = ic.GetTypeAt(i)->FindVirtualMethodForInterface(
-          resolved_method, pointer_size);
+      new_method = ic.GetTypeAt(i)->GetEmbeddedImTableEntry(
+          method_index % mirror::Class::kImtSize, pointer_size);
+      if (new_method->IsRuntimeMethod()) {
+        // Bail out as soon as we see a conflict trampoline in the interface
+        // table of one of the target types.
+        return false;
+      }
     } else {
       DCHECK(invoke_instruction->IsInvokeVirtual());
-      new_method = ic.GetTypeAt(i)->FindVirtualMethodForVirtual(
-          resolved_method, pointer_size);
+      new_method = ic.GetTypeAt(i)->GetEmbeddedVTableEntry(method_index, pointer_size);
     }
+    DCHECK(new_method != nullptr);
     if (actual_method == nullptr) {
       actual_method = new_method;
     } else if (actual_method != new_method) {
@@ -641,18 +635,15 @@
   HInstanceFieldGet* receiver_class = BuildGetReceiverClass(
       class_linker, receiver, invoke_instruction->GetDexPc());
 
-  size_t method_offset = invoke_instruction->IsInvokeVirtual()
-      ? actual_method->GetVtableIndex()
-      : invoke_instruction->AsInvokeInterface()->GetImtIndex();
-
   Primitive::Type type = Is64BitInstructionSet(graph_->GetInstructionSet())
       ? Primitive::kPrimLong
       : Primitive::kPrimInt;
   HClassTableGet* class_table_get = new (graph_->GetArena()) HClassTableGet(
       receiver_class,
       type,
-      invoke_instruction->IsInvokeVirtual() ? HClassTableGet::kVTable : HClassTableGet::kIMTable,
-      method_offset,
+      invoke_instruction->IsInvokeVirtual() ? HClassTableGet::TableKind::kVTable
+                                            : HClassTableGet::TableKind::kIMTable,
+      method_index,
       invoke_instruction->GetDexPc());
 
   HConstant* constant;
@@ -1019,6 +1010,8 @@
     // at runtime, we change this call as if it was a virtual call.
     invoke_type = kVirtual;
   }
+
+  const int32_t caller_instruction_counter = graph_->GetCurrentInstructionId();
   HGraph* callee_graph = new (graph_->GetArena()) HGraph(
       graph_->GetArena(),
       callee_dex_file,
@@ -1028,7 +1021,7 @@
       invoke_type,
       graph_->IsDebuggable(),
       /* osr */ false,
-      graph_->GetCurrentInstructionId());
+      caller_instruction_counter);
   callee_graph->SetArtMethod(resolved_method);
 
   OptimizingCompilerStats inline_stats;
@@ -1228,7 +1221,16 @@
   }
   number_of_inlined_instructions_ += number_of_instructions;
 
+  DCHECK_EQ(caller_instruction_counter, graph_->GetCurrentInstructionId())
+      << "No instructions can be added to the outer graph while inner graph is being built";
+
+  const int32_t callee_instruction_counter = callee_graph->GetCurrentInstructionId();
+  graph_->SetCurrentInstructionId(callee_instruction_counter);
   *return_replacement = callee_graph->InlineInto(graph_, invoke_instruction);
+
+  DCHECK_EQ(callee_instruction_counter, callee_graph->GetCurrentInstructionId())
+      << "No instructions can be added to the inner graph during inlining into the outer graph";
+
   return true;
 }
 
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 13d3f75..f8a9a94 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -93,6 +93,7 @@
   void SimplifyStringEquals(HInvoke* invoke);
   void SimplifyCompare(HInvoke* invoke, bool has_zero_op);
   void SimplifyIsNaN(HInvoke* invoke);
+  void SimplifyFP2Int(HInvoke* invoke);
 
   OptimizingCompilerStats* stats_;
   bool simplification_occurred_ = false;
@@ -1562,26 +1563,71 @@
   invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, condition);
 }
 
+void InstructionSimplifierVisitor::SimplifyFP2Int(HInvoke* invoke) {
+  DCHECK(invoke->IsInvokeStaticOrDirect());
+  uint32_t dex_pc = invoke->GetDexPc();
+  HInstruction* x = invoke->InputAt(0);
+  Primitive::Type type = x->GetType();
+  // Set proper bit pattern for NaN and replace intrinsic with raw version.
+  HInstruction* nan;
+  if (type == Primitive::kPrimDouble) {
+    nan = GetGraph()->GetLongConstant(0x7ff8000000000000L);
+    invoke->SetIntrinsic(Intrinsics::kDoubleDoubleToRawLongBits,
+                         kNeedsEnvironmentOrCache,
+                         kNoSideEffects,
+                         kNoThrow);
+  } else {
+    DCHECK_EQ(type, Primitive::kPrimFloat);
+    nan = GetGraph()->GetIntConstant(0x7fc00000);
+    invoke->SetIntrinsic(Intrinsics::kFloatFloatToRawIntBits,
+                         kNeedsEnvironmentOrCache,
+                         kNoSideEffects,
+                         kNoThrow);
+  }
+  // Test IsNaN(x), which is the same as x != x.
+  HCondition* condition = new (GetGraph()->GetArena()) HNotEqual(x, x, dex_pc);
+  condition->SetBias(ComparisonBias::kLtBias);
+  invoke->GetBlock()->InsertInstructionBefore(condition, invoke->GetNext());
+  // Select between the two.
+  HInstruction* select = new (GetGraph()->GetArena()) HSelect(condition, nan, invoke, dex_pc);
+  invoke->GetBlock()->InsertInstructionBefore(select, condition->GetNext());
+  invoke->ReplaceWithExceptInReplacementAtIndex(select, 0);  // false at index 0
+}
+
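
Editor's note: the rewrite works because Float.floatToIntBits differs from floatToRawIntBits only on NaN inputs, which must collapse to the canonical quiet-NaN pattern, and NaN is exactly the case where x != x holds. An equivalent standalone C++ sketch (an illustration, not part of this patch):

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    static int32_t FloatToRawIntBits(float x) {
      int32_t bits;
      std::memcpy(&bits, &x, sizeof(bits));  // reinterpret, no conversion
      return bits;
    }

    // floatToIntBits semantics: canonicalize every NaN to 0x7fc00000.
    static int32_t FloatToIntBits(float x) {
      return (x != x) ? 0x7fc00000 : FloatToRawIntBits(x);
    }

    int main() {
      assert(FloatToIntBits(1.0f) == FloatToRawIntBits(1.0f));   // non-NaN: raw bits
      assert(FloatToIntBits(std::nanf("0x123")) == 0x7fc00000);  // NaN: canonical
      return 0;
    }

The double version is analogous, with the canonical pattern 0x7ff8000000000000 used above.
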
 void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
-  if (instruction->GetIntrinsic() == Intrinsics::kStringEquals) {
-    SimplifyStringEquals(instruction);
-  } else if (instruction->GetIntrinsic() == Intrinsics::kSystemArrayCopy) {
-    SimplifySystemArrayCopy(instruction);
-  } else if (instruction->GetIntrinsic() == Intrinsics::kIntegerRotateRight ||
-             instruction->GetIntrinsic() == Intrinsics::kLongRotateRight) {
-    SimplifyRotate(instruction, false);
-  } else if (instruction->GetIntrinsic() == Intrinsics::kIntegerRotateLeft ||
-             instruction->GetIntrinsic() == Intrinsics::kLongRotateLeft) {
-    SimplifyRotate(instruction, true);
-  } else if (instruction->GetIntrinsic() == Intrinsics::kIntegerCompare ||
-             instruction->GetIntrinsic() == Intrinsics::kLongCompare) {
-    SimplifyCompare(instruction, /* is_signum */ false);
-  } else if (instruction->GetIntrinsic() == Intrinsics::kIntegerSignum ||
-             instruction->GetIntrinsic() == Intrinsics::kLongSignum) {
-    SimplifyCompare(instruction, /* is_signum */ true);
-  } else if (instruction->GetIntrinsic() == Intrinsics::kFloatIsNaN ||
-             instruction->GetIntrinsic() == Intrinsics::kDoubleIsNaN) {
-    SimplifyIsNaN(instruction);
+  switch (instruction->GetIntrinsic()) {
+    case Intrinsics::kStringEquals:
+      SimplifyStringEquals(instruction);
+      break;
+    case Intrinsics::kSystemArrayCopy:
+      SimplifySystemArrayCopy(instruction);
+      break;
+    case Intrinsics::kIntegerRotateRight:
+    case Intrinsics::kLongRotateRight:
+      SimplifyRotate(instruction, false);
+      break;
+    case Intrinsics::kIntegerRotateLeft:
+    case Intrinsics::kLongRotateLeft:
+      SimplifyRotate(instruction, true);
+      break;
+    case Intrinsics::kIntegerCompare:
+    case Intrinsics::kLongCompare:
+      SimplifyCompare(instruction, /* is_signum */ false);
+      break;
+    case Intrinsics::kIntegerSignum:
+    case Intrinsics::kLongSignum:
+      SimplifyCompare(instruction, /* is_signum */ true);
+      break;
+    case Intrinsics::kFloatIsNaN:
+    case Intrinsics::kDoubleIsNaN:
+      SimplifyIsNaN(instruction);
+      break;
+    case Intrinsics::kFloatFloatToIntBits:
+    case Intrinsics::kDoubleDoubleToLongBits:
+      SimplifyFP2Int(instruction);
+      break;
+    default:
+      break;
   }
 }
 
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
new file mode 100644
index 0000000..db1f9a7
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_simplifier_arm.h"
+#include "instruction_simplifier_shared.h"
+
+namespace art {
+namespace arm {
+
+void InstructionSimplifierArmVisitor::VisitMul(HMul* instruction) {
+  if (TryCombineMultiplyAccumulate(instruction, kArm)) {
+    RecordSimplification();
+  }
+}
+
+}  // namespace arm
+}  // namespace art
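
Editor's note: this new visitor defers to the shared TryCombineMultiplyAccumulate, which fuses a single-use multiply into its adjacent add or subtract. Behaviorally (a sketch only), the rewrite targets expressions of this shape:

    #include <cassert>

    // acc + x * y and acc - x * y each become one MLA/MLS (ARM) or
    // MADD/MSUB (ARM64) instead of a MUL followed by an ADD/SUB.
    static int MultiplyAdd(int acc, int x, int y) { return acc + x * y; }
    static int MultiplySub(int acc, int x, int y) { return acc - x * y; }

    int main() {
      assert(MultiplyAdd(100, 6, 7) == 142);
      assert(MultiplySub(100, 6, 7) == 58);
      return 0;
    }
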
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
new file mode 100644
index 0000000..379b95d
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_ARM_H_
+#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_ARM_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+namespace arm {
+
+class InstructionSimplifierArmVisitor : public HGraphVisitor {
+ public:
+  InstructionSimplifierArmVisitor(HGraph* graph, OptimizingCompilerStats* stats)
+      : HGraphVisitor(graph), stats_(stats) {}
+
+ private:
+  void RecordSimplification() {
+    if (stats_ != nullptr) {
+      stats_->RecordStat(kInstructionSimplificationsArch);
+    }
+  }
+
+  void VisitMul(HMul* instruction) OVERRIDE;
+
+  OptimizingCompilerStats* stats_;
+};
+
+
+class InstructionSimplifierArm : public HOptimization {
+ public:
+  InstructionSimplifierArm(HGraph* graph, OptimizingCompilerStats* stats)
+    : HOptimization(graph, "instruction_simplifier_arm", stats) {}
+
+  void Run() OVERRIDE {
+    InstructionSimplifierArmVisitor visitor(graph_, stats_);
+    visitor.VisitReversePostOrder();
+  }
+};
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_ARM_H_
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 4bcfc54..c2bbdcc 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -17,6 +17,7 @@
 #include "instruction_simplifier_arm64.h"
 
 #include "common_arm64.h"
+#include "instruction_simplifier_shared.h"
 #include "mirror/array-inl.h"
 
 namespace art {
@@ -179,67 +180,53 @@
   return true;
 }
 
-bool InstructionSimplifierArm64Visitor::TrySimpleMultiplyAccumulatePatterns(
-    HMul* mul, HBinaryOperation* input_binop, HInstruction* input_other) {
-  DCHECK(Primitive::IsIntOrLongType(mul->GetType()));
-  DCHECK(input_binop->IsAdd() || input_binop->IsSub());
-  DCHECK_NE(input_binop, input_other);
-  if (!input_binop->HasOnlyOneNonEnvironmentUse()) {
-    return false;
-  }
+bool InstructionSimplifierArm64Visitor::TryMergeNegatedInput(HBinaryOperation* op) {
+  DCHECK(op->IsAnd() || op->IsOr() || op->IsXor()) << op->DebugName();
+  HInstruction* left = op->GetLeft();
+  HInstruction* right = op->GetRight();
 
-  // Try to interpret patterns like
-  //    a * (b <+/-> 1)
-  // as
-  //    (a * b) <+/-> a
-  HInstruction* input_a = input_other;
-  HInstruction* input_b = nullptr;  // Set to a non-null value if we found a pattern to optimize.
-  HInstruction::InstructionKind op_kind;
+  // Only consider the case where there is exactly one Not; with two Nots, De
+  // Morgan's laws should be applied instead.
+  if (left->IsNot() ^ right->IsNot()) {
+    HInstruction* hnot = (left->IsNot() ? left : right);
+    HInstruction* hother = (left->IsNot() ? right : left);
 
-  if (input_binop->IsAdd()) {
-    if ((input_binop->GetConstantRight() != nullptr) && input_binop->GetConstantRight()->IsOne()) {
-      // Interpret
-      //    a * (b + 1)
-      // as
-      //    (a * b) + a
-      input_b = input_binop->GetLeastConstantLeft();
-      op_kind = HInstruction::kAdd;
-    }
-  } else {
-    DCHECK(input_binop->IsSub());
-    if (input_binop->GetRight()->IsConstant() &&
-        input_binop->GetRight()->AsConstant()->IsMinusOne()) {
-      // Interpret
-      //    a * (b - (-1))
-      // as
-      //    a + (a * b)
-      input_b = input_binop->GetLeft();
-      op_kind = HInstruction::kAdd;
-    } else if (input_binop->GetLeft()->IsConstant() &&
-               input_binop->GetLeft()->AsConstant()->IsOne()) {
-      // Interpret
-      //    a * (1 - b)
-      // as
-      //    a - (a * b)
-      input_b = input_binop->GetRight();
-      op_kind = HInstruction::kSub;
+    // Only do the simplification if the Not has only one use and can thus be
+    // safely removed. Even though ARM64 negated bitwise operations do not have
+    // an immediate variant (only register), we still do the simplification when
+    // `hother` is a constant, because it removes an instruction if the constant
+    // cannot be encoded as an immediate:
+    //   mov r0, #large_constant
+    //   mvn r2, r1
+    //   and r0, r0, r2
+    // becomes:
+    //   mov r0, #large_constant
+    //   bic r0, r0, r1
+    if (hnot->HasOnlyOneNonEnvironmentUse()) {
+      // Replace code looking like
+      //    NOT tmp, mask
+      //    AND dst, src, tmp   (respectively ORR, EOR)
+      // with
+      //    BIC dst, src, mask  (respectively ORN, EON)
+      HInstruction* src = hnot->AsNot()->GetInput();
+
+      HArm64BitwiseNegatedRight* neg_op = new (GetGraph()->GetArena())
+          HArm64BitwiseNegatedRight(op->GetType(), op->GetKind(), hother, src, op->GetDexPc());
+
+      op->GetBlock()->ReplaceAndRemoveInstructionWith(op, neg_op);
+      hnot->GetBlock()->RemoveInstruction(hnot);
+      RecordSimplification();
+      return true;
     }
   }
 
-  if (input_b == nullptr) {
-    // We did not find a pattern we can optimize.
-    return false;
-  }
-
-  HArm64MultiplyAccumulate* mulacc = new(GetGraph()->GetArena()) HArm64MultiplyAccumulate(
-      mul->GetType(), op_kind, input_a, input_a, input_b, mul->GetDexPc());
-
-  mul->GetBlock()->ReplaceAndRemoveInstructionWith(mul, mulacc);
-  input_binop->GetBlock()->RemoveInstruction(input_binop);
-
   return false;
 }
 
+void InstructionSimplifierArm64Visitor::VisitAnd(HAnd* instruction) {
+  TryMergeNegatedInput(instruction);
+}
+
 void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
   TryExtractArrayAccessAddress(instruction,
                                instruction->GetArray(),
@@ -255,76 +242,13 @@
 }
 
 void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) {
-  Primitive::Type type = instruction->GetType();
-  if (!Primitive::IsIntOrLongType(type)) {
-    return;
+  if (TryCombineMultiplyAccumulate(instruction, kArm64)) {
+    RecordSimplification();
   }
+}
 
-  HInstruction* use = instruction->HasNonEnvironmentUses()
-      ? instruction->GetUses().GetFirst()->GetUser()
-      : nullptr;
-
-  if (instruction->HasOnlyOneNonEnvironmentUse() && (use->IsAdd() || use->IsSub())) {
-    // Replace code looking like
-    //    MUL tmp, x, y
-    //    SUB dst, acc, tmp
-    // with
-    //    MULSUB dst, acc, x, y
-    // Note that we do not want to (unconditionally) perform the merge when the
-    // multiplication has multiple uses and it can be merged in all of them.
-    // Multiple uses could happen on the same control-flow path, and we would
-    // then increase the amount of work. In the future we could try to evaluate
-    // whether all uses are on different control-flow paths (using dominance and
-    // reverse-dominance information) and only perform the merge when they are.
-    HInstruction* accumulator = nullptr;
-    HBinaryOperation* binop = use->AsBinaryOperation();
-    HInstruction* binop_left = binop->GetLeft();
-    HInstruction* binop_right = binop->GetRight();
-    // Be careful after GVN. This should not happen since the `HMul` has only
-    // one use.
-    DCHECK_NE(binop_left, binop_right);
-    if (binop_right == instruction) {
-      accumulator = binop_left;
-    } else if (use->IsAdd()) {
-      DCHECK_EQ(binop_left, instruction);
-      accumulator = binop_right;
-    }
-
-    if (accumulator != nullptr) {
-      HArm64MultiplyAccumulate* mulacc =
-          new (GetGraph()->GetArena()) HArm64MultiplyAccumulate(type,
-                                                                binop->GetKind(),
-                                                                accumulator,
-                                                                instruction->GetLeft(),
-                                                                instruction->GetRight());
-
-      binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc);
-      DCHECK(!instruction->HasUses());
-      instruction->GetBlock()->RemoveInstruction(instruction);
-      RecordSimplification();
-      return;
-    }
-  }
-
-  // Use multiply accumulate instruction for a few simple patterns.
-  // We prefer not applying the following transformations if the left and
-  // right inputs perform the same operation.
-  // We rely on GVN having squashed the inputs if appropriate. However the
-  // results are still correct even if that did not happen.
-  if (instruction->GetLeft() == instruction->GetRight()) {
-    return;
-  }
-
-  HInstruction* left = instruction->GetLeft();
-  HInstruction* right = instruction->GetRight();
-  if ((right->IsAdd() || right->IsSub()) &&
-      TrySimpleMultiplyAccumulatePatterns(instruction, right->AsBinaryOperation(), left)) {
-    return;
-  }
-  if ((left->IsAdd() || left->IsSub()) &&
-      TrySimpleMultiplyAccumulatePatterns(instruction, left->AsBinaryOperation(), right)) {
-    return;
-  }
+void InstructionSimplifierArm64Visitor::VisitOr(HOr* instruction) {
+  TryMergeNegatedInput(instruction);
 }
 
 void InstructionSimplifierArm64Visitor::VisitShl(HShl* instruction) {
@@ -359,5 +283,9 @@
   }
 }
 
+void InstructionSimplifierArm64Visitor::VisitXor(HXor* instruction) {
+  TryMergeNegatedInput(instruction);
+}
+
 }  // namespace arm64
 }  // namespace art
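
Note: the TryMergeNegatedInput rewrite introduced above is only valid because
op(x, ~y) is exactly what the fused negated-right instructions compute. A
minimal standalone check of the three identities (BIC/ORN/EON semantics),
independent of the ART data structures:

  #include <cassert>
  #include <cstdint>

  // Software models of the AArch64 negated-right bitwise instructions.
  uint32_t Bic(uint32_t x, uint32_t y) { return x & ~y; }
  uint32_t Orn(uint32_t x, uint32_t y) { return x | ~y; }
  uint32_t Eon(uint32_t x, uint32_t y) { return x ^ ~y; }

  int main() {
    uint32_t x = 0xDEADBEEFu, y = 0x0000FFFFu;
    uint32_t tmp = ~y;               // NOT tmp, y  (removed by the pass)
    assert((x & tmp) == Bic(x, y));  // AND dst, x, tmp  ==  BIC dst, x, y
    assert((x | tmp) == Orn(x, y));  // ORR              ==  ORN
    assert((x ^ tmp) == Eon(x, y));  // EOR              ==  EON
    return 0;
  }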
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index b7f490b..cf84587 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -51,18 +51,21 @@
     return TryMergeIntoShifterOperand(use, bitfield_op, true);
   }
 
-  bool TrySimpleMultiplyAccumulatePatterns(HMul* mul,
-                                           HBinaryOperation* input_binop,
-                                           HInstruction* input_other);
+  // For bitwise operations (And/Or/Xor) with a negated input, try to use
+  // a negated bitwise instruction.
+  bool TryMergeNegatedInput(HBinaryOperation* op);
 
   // HInstruction visitors, sorted alphabetically.
+  void VisitAnd(HAnd* instruction) OVERRIDE;
   void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
   void VisitArraySet(HArraySet* instruction) OVERRIDE;
   void VisitMul(HMul* instruction) OVERRIDE;
+  void VisitOr(HOr* instruction) OVERRIDE;
   void VisitShl(HShl* instruction) OVERRIDE;
   void VisitShr(HShr* instruction) OVERRIDE;
   void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
   void VisitUShr(HUShr* instruction) OVERRIDE;
+  void VisitXor(HXor* instruction) OVERRIDE;
 
   OptimizingCompilerStats* stats_;
 };
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
new file mode 100644
index 0000000..45d196f
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_simplifier_shared.h"
+
+namespace art {
+
+namespace {
+
+bool TrySimpleMultiplyAccumulatePatterns(HMul* mul,
+                                         HBinaryOperation* input_binop,
+                                         HInstruction* input_other) {
+  DCHECK(Primitive::IsIntOrLongType(mul->GetType()));
+  DCHECK(input_binop->IsAdd() || input_binop->IsSub());
+  DCHECK_NE(input_binop, input_other);
+  if (!input_binop->HasOnlyOneNonEnvironmentUse()) {
+    return false;
+  }
+
+  // Try to interpret patterns like
+  //    a * (b <+/-> 1)
+  // as
+  //    (a * b) <+/-> a
+  HInstruction* input_a = input_other;
+  HInstruction* input_b = nullptr;  // Set to a non-null value if we found a pattern to optimize.
+  HInstruction::InstructionKind op_kind;
+
+  if (input_binop->IsAdd()) {
+    if ((input_binop->GetConstantRight() != nullptr) && input_binop->GetConstantRight()->IsOne()) {
+      // Interpret
+      //    a * (b + 1)
+      // as
+      //    (a * b) + a
+      input_b = input_binop->GetLeastConstantLeft();
+      op_kind = HInstruction::kAdd;
+    }
+  } else {
+    DCHECK(input_binop->IsSub());
+    if (input_binop->GetRight()->IsConstant() &&
+        input_binop->GetRight()->AsConstant()->IsMinusOne()) {
+      // Interpret
+      //    a * (b - (-1))
+      // as
+      //    a + (a * b)
+      input_b = input_binop->GetLeft();
+      op_kind = HInstruction::kAdd;
+    } else if (input_binop->GetLeft()->IsConstant() &&
+               input_binop->GetLeft()->AsConstant()->IsOne()) {
+      // Interpret
+      //    a * (1 - b)
+      // as
+      //    a - (a * b)
+      input_b = input_binop->GetRight();
+      op_kind = HInstruction::kSub;
+    }
+  }
+
+  if (input_b == nullptr) {
+    // We did not find a pattern we can optimize.
+    return false;
+  }
+
+  ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena();
+  HMultiplyAccumulate* mulacc = new (arena) HMultiplyAccumulate(
+      mul->GetType(), op_kind, input_a, input_a, input_b, mul->GetDexPc());
+
+  mul->GetBlock()->ReplaceAndRemoveInstructionWith(mul, mulacc);
+  input_binop->GetBlock()->RemoveInstruction(input_binop);
+
+  return true;
+}
+
+}  // namespace
+
+bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) {
+  Primitive::Type type = mul->GetType();
+  switch (isa) {
+    case kArm:
+    case kThumb2:
+      if (type != Primitive::kPrimInt) {
+        return false;
+      }
+      break;
+    case kArm64:
+      if (!Primitive::IsIntOrLongType(type)) {
+        return false;
+      }
+      break;
+    default:
+      return false;
+  }
+
+  HInstruction* use = mul->HasNonEnvironmentUses()
+      ? mul->GetUses().GetFirst()->GetUser()
+      : nullptr;
+
+  ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena();
+
+  if (mul->HasOnlyOneNonEnvironmentUse()) {
+    if (use->IsAdd() || use->IsSub()) {
+      // Replace code looking like
+      //    MUL tmp, x, y
+      //    SUB dst, acc, tmp
+      // with
+      //    MULSUB dst, acc, x, y
+      // Note that we do not want to (unconditionally) perform the merge when the
+      // multiplication has multiple uses and it can be merged in all of them.
+      // Multiple uses could happen on the same control-flow path, and we would
+      // then increase the amount of work. In the future we could try to evaluate
+      // whether all uses are on different control-flow paths (using dominance and
+      // reverse-dominance information) and only perform the merge when they are.
+      HInstruction* accumulator = nullptr;
+      HBinaryOperation* binop = use->AsBinaryOperation();
+      HInstruction* binop_left = binop->GetLeft();
+      HInstruction* binop_right = binop->GetRight();
+      // Be careful after GVN. This should not happen since the `HMul` has only
+      // one use.
+      DCHECK_NE(binop_left, binop_right);
+      if (binop_right == mul) {
+        accumulator = binop_left;
+      } else if (use->IsAdd()) {
+        DCHECK_EQ(binop_left, mul);
+        accumulator = binop_right;
+      }
+
+      if (accumulator != nullptr) {
+        HMultiplyAccumulate* mulacc =
+            new (arena) HMultiplyAccumulate(type,
+                                            binop->GetKind(),
+                                            accumulator,
+                                            mul->GetLeft(),
+                                            mul->GetRight());
+
+        binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc);
+        DCHECK(!mul->HasUses());
+        mul->GetBlock()->RemoveInstruction(mul);
+        return true;
+      }
+    } else if (use->IsNeg() && isa != kArm) {
+      HMultiplyAccumulate* mulacc =
+          new (arena) HMultiplyAccumulate(type,
+                                          HInstruction::kSub,
+                                          mul->GetBlock()->GetGraph()->GetConstant(type, 0),
+                                          mul->GetLeft(),
+                                          mul->GetRight());
+
+      use->GetBlock()->ReplaceAndRemoveInstructionWith(use, mulacc);
+      DCHECK(!mul->HasUses());
+      mul->GetBlock()->RemoveInstruction(mul);
+      return true;
+    }
+  }
+
+  // Use multiply accumulate instruction for a few simple patterns.
+  // We prefer not applying the following transformations if the left and
+  // right inputs perform the same operation.
+  // We rely on GVN having squashed the inputs if appropriate. However the
+  // results are still correct even if that did not happen.
+  if (mul->GetLeft() == mul->GetRight()) {
+    return false;
+  }
+
+  HInstruction* left = mul->GetLeft();
+  HInstruction* right = mul->GetRight();
+  if ((right->IsAdd() || right->IsSub()) &&
+      TrySimpleMultiplyAccumulatePatterns(mul, right->AsBinaryOperation(), left)) {
+    return true;
+  }
+  if ((left->IsAdd() || left->IsSub()) &&
+      TrySimpleMultiplyAccumulatePatterns(mul, left->AsBinaryOperation(), right)) {
+    return true;
+  }
+  return false;
+}
+
+}  // namespace art
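
Note: the patterns folded above rest on plain modular-arithmetic identities.
A quick standalone check (uint32_t wraps mod 2^32, matching Java's
two's-complement int semantics; 0xFFFFFFFFu plays the role of -1):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t a = 0x9E3779B9u, b = 0xDEADBEEFu;
    assert(a * (b + 1u) == a * b + a);           // a*(b+1)    -> (a*b) + a
    assert(a * (b - 0xFFFFFFFFu) == a + a * b);  // a*(b-(-1)) -> a + (a*b)
    assert(a * (1u - b) == a - a * b);           // a*(1-b)    -> a - (a*b)
    // HNeg(HMul) is likewise lowered as 0 - a*b, i.e. a multiply-subtract
    // against a zero accumulator.
    return 0;
  }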
diff --git a/compiler/optimizing/instruction_simplifier_shared.h b/compiler/optimizing/instruction_simplifier_shared.h
new file mode 100644
index 0000000..9832ecc
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_shared.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_SHARED_H_
+#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_SHARED_H_
+
+#include "nodes.h"
+
+namespace art {
+
+bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa);
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_SHARED_H_
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 316e86b..3ed0278 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -130,6 +130,10 @@
     case kIntrinsicFloatCvt:
       return ((method.d.data & kIntrinsicFlagToFloatingPoint) == 0) ?
           Intrinsics::kFloatFloatToRawIntBits : Intrinsics::kFloatIntBitsToFloat;
+    case kIntrinsicFloat2Int:
+      return Intrinsics::kFloatFloatToIntBits;
+    case kIntrinsicDouble2Long:
+      return Intrinsics::kDoubleDoubleToLongBits;
 
     // Floating-point tests.
     case kIntrinsicFloatIsInfinite:
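
Note: kIntrinsicFloat2Int/kIntrinsicDouble2Long map the non-raw conversions
Float.floatToIntBits and Double.doubleToLongBits, which differ from the Raw
variants only in collapsing every NaN to the canonical NaN bit pattern. A
C++ sketch of the float case (the helper names here are illustrative):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  uint32_t FloatToRawIntBits(float f) {  // Raw: preserve the exact payload.
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));
    return bits;
  }

  uint32_t FloatToIntBits(float f) {     // Non-raw: canonicalize any NaN.
    uint32_t bits = FloatToRawIntBits(f);
    bool is_nan = (bits & 0x7F800000u) == 0x7F800000u &&
                  (bits & 0x007FFFFFu) != 0u;
    return is_nan ? 0x7FC00000u : bits;  // Java's canonical float NaN.
  }

  int main() {
    uint32_t payload = 0x7FC00123u;      // A quiet NaN with a payload.
    float nan;
    std::memcpy(&nan, &payload, sizeof(nan));
    assert(FloatToRawIntBits(nan) == 0x7FC00123u);  // Payload preserved.
    assert(FloatToIntBits(nan) == 0x7FC00000u);     // Payload collapsed.
    return 0;
  }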
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 2ab50bb..0cec5cc 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -193,6 +193,46 @@
 
 #undef INTRISIC_OPTIMIZATION
 
+//
+// Macros for use in the intrinsics code generators.
+//
+
+// Defines an unimplemented intrinsic: that is, a method call that is recognized as an
+// intrinsic (e.g. to exploit the absence of side effects or exceptions) but is otherwise
+// not handled by this architecture-specific intrinsics code generator. It is eventually
+// implemented as a true method call.
+#define UNIMPLEMENTED_INTRINSIC(Arch, Name)                                               \
+void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
+}                                                                                         \
+void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {    \
+}
+
+// Defines an unreachable intrinsic: that is, a method call that is recognized as an
+// intrinsic, but is always converted into HIR instructions before it reaches any
+// architecture-specific intrinsics code generator.
+#define UNREACHABLE_INTRINSIC(Arch, Name)                                \
+void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \
+  LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic()      \
+             << " should have been converted to HIR";                    \
+}                                                                        \
+void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke) {    \
+  LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic()      \
+             << " should have been converted to HIR";                    \
+}
+#define UNREACHABLE_INTRINSICS(Arch)                \
+UNREACHABLE_INTRINSIC(Arch, FloatFloatToIntBits)    \
+UNREACHABLE_INTRINSIC(Arch, DoubleDoubleToLongBits) \
+UNREACHABLE_INTRINSIC(Arch, FloatIsNaN)             \
+UNREACHABLE_INTRINSIC(Arch, DoubleIsNaN)            \
+UNREACHABLE_INTRINSIC(Arch, IntegerRotateLeft)      \
+UNREACHABLE_INTRINSIC(Arch, LongRotateLeft)         \
+UNREACHABLE_INTRINSIC(Arch, IntegerRotateRight)     \
+UNREACHABLE_INTRINSIC(Arch, LongRotateRight)        \
+UNREACHABLE_INTRINSIC(Arch, IntegerCompare)         \
+UNREACHABLE_INTRINSIC(Arch, LongCompare)            \
+UNREACHABLE_INTRINSIC(Arch, IntegerSignum)          \
+UNREACHABLE_INTRINSIC(Arch, LongSignum)
+
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_INTRINSICS_H_
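
Note: as a concrete illustration, UNIMPLEMENTED_INTRINSIC(Demo, MathRint)
would expand to a pair of empty visitor bodies, leaving the invoke to be
compiled as a regular call (stand-in classes below; ATTRIBUTE_UNUSED is ART's
wrapper for the unused-parameter attribute):

  #define ATTRIBUTE_UNUSED __attribute__((unused))

  // Stand-ins for the per-architecture builder/codegen classes.
  class IntrinsicLocationsBuilderDemo {
   public:
    void VisitMathRint(int* invoke);  // int* stands in for HInvoke*.
  };
  class IntrinsicCodeGeneratorDemo {
   public:
    void VisitMathRint(int* invoke);
  };

  // Expansion of UNIMPLEMENTED_INTRINSIC(Demo, MathRint):
  void IntrinsicLocationsBuilderDemo::VisitMathRint(int* invoke ATTRIBUTE_UNUSED) {
  }
  void IntrinsicCodeGeneratorDemo::VisitMathRint(int* invoke ATTRIBUTE_UNUSED) {
  }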
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index ea8669f..69c9708 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1151,6 +1151,7 @@
 
   __ LoadFromOffset(kLoadWord, LR, TR,
                     QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pIndexOf).Int32Value());
+  CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
   __ blx(LR);
 
   if (slow_path != nullptr) {
@@ -1242,6 +1243,12 @@
 void IntrinsicCodeGeneratorARM::VisitStringNewStringFromChars(HInvoke* invoke) {
   ArmAssembler* assembler = GetAssembler();
 
+  // No need to emit code checking whether `locations->InAt(2)` is a null
+  // pointer, as callers of the native method
+  //
+  //   java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+  //
+  // all include a null check on `data` before calling that method.
   __ LoadFromOffset(
       kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromChars).Int32Value());
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1825,58 +1832,177 @@
   GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickNextAfter);
 }
 
-// Unimplemented intrinsics.
-
-#define UNIMPLEMENTED_INTRINSIC(Name)                                                  \
-void IntrinsicLocationsBuilderARM::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-}                                                                                      \
-void IntrinsicCodeGeneratorARM::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {    \
+void IntrinsicLocationsBuilderARM::VisitIntegerReverse(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
 }
 
-UNIMPLEMENTED_INTRINSIC(IntegerBitCount)
-UNIMPLEMENTED_INTRINSIC(IntegerReverse)
-UNIMPLEMENTED_INTRINSIC(IntegerReverseBytes)
-UNIMPLEMENTED_INTRINSIC(LongBitCount)
-UNIMPLEMENTED_INTRINSIC(LongReverse)
-UNIMPLEMENTED_INTRINSIC(LongReverseBytes)
-UNIMPLEMENTED_INTRINSIC(ShortReverseBytes)
-UNIMPLEMENTED_INTRINSIC(MathMinDoubleDouble)
-UNIMPLEMENTED_INTRINSIC(MathMinFloatFloat)
-UNIMPLEMENTED_INTRINSIC(MathMaxDoubleDouble)
-UNIMPLEMENTED_INTRINSIC(MathMaxFloatFloat)
-UNIMPLEMENTED_INTRINSIC(MathMinLongLong)
-UNIMPLEMENTED_INTRINSIC(MathMaxLongLong)
-UNIMPLEMENTED_INTRINSIC(MathCeil)          // Could be done by changing rounding mode, maybe?
-UNIMPLEMENTED_INTRINSIC(MathFloor)         // Could be done by changing rounding mode, maybe?
-UNIMPLEMENTED_INTRINSIC(MathRint)
-UNIMPLEMENTED_INTRINSIC(MathRoundDouble)   // Could be done by changing rounding mode, maybe?
-UNIMPLEMENTED_INTRINSIC(MathRoundFloat)    // Could be done by changing rounding mode, maybe?
-UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)     // High register pressure.
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
+void IntrinsicCodeGeneratorARM::VisitIntegerReverse(HInvoke* invoke) {
+  ArmAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
 
-UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
+  Register out = locations->Out().AsRegister<Register>();
+  Register in  = locations->InAt(0).AsRegister<Register>();
 
-UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
+  __ rbit(out, in);
+}
 
-// Handled as HIR instructions.
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
-UNIMPLEMENTED_INTRINSIC(LongRotateRight)
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
-UNIMPLEMENTED_INTRINSIC(IntegerSignum)
-UNIMPLEMENTED_INTRINSIC(LongSignum)
+void IntrinsicLocationsBuilderARM::VisitLongReverse(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
 
-#undef UNIMPLEMENTED_INTRINSIC
+void IntrinsicCodeGeneratorARM::VisitLongReverse(HInvoke* invoke) {
+  ArmAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  Register in_reg_lo  = locations->InAt(0).AsRegisterPairLow<Register>();
+  Register in_reg_hi  = locations->InAt(0).AsRegisterPairHigh<Register>();
+  Register out_reg_lo = locations->Out().AsRegisterPairLow<Register>();
+  Register out_reg_hi = locations->Out().AsRegisterPairHigh<Register>();
+
+  __ rbit(out_reg_lo, in_reg_hi);
+  __ rbit(out_reg_hi, in_reg_lo);
+}
+
+void IntrinsicLocationsBuilderARM::VisitIntegerReverseBytes(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitIntegerReverseBytes(HInvoke* invoke) {
+  ArmAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  Register out = locations->Out().AsRegister<Register>();
+  Register in  = locations->InAt(0).AsRegister<Register>();
+
+  __ rev(out, in);
+}
+
+void IntrinsicLocationsBuilderARM::VisitLongReverseBytes(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitLongReverseBytes(HInvoke* invoke) {
+  ArmAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  Register in_reg_lo  = locations->InAt(0).AsRegisterPairLow<Register>();
+  Register in_reg_hi  = locations->InAt(0).AsRegisterPairHigh<Register>();
+  Register out_reg_lo = locations->Out().AsRegisterPairLow<Register>();
+  Register out_reg_hi = locations->Out().AsRegisterPairHigh<Register>();
+
+  __ rev(out_reg_lo, in_reg_hi);
+  __ rev(out_reg_hi, in_reg_lo);
+}
+
+void IntrinsicLocationsBuilderARM::VisitShortReverseBytes(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitShortReverseBytes(HInvoke* invoke) {
+  ArmAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  Register out = locations->Out().AsRegister<Register>();
+  Register in  = locations->InAt(0).AsRegister<Register>();
+
+  __ revsh(out, in);
+}
+
+void IntrinsicLocationsBuilderARM::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->SetInAt(4, Location::RequiresRegister());
+
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  ArmAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  // Check assumption that sizeof(Char) is 2 (used in scaling below).
+  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+  DCHECK_EQ(char_size, 2u);
+
+  // Location of data in char array buffer.
+  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
+
+  // Location of char array data in string.
+  const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
+
+  // void getCharsNoCheck(int srcBegin, int srcEnd, char[] dst, int dstBegin);
+  // Since getChars() calls getCharsNoCheck(), we use registers rather than constants.
+  Register srcObj = locations->InAt(0).AsRegister<Register>();
+  Register srcBegin = locations->InAt(1).AsRegister<Register>();
+  Register srcEnd = locations->InAt(2).AsRegister<Register>();
+  Register dstObj = locations->InAt(3).AsRegister<Register>();
+  Register dstBegin = locations->InAt(4).AsRegister<Register>();
+
+  Register src_ptr = locations->GetTemp(0).AsRegister<Register>();
+  Register src_ptr_end = locations->GetTemp(1).AsRegister<Register>();
+  Register dst_ptr = locations->GetTemp(2).AsRegister<Register>();
+  Register tmp = locations->GetTemp(3).AsRegister<Register>();
+
+  // src range to copy.
+  __ add(src_ptr, srcObj, ShifterOperand(value_offset));
+  __ add(src_ptr_end, src_ptr, ShifterOperand(srcEnd, LSL, 1));
+  __ add(src_ptr, src_ptr, ShifterOperand(srcBegin, LSL, 1));
+
+  // dst to be copied.
+  __ add(dst_ptr, dstObj, ShifterOperand(data_offset));
+  __ add(dst_ptr, dst_ptr, ShifterOperand(dstBegin, LSL, 1));
+
+  // Do the copy.
+  Label loop, done;
+  __ Bind(&loop);
+  __ cmp(src_ptr, ShifterOperand(src_ptr_end));
+  __ b(&done, EQ);
+  __ ldrh(tmp, Address(src_ptr, char_size, Address::PostIndex));
+  __ strh(tmp, Address(dst_ptr, char_size, Address::PostIndex));
+  __ b(&loop);
+  __ Bind(&done);
+}
+
+UNIMPLEMENTED_INTRINSIC(ARM, IntegerBitCount)
+UNIMPLEMENTED_INTRINSIC(ARM, LongBitCount)
+UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble)
+UNIMPLEMENTED_INTRINSIC(ARM, MathMinFloatFloat)
+UNIMPLEMENTED_INTRINSIC(ARM, MathMaxDoubleDouble)
+UNIMPLEMENTED_INTRINSIC(ARM, MathMaxFloatFloat)
+UNIMPLEMENTED_INTRINSIC(ARM, MathMinLongLong)
+UNIMPLEMENTED_INTRINSIC(ARM, MathMaxLongLong)
+UNIMPLEMENTED_INTRINSIC(ARM, MathCeil)          // Could be done by changing rounding mode, maybe?
+UNIMPLEMENTED_INTRINSIC(ARM, MathFloor)         // Could be done by changing rounding mode, maybe?
+UNIMPLEMENTED_INTRINSIC(ARM, MathRint)
+UNIMPLEMENTED_INTRINSIC(ARM, MathRoundDouble)   // Could be done by changing rounding mode, maybe?
+UNIMPLEMENTED_INTRINSIC(ARM, MathRoundFloat)    // Could be done by changing rounding mode, maybe?
+UNIMPLEMENTED_INTRINSIC(ARM, UnsafeCASLong)     // High register pressure.
+UNIMPLEMENTED_INTRINSIC(ARM, SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(ARM, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(ARM, FloatIsInfinite)
+UNIMPLEMENTED_INTRINSIC(ARM, DoubleIsInfinite)
+UNIMPLEMENTED_INTRINSIC(ARM, IntegerHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARM, LongHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARM, IntegerLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARM, LongLowestOneBit)
+
+UNREACHABLE_INTRINSICS(ARM)
 
 #undef __
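
Note: the LongReverse lowering above works because reversing a 64-bit value
is the same as bit-reversing each 32-bit half and swapping the halves (and
likewise rev/rev for LongReverseBytes). A portable C++ model of what the two
rbit instructions compute:

  #include <cassert>
  #include <cstdint>

  uint32_t Rbit32(uint32_t x) {  // Software model of the ARM rbit instruction.
    uint32_t r = 0;
    for (int i = 0; i < 32; ++i) {
      r |= ((x >> i) & 1u) << (31 - i);
    }
    return r;
  }

  uint64_t Reverse64(uint64_t x) {
    uint32_t in_lo = static_cast<uint32_t>(x);
    uint32_t in_hi = static_cast<uint32_t>(x >> 32);
    // rbit(out_lo, in_hi); rbit(out_hi, in_lo); as emitted above.
    return (static_cast<uint64_t>(Rbit32(in_lo)) << 32) | Rbit32(in_hi);
  }

  int main() {
    assert(Reverse64(1u) == 0x8000000000000000ull);  // Bit 0 moves to bit 63.
    return 0;
  }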
 
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 8741fd2..7a4a6ef 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -99,7 +99,8 @@
 //       restored!
 class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  explicit IntrinsicSlowPathARM64(HInvoke* invoke) : invoke_(invoke) { }
+  explicit IntrinsicSlowPathARM64(HInvoke* invoke)
+      : SlowPathCodeARM64(invoke), invoke_(invoke) { }
 
   void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
     CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
@@ -1300,6 +1301,7 @@
   }
 
   __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pIndexOf).Int32Value()));
+  CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
   __ Blr(lr);
 
   if (slow_path != nullptr) {
@@ -1391,6 +1393,12 @@
 void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromChars(HInvoke* invoke) {
   vixl::MacroAssembler* masm = GetVIXLAssembler();
 
+  // No need to emit code checking whether `locations->InAt(2)` is a null
+  // pointer, as callers of the native method
+  //
+  //   java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+  //
+  // all include a null check on `data` before calling that method.
   __ Ldr(lr,
       MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromChars).Int32Value()));
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1601,42 +1609,82 @@
   GenFPToFPCall(invoke, GetVIXLAssembler(), codegen_, kQuickNextAfter);
 }
 
-// Unimplemented intrinsics.
+void IntrinsicLocationsBuilderARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->SetInAt(4, Location::RequiresRegister());
 
-#define UNIMPLEMENTED_INTRINSIC(Name)                                                  \
-void IntrinsicLocationsBuilderARM64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-}                                                                                      \
-void IntrinsicCodeGeneratorARM64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {    \
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
 }
 
-UNIMPLEMENTED_INTRINSIC(IntegerBitCount)
-UNIMPLEMENTED_INTRINSIC(LongBitCount)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
-UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
+void IntrinsicCodeGeneratorARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  vixl::MacroAssembler* masm = GetVIXLAssembler();
+  LocationSummary* locations = invoke->GetLocations();
 
-UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
+  // Check assumption that sizeof(Char) is 2 (used in scaling below).
+  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+  DCHECK_EQ(char_size, 2u);
 
-UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
+  // Location of data in char array buffer.
+  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
 
-// Handled as HIR instructions.
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
-UNIMPLEMENTED_INTRINSIC(LongRotateRight)
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
-UNIMPLEMENTED_INTRINSIC(IntegerSignum)
-UNIMPLEMENTED_INTRINSIC(LongSignum)
+  // Location of char array data in string.
+  const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
 
-#undef UNIMPLEMENTED_INTRINSIC
+  // void getCharsNoCheck(int srcBegin, int srcEnd, char[] dst, int dstBegin);
+  // Since getChars() calls getCharsNoCheck(), we use registers rather than constants.
+  Register srcObj = XRegisterFrom(locations->InAt(0));
+  Register srcBegin = XRegisterFrom(locations->InAt(1));
+  Register srcEnd = XRegisterFrom(locations->InAt(2));
+  Register dstObj = XRegisterFrom(locations->InAt(3));
+  Register dstBegin = XRegisterFrom(locations->InAt(4));
+
+  Register src_ptr = XRegisterFrom(locations->GetTemp(0));
+  Register src_ptr_end = XRegisterFrom(locations->GetTemp(1));
+
+  UseScratchRegisterScope temps(masm);
+  Register dst_ptr = temps.AcquireX();
+  Register tmp = temps.AcquireW();
+
+  // src range to copy.
+  __ Add(src_ptr, srcObj, Operand(value_offset));
+  __ Add(src_ptr_end, src_ptr, Operand(srcEnd, LSL, 1));
+  __ Add(src_ptr, src_ptr, Operand(srcBegin, LSL, 1));
+
+  // dst to be copied.
+  __ Add(dst_ptr, dstObj, Operand(data_offset));
+  __ Add(dst_ptr, dst_ptr, Operand(dstBegin, LSL, 1));
+
+  // Do the copy.
+  vixl::Label loop, done;
+  __ Bind(&loop);
+  __ Cmp(src_ptr, src_ptr_end);
+  __ B(&done, eq);
+  __ Ldrh(tmp, MemOperand(src_ptr, char_size, vixl::PostIndex));
+  __ Strh(tmp, MemOperand(dst_ptr, char_size, vixl::PostIndex));
+  __ B(&loop);
+  __ Bind(&done);
+}
+
+UNIMPLEMENTED_INTRINSIC(ARM64, IntegerBitCount)
+UNIMPLEMENTED_INTRINSIC(ARM64, LongBitCount)
+UNIMPLEMENTED_INTRINSIC(ARM64, SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(ARM64, SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(ARM64, FloatIsInfinite)
+UNIMPLEMENTED_INTRINSIC(ARM64, DoubleIsInfinite)
+UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARM64, LongLowestOneBit)
+
+UNREACHABLE_INTRINSICS(ARM64)
 
 #undef __
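
Note: both the ARM and ARM64 StringGetCharsNoCheck bodies above compute a
half-open [src_ptr, src_ptr_end) halfword range and copy it with a
post-indexed load/store loop. The equivalent logic in plain C++ (value_offset
and data_offset stand in for the String value and char[] data offsets):

  #include <cstddef>
  #include <cstdint>

  void GetCharsNoCheck(const uint8_t* src_obj, int32_t src_begin, int32_t src_end,
                       uint8_t* dst_obj, int32_t dst_begin,
                       size_t value_offset, size_t data_offset) {
    const uint16_t* src =
        reinterpret_cast<const uint16_t*>(src_obj + value_offset) + src_begin;
    const uint16_t* src_end_ptr =
        reinterpret_cast<const uint16_t*>(src_obj + value_offset) + src_end;
    uint16_t* dst =
        reinterpret_cast<uint16_t*>(dst_obj + data_offset) + dst_begin;
    while (src != src_end_ptr) {  // cmp + b.eq in the emitted code.
      *dst++ = *src++;            // ldrh/strh with post-index addressing.
    }
  }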
 
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index 88217b3..b8933e1 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -19,14 +19,16 @@
 
 // All intrinsics supported by the optimizing compiler. Format is name, then whether it is expected
 // to be a HInvokeStaticOrDirect node (compared to HInvokeVirtual), then whether it requires an
-// environment.
+// environment, may have side effects, or may throw exceptions.
 
 #define INTRINSICS_LIST(V) \
   V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+  V(DoubleDoubleToLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
   V(DoubleIsInfinite, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
   V(DoubleIsNaN, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
   V(DoubleLongBitsToDouble, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
   V(FloatFloatToRawIntBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+  V(FloatFloatToIntBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
   V(FloatIsInfinite, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
   V(FloatIsNaN, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
   V(FloatIntBitsToFloat, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
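
Note: INTRINSICS_LIST is an X-macro table: each client passes its own V to
stamp out an enum, a switch, or visitor declarations from the single list. A
minimal self-contained illustration of the pattern (hypothetical two-entry
list in the same five-column shape):

  #define MINI_INTRINSICS_LIST(V) \
    V(FloatFloatToIntBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
    V(DoubleDoubleToLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow)

  enum class Intrinsics {
    kNone,
  #define AS_ENUM(Name, IsStatic, NeedsEnv, SideEffects, Exceptions) k##Name,
    MINI_INTRINSICS_LIST(AS_ENUM)
  #undef AS_ENUM
  };

  // List order determines enum values: kNone = 0, then the table entries.
  static_assert(static_cast<int>(Intrinsics::kDoubleDoubleToLongBits) == 2,
                "enum follows list order");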
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index c862964..5a35dd5 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -99,7 +99,7 @@
 //       restored!
 class IntrinsicSlowPathMIPS : public SlowPathCodeMIPS {
  public:
-  explicit IntrinsicSlowPathMIPS(HInvoke* invoke) : invoke_(invoke) { }
+  explicit IntrinsicSlowPathMIPS(HInvoke* invoke) : SlowPathCodeMIPS(invoke), invoke_(invoke) { }
 
   void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
     CodeGeneratorMIPS* codegen = down_cast<CodeGeneratorMIPS*>(codegen_in);
@@ -407,7 +407,7 @@
              Primitive::kPrimInt,
              IsR2OrNewer(),
              IsR6(),
-             false,
+             /* reverseBits */ false,
              GetAssembler());
 }
 
@@ -421,7 +421,7 @@
              Primitive::kPrimLong,
              IsR2OrNewer(),
              IsR6(),
-             false,
+             /* reverseBits */ false,
              GetAssembler());
 }
 
@@ -435,7 +435,7 @@
              Primitive::kPrimShort,
              IsR2OrNewer(),
              IsR6(),
-             false,
+             /* reverseBits */ false,
              GetAssembler());
 }
 
@@ -475,7 +475,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), false, IsR6(), GetAssembler());
+  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler());
 }
 
 // int java.lang.Long.numberOfLeadingZeros(long i)
@@ -484,7 +484,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), true, IsR6(), GetAssembler());
+  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler());
 }
 
 static void GenNumberOfTrailingZeroes(LocationSummary* locations,
@@ -497,7 +497,6 @@
   Register in;
 
   if (is64bit) {
-    MipsLabel done;
     Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
 
     in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
@@ -588,7 +587,11 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), false, IsR6(), IsR2OrNewer(), GetAssembler());
+  GenNumberOfTrailingZeroes(invoke->GetLocations(),
+                            /* is64bit */ false,
+                            IsR6(),
+                            IsR2OrNewer(),
+                            GetAssembler());
 }
 
 // int java.lang.Long.numberOfTrailingZeros(long i)
@@ -597,203 +600,11 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), true, IsR6(), IsR2OrNewer(), GetAssembler());
-}
-
-enum RotationDirection {
-  kRotateRight,
-  kRotateLeft,
-};
-
-static void GenRotate(HInvoke* invoke,
-                      Primitive::Type type,
-                      bool isR2OrNewer,
-                      RotationDirection direction,
-                      MipsAssembler* assembler) {
-  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
-
-  LocationSummary* locations = invoke->GetLocations();
-  if (invoke->InputAt(1)->IsIntConstant()) {
-    int32_t shift = static_cast<int32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue());
-    if (type == Primitive::kPrimInt) {
-      Register in = locations->InAt(0).AsRegister<Register>();
-      Register out = locations->Out().AsRegister<Register>();
-
-      shift &= 0x1f;
-      if (direction == kRotateLeft) {
-        shift = (32 - shift) & 0x1F;
-      }
-
-      if (isR2OrNewer) {
-        if ((shift != 0) || (out != in)) {
-          __ Rotr(out, in, shift);
-        }
-      } else {
-        if (shift == 0) {
-          if (out != in) {
-            __ Move(out, in);
-          }
-        } else {
-          __ Srl(AT, in, shift);
-          __ Sll(out, in, 32 - shift);
-          __ Or(out, out, AT);
-        }
-      }
-    } else {    // Primitive::kPrimLong
-      Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-      Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register out_lo = locations->Out().AsRegisterPairLow<Register>();
-      Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
-
-      shift &= 0x3f;
-      if (direction == kRotateLeft) {
-        shift = (64 - shift) & 0x3F;
-      }
-
-      if (shift == 0) {
-        __ Move(out_lo, in_lo);
-        __ Move(out_hi, in_hi);
-      } else if (shift == 32) {
-        __ Move(out_lo, in_hi);
-        __ Move(out_hi, in_lo);
-      } else if (shift < 32) {
-        __ Srl(AT, in_lo, shift);
-        __ Sll(out_lo, in_hi, 32 - shift);
-        __ Or(out_lo, out_lo, AT);
-        __ Srl(AT, in_hi, shift);
-        __ Sll(out_hi, in_lo, 32 - shift);
-        __ Or(out_hi, out_hi, AT);
-      } else {
-        __ Sll(AT, in_lo, 64 - shift);
-        __ Srl(out_lo, in_hi, shift - 32);
-        __ Or(out_lo, out_lo, AT);
-        __ Sll(AT, in_hi, 64 - shift);
-        __ Srl(out_hi, in_lo, shift - 32);
-        __ Or(out_hi, out_hi, AT);
-      }
-    }
-  } else {      // !invoke->InputAt(1)->IsIntConstant()
-    Register shamt = locations->InAt(1).AsRegister<Register>();
-    if (type == Primitive::kPrimInt) {
-      Register in = locations->InAt(0).AsRegister<Register>();
-      Register out = locations->Out().AsRegister<Register>();
-
-      if (isR2OrNewer) {
-        if (direction == kRotateRight) {
-          __ Rotrv(out, in, shamt);
-        } else {
-          // negu tmp, shamt
-          __ Subu(TMP, ZERO, shamt);
-          __ Rotrv(out, in, TMP);
-        }
-      } else {
-        if (direction == kRotateRight) {
-          __ Srlv(AT, in, shamt);
-          __ Subu(TMP, ZERO, shamt);
-          __ Sllv(out, in, TMP);
-          __ Or(out, out, AT);
-        } else {
-          __ Sllv(AT, in, shamt);
-          __ Subu(TMP, ZERO, shamt);
-          __ Srlv(out, in, TMP);
-          __ Or(out, out, AT);
-        }
-      }
-    } else {    // Primitive::kPrimLong
-      Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-      Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register out_lo = locations->Out().AsRegisterPairLow<Register>();
-      Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
-
-      MipsLabel done;
-
-      if (direction == kRotateRight) {
-        __ Nor(TMP, ZERO, shamt);
-        __ Srlv(AT, in_lo, shamt);
-        __ Sll(out_lo, in_hi, 1);
-        __ Sllv(out_lo, out_lo, TMP);
-        __ Or(out_lo, out_lo, AT);
-        __ Srlv(AT, in_hi, shamt);
-        __ Sll(out_hi, in_lo, 1);
-        __ Sllv(out_hi, out_hi, TMP);
-        __ Or(out_hi, out_hi, AT);
-      } else {
-        __ Nor(TMP, ZERO, shamt);
-        __ Sllv(AT, in_lo, shamt);
-        __ Srl(out_lo, in_hi, 1);
-        __ Srlv(out_lo, out_lo, TMP);
-        __ Or(out_lo, out_lo, AT);
-        __ Sllv(AT, in_hi, shamt);
-        __ Srl(out_hi, in_lo, 1);
-        __ Srlv(out_hi, out_hi, TMP);
-        __ Or(out_hi, out_hi, AT);
-      }
-
-      __ Andi(TMP, shamt, 32);
-      __ Beqz(TMP, &done);
-      __ Move(TMP, out_hi);
-      __ Move(out_hi, out_lo);
-      __ Move(out_lo, TMP);
-
-      __ Bind(&done);
-    }
-  }
-}
-
-// int java.lang.Integer.rotateRight(int i, int distance)
-void IntrinsicLocationsBuilderMIPS::VisitIntegerRotateRight(HInvoke* invoke) {
-  LocationSummary* locations = new (arena_) LocationSummary(invoke,
-                                                            LocationSummary::kNoCall,
-                                                            kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitIntegerRotateRight(HInvoke* invoke) {
-  GenRotate(invoke, Primitive::kPrimInt, IsR2OrNewer(), kRotateRight, GetAssembler());
-}
-
-// long java.lang.Long.rotateRight(long i, int distance)
-void IntrinsicLocationsBuilderMIPS::VisitLongRotateRight(HInvoke* invoke) {
-  LocationSummary* locations = new (arena_) LocationSummary(invoke,
-                                                            LocationSummary::kNoCall,
-                                                            kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
-  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitLongRotateRight(HInvoke* invoke) {
-  GenRotate(invoke, Primitive::kPrimLong, IsR2OrNewer(), kRotateRight, GetAssembler());
-}
-
-// int java.lang.Integer.rotateLeft(int i, int distance)
-void IntrinsicLocationsBuilderMIPS::VisitIntegerRotateLeft(HInvoke* invoke) {
-  LocationSummary* locations = new (arena_) LocationSummary(invoke,
-                                                            LocationSummary::kNoCall,
-                                                            kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitIntegerRotateLeft(HInvoke* invoke) {
-  GenRotate(invoke, Primitive::kPrimInt, IsR2OrNewer(), kRotateLeft, GetAssembler());
-}
-
-// long java.lang.Long.rotateLeft(long i, int distance)
-void IntrinsicLocationsBuilderMIPS::VisitLongRotateLeft(HInvoke* invoke) {
-  LocationSummary* locations = new (arena_) LocationSummary(invoke,
-                                                            LocationSummary::kNoCall,
-                                                            kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
-  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitLongRotateLeft(HInvoke* invoke) {
-  GenRotate(invoke, Primitive::kPrimLong, IsR2OrNewer(), kRotateLeft, GetAssembler());
+  GenNumberOfTrailingZeroes(invoke->GetLocations(),
+                            /* is64bit */ true,
+                            IsR6(),
+                            IsR2OrNewer(),
+                            GetAssembler());
 }
 
 // int java.lang.Integer.reverse(int)
@@ -806,7 +617,7 @@
              Primitive::kPrimInt,
              IsR2OrNewer(),
              IsR6(),
-             true,
+             /* reverseBits */ true,
              GetAssembler());
 }
 
@@ -820,10 +631,561 @@
              Primitive::kPrimLong,
              IsR2OrNewer(),
              IsR6(),
-             true,
+             /* reverseBits */ true,
              GetAssembler());
 }
 
+static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+}
+
+static void MathAbsFP(LocationSummary* locations, bool is64bit, MipsAssembler* assembler) {
+  FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
+  FRegister out = locations->Out().AsFpuRegister<FRegister>();
+
+  if (is64bit) {
+    __ AbsD(out, in);
+  } else {
+    __ AbsS(out, in);
+  }
+}
+
+// double java.lang.Math.abs(double)
+void IntrinsicLocationsBuilderMIPS::VisitMathAbsDouble(HInvoke* invoke) {
+  CreateFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathAbsDouble(HInvoke* invoke) {
+  MathAbsFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+}
+
+// float java.lang.Math.abs(float)
+void IntrinsicLocationsBuilderMIPS::VisitMathAbsFloat(HInvoke* invoke) {
+  CreateFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathAbsFloat(HInvoke* invoke) {
+  MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+}
+
+static void GenAbsInteger(LocationSummary* locations, bool is64bit, MipsAssembler* assembler) {
+  if (is64bit) {
+    Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
+    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
+    Register out_lo = locations->Out().AsRegisterPairLow<Register>();
+    Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
+
+    // The comments in this section show the analogous operations which would
+    // be performed if we had 64-bit registers "in" and "out".
+    // __ Dsra32(AT, in, 31);
+    __ Sra(AT, in_hi, 31);
+    // __ Xor(out, in, AT);
+    __ Xor(TMP, in_lo, AT);
+    __ Xor(out_hi, in_hi, AT);
+    // __ Dsubu(out, out, AT);
+    __ Subu(out_lo, TMP, AT);
+    __ Sltu(TMP, out_lo, TMP);
+    __ Addu(out_hi, out_hi, TMP);
+  } else {
+    Register in = locations->InAt(0).AsRegister<Register>();
+    Register out = locations->Out().AsRegister<Register>();
+
+    __ Sra(AT, in, 31);
+    __ Xor(out, in, AT);
+    __ Subu(out, out, AT);
+  }
+}
+
+// int java.lang.Math.abs(int)
+void IntrinsicLocationsBuilderMIPS::VisitMathAbsInt(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathAbsInt(HInvoke* invoke) {
+  GenAbsInteger(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+}
+
+// long java.lang.Math.abs(long)
+void IntrinsicLocationsBuilderMIPS::VisitMathAbsLong(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathAbsLong(HInvoke* invoke) {
+  GenAbsInteger(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+}
+
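
Note: GenAbsInteger above is the classic branchless abs (XOR with the sign
mask, then subtract the mask), with the 64-bit case split across a register
pair and the carry recovered via Sltu. A portable C++ model (arithmetic right
shift of negative values assumed, as Sra provides on MIPS):

  #include <cassert>
  #include <cstdint>

  int32_t Abs32(int32_t in) {
    int32_t at = in >> 31;          // Sra: 0 or 0xFFFFFFFF (the sign mask).
    return (in ^ at) - at;          // Xor, then Subu.
  }

  int64_t Abs64ViaPairs(int64_t in) {
    uint32_t in_lo = static_cast<uint32_t>(static_cast<uint64_t>(in));
    uint32_t in_hi = static_cast<uint32_t>(static_cast<uint64_t>(in) >> 32);
    uint32_t at = static_cast<uint32_t>(static_cast<int32_t>(in_hi) >> 31);
    uint32_t tmp = in_lo ^ at;            // Xor (low half).
    uint32_t out_hi = in_hi ^ at;         // Xor (high half).
    uint32_t out_lo = tmp - at;           // Subu; -0xFFFFFFFF == +1 mod 2^32.
    out_hi += (out_lo < tmp) ? 1u : 0u;   // Sltu + Addu: propagate the carry.
    return static_cast<int64_t>((static_cast<uint64_t>(out_hi) << 32) | out_lo);
  }

  int main() {
    assert(Abs32(-5) == 5 && Abs32(7) == 7);
    assert(Abs64ViaPairs(-(int64_t{1} << 40)) == (int64_t{1} << 40));
    return 0;
  }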
+static void GenMinMaxFP(LocationSummary* locations,
+                        bool is_min,
+                        Primitive::Type type,
+                        bool is_R6,
+                        MipsAssembler* assembler) {
+  FRegister out = locations->Out().AsFpuRegister<FRegister>();
+  FRegister a = locations->InAt(0).AsFpuRegister<FRegister>();
+  FRegister b = locations->InAt(1).AsFpuRegister<FRegister>();
+
+  if (is_R6) {
+    MipsLabel noNaNs;
+    MipsLabel done;
+    FRegister ftmp = ((out != a) && (out != b)) ? out : FTMP;
+
+    // When Java computes min/max it prefers a NaN to a number; the
+    // behavior of MIPSR6 is to prefer numbers to NaNs, i.e., if one of
+    // the inputs is a NaN and the other is a valid number, the MIPS
+    // instruction will return the number; Java wants the NaN value
+    // returned. This is why there is extra logic preceding the use of
+    // the MIPS min.fmt/max.fmt instructions. If either a or b holds a
+    // NaN, return the NaN; otherwise return the min/max.
+    if (type == Primitive::kPrimDouble) {
+      __ CmpUnD(FTMP, a, b);
+      __ Bc1eqz(FTMP, &noNaNs);
+
+      // One of the inputs is a NaN.
+      __ CmpEqD(ftmp, a, a);
+      // If a == a then b is the NaN, otherwise a is the NaN.
+      __ SelD(ftmp, a, b);
+
+      if (ftmp != out) {
+        __ MovD(out, ftmp);
+      }
+
+      __ B(&done);
+
+      __ Bind(&noNaNs);
+
+      if (is_min) {
+        __ MinD(out, a, b);
+      } else {
+        __ MaxD(out, a, b);
+      }
+    } else {
+      DCHECK_EQ(type, Primitive::kPrimFloat);
+      __ CmpUnS(FTMP, a, b);
+      __ Bc1eqz(FTMP, &noNaNs);
+
+      // One of the inputs is a NaN.
+      __ CmpEqS(ftmp, a, a);
+      // If a == a then b is the NaN, otherwise a is the NaN.
+      __ SelS(ftmp, a, b);
+
+      if (ftmp != out) {
+        __ MovS(out, ftmp);
+      }
+
+      __ B(&done);
+
+      __ Bind(&noNaNs);
+
+      if (is_min) {
+        __ MinS(out, a, b);
+      } else {
+        __ MaxS(out, a, b);
+      }
+    }
+
+    __ Bind(&done);
+  } else {
+    MipsLabel ordered;
+    MipsLabel compare;
+    MipsLabel select;
+    MipsLabel done;
+
+    if (type == Primitive::kPrimDouble) {
+      __ CunD(a, b);
+    } else {
+      DCHECK_EQ(type, Primitive::kPrimFloat);
+      __ CunS(a, b);
+    }
+    __ Bc1f(&ordered);
+
+    // a or b (or both) is a NaN. Return the NaN: a if b == b, else b.
+    if (type == Primitive::kPrimDouble) {
+      __ CeqD(b, b);
+    } else {
+      __ CeqS(b, b);
+    }
+    __ B(&select);
+
+    __ Bind(&ordered);
+
+    // Neither is a NaN.
+    // a == b? (-0.0 compares equal with +0.0)
+    // If equal, handle zeroes, else compare further.
+    if (type == Primitive::kPrimDouble) {
+      __ CeqD(a, b);
+    } else {
+      __ CeqS(a, b);
+    }
+    __ Bc1f(&compare);
+
+    // a == b either bit for bit or one is -0.0 and the other is +0.0.
+    if (type == Primitive::kPrimDouble) {
+      __ MoveFromFpuHigh(TMP, a);
+      __ MoveFromFpuHigh(AT, b);
+    } else {
+      __ Mfc1(TMP, a);
+      __ Mfc1(AT, b);
+    }
+
+    if (is_min) {
+      // -0.0 prevails over +0.0.
+      __ Or(TMP, TMP, AT);
+    } else {
+      // +0.0 prevails over -0.0.
+      __ And(TMP, TMP, AT);
+    }
+
+    if (type == Primitive::kPrimDouble) {
+      __ Mfc1(AT, a);
+      __ Mtc1(AT, out);
+      __ MoveToFpuHigh(TMP, out);
+    } else {
+      __ Mtc1(TMP, out);
+    }
+    __ B(&done);
+
+    __ Bind(&compare);
+
+    if (type == Primitive::kPrimDouble) {
+      if (is_min) {
+        // return (a <= b) ? a : b;
+        __ ColeD(a, b);
+      } else {
+        // return (a >= b) ? a : b;
+        __ ColeD(b, a);  // b <= a
+      }
+    } else {
+      if (is_min) {
+        // return (a <= b) ? a : b;
+        __ ColeS(a, b);
+      } else {
+        // return (a >= b) ? a : b;
+        __ ColeS(b, a);  // b <= a
+      }
+    }
+
+    __ Bind(&select);
+
+    if (type == Primitive::kPrimDouble) {
+      __ MovtD(out, a);
+      __ MovfD(out, b);
+    } else {
+      __ MovtS(out, a);
+      __ MovfS(out, b);
+    }
+
+    __ Bind(&done);
+  }
+}
+
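
Note: the extra NaN and signed-zero handling above exists because Java's
Math.min/max must return a NaN when either input is one, and must order -0.0
below +0.0, neither of which the bare min.fmt/max.fmt (or the pre-R6 c.cond
path) guarantees. A reference model of the required semantics in C++ (a
sketch of the behavior, not ART code):

  #include <cassert>
  #include <cmath>
  #include <cstdint>
  #include <cstring>

  double JavaMinDouble(double a, double b) {
    if (std::isnan(a) || std::isnan(b)) {
      return std::nan("");             // The NaN wins, unlike min.fmt.
    }
    if (a == b) {
      // -0.0 == +0.0 numerically, so OR the bit patterns: -0.0 prevails for
      // min (the max path ANDs instead, so +0.0 prevails).
      uint64_t ba, bb, br;
      std::memcpy(&ba, &a, sizeof(ba));
      std::memcpy(&bb, &b, sizeof(bb));
      br = ba | bb;
      double out;
      std::memcpy(&out, &br, sizeof(out));
      return out;
    }
    return (a <= b) ? a : b;
  }

  int main() {
    assert(std::isnan(JavaMinDouble(std::nan(""), 1.0)));
    assert(std::signbit(JavaMinDouble(0.0, -0.0)));  // min(+0.0, -0.0) == -0.0
    return 0;
  }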
+static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetInAt(1, Location::RequiresFpuRegister());
+  locations->SetOut(Location::RequiresFpuRegister(), Location::kOutputOverlap);
+}
+
+// double java.lang.Math.min(double, double)
+void IntrinsicLocationsBuilderMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) {
+  CreateFPFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) {
+  GenMinMaxFP(invoke->GetLocations(),
+              /* is_min */ true,
+              Primitive::kPrimDouble,
+              IsR6(),
+              GetAssembler());
+}
+
+// float java.lang.Math.min(float, float)
+void IntrinsicLocationsBuilderMIPS::VisitMathMinFloatFloat(HInvoke* invoke) {
+  CreateFPFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathMinFloatFloat(HInvoke* invoke) {
+  GenMinMaxFP(invoke->GetLocations(),
+              /* is_min */ true,
+              Primitive::kPrimFloat,
+              IsR6(),
+              GetAssembler());
+}
+
+// double java.lang.Math.max(double, double)
+void IntrinsicLocationsBuilderMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) {
+  CreateFPFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) {
+  GenMinMaxFP(invoke->GetLocations(),
+              /* is_min */ false,
+              Primitive::kPrimDouble,
+              IsR6(),
+              GetAssembler());
+}
+
+// float java.lang.Math.max(float, float)
+void IntrinsicLocationsBuilderMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) {
+  CreateFPFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) {
+  GenMinMaxFP(invoke->GetLocations(),
+              /* is_min */ false,
+              Primitive::kPrimFloat,
+              IsR6(),
+              GetAssembler());
+}
+
+static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+static void GenMinMax(LocationSummary* locations,
+                      bool is_min,
+                      Primitive::Type type,
+                      bool is_R6,
+                      MipsAssembler* assembler) {
+  if (is_R6) {
+    // Some architectures, such as ARM and MIPS (prior to r6), have a
+    // conditional move instruction which only changes the target
+    // (output) register if the condition is true (MIPS prior to r6 had
+    // MOVF, MOVT, MOVN, and MOVZ). The SELEQZ and SELNEZ instructions
+    // always change the target (output) register.  If the condition is
+    // true the output register gets the contents of the "rs" register;
+    // otherwise, the output register is set to zero. One consequence
+    // of this is that to implement something like "rd = c==0 ? rs : rt"
+    // MIPS32r6 needs to use a pair of SELEQZ/SELNEZ instructions.
+    // After executing this pair of instructions one of the output
+    // registers from the pair will necessarily contain zero. Then the
+    // code ORs the output registers from the SELEQZ/SELNEZ instructions
+    // to get the final result.
+    //
+    // The initial test to see if the output register is the same as the
+    // first input register is needed to make sure that the value in the
+    // first input register isn't clobbered before we've finished
+    // computing the output value. The logic in the corresponding else
+    // clause performs the same task but makes sure the second input
+    // register isn't clobbered in the event that it's the same register
+    // as the output register; the else clause also handles the case
+    // where the output register is distinct from both the first and the
+    // second input registers.
+    if (type == Primitive::kPrimLong) {
+      Register a_lo = locations->InAt(0).AsRegisterPairLow<Register>();
+      Register a_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
+      Register b_lo = locations->InAt(1).AsRegisterPairLow<Register>();
+      Register b_hi = locations->InAt(1).AsRegisterPairHigh<Register>();
+      Register out_lo = locations->Out().AsRegisterPairLow<Register>();
+      Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
+
+      MipsLabel compare_done;
+
+      if (a_lo == b_lo) {
+        if (out_lo != a_lo) {
+          __ Move(out_lo, a_lo);
+          __ Move(out_hi, a_hi);
+        }
+      } else {
+        __ Slt(TMP, b_hi, a_hi);
+        __ Bne(b_hi, a_hi, &compare_done);
+
+        __ Sltu(TMP, b_lo, a_lo);
+
+        __ Bind(&compare_done);
+
+        if (is_min) {
+          __ Seleqz(AT, a_lo, TMP);
+          __ Selnez(out_lo, b_lo, TMP);  // Safe even if out_lo == a_lo/b_lo
+                                         // because at this point we're
+                                         // done using a_lo/b_lo.
+        } else {
+          __ Selnez(AT, a_lo, TMP);
+          __ Seleqz(out_lo, b_lo, TMP);  // ditto
+        }
+        __ Or(out_lo, out_lo, AT);
+        if (is_min) {
+          __ Seleqz(AT, a_hi, TMP);
+          __ Selnez(out_hi, b_hi, TMP);  // ditto but for out_hi & a_hi/b_hi
+        } else {
+          __ Selnez(AT, a_hi, TMP);
+          __ Seleqz(out_hi, b_hi, TMP);  // ditto but for out_hi & a_hi/b_hi
+        }
+        __ Or(out_hi, out_hi, AT);
+      }
+    } else {
+      DCHECK_EQ(type, Primitive::kPrimInt);
+      Register a = locations->InAt(0).AsRegister<Register>();
+      Register b = locations->InAt(1).AsRegister<Register>();
+      Register out = locations->Out().AsRegister<Register>();
+
+      if (a == b) {
+        if (out != a) {
+          __ Move(out, a);
+        }
+      } else {
+        __ Slt(AT, b, a);
+        if (is_min) {
+          __ Seleqz(TMP, a, AT);
+          __ Selnez(AT, b, AT);
+        } else {
+          __ Selnez(TMP, a, AT);
+          __ Seleqz(AT, b, AT);
+        }
+        __ Or(out, TMP, AT);
+      }
+    }
+  } else {
+    if (type == Primitive::kPrimLong) {
+      Register a_lo = locations->InAt(0).AsRegisterPairLow<Register>();
+      Register a_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
+      Register b_lo = locations->InAt(1).AsRegisterPairLow<Register>();
+      Register b_hi = locations->InAt(1).AsRegisterPairHigh<Register>();
+      Register out_lo = locations->Out().AsRegisterPairLow<Register>();
+      Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
+
+      MipsLabel compare_done;
+
+      if (a_lo == b_lo) {
+        if (out_lo != a_lo) {
+          __ Move(out_lo, a_lo);
+          __ Move(out_hi, a_hi);
+        }
+      } else {
+        __ Slt(TMP, a_hi, b_hi);
+        __ Bne(a_hi, b_hi, &compare_done);
+
+        __ Sltu(TMP, a_lo, b_lo);
+
+        __ Bind(&compare_done);
+
+        if (is_min) {
+          if (out_lo != a_lo) {
+            __ Movn(out_hi, a_hi, TMP);
+            __ Movn(out_lo, a_lo, TMP);
+          }
+          if (out_lo != b_lo) {
+            __ Movz(out_hi, b_hi, TMP);
+            __ Movz(out_lo, b_lo, TMP);
+          }
+        } else {
+          if (out_lo != a_lo) {
+            __ Movz(out_hi, a_hi, TMP);
+            __ Movz(out_lo, a_lo, TMP);
+          }
+          if (out_lo != b_lo) {
+            __ Movn(out_hi, b_hi, TMP);
+            __ Movn(out_lo, b_lo, TMP);
+          }
+        }
+      }
+    } else {
+      DCHECK_EQ(type, Primitive::kPrimInt);
+      Register a = locations->InAt(0).AsRegister<Register>();
+      Register b = locations->InAt(1).AsRegister<Register>();
+      Register out = locations->Out().AsRegister<Register>();
+
+      if (a == b) {
+        if (out != a) {
+          __ Move(out, a);
+        }
+      } else {
+        __ Slt(AT, a, b);
+        if (is_min) {
+          if (out != a) {
+            __ Movn(out, a, AT);
+          }
+          if (out != b) {
+            __ Movz(out, b, AT);
+          }
+        } else {
+          if (out != a) {
+            __ Movz(out, a, AT);
+          }
+          if (out != b) {
+            __ Movn(out, b, AT);
+          }
+        }
+      }
+    }
+  }
+}
+
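The SELEQZ/SELNEZ pairing that the comment above describes can be modeled in scalar C++. This is a hypothetical illustration of the idiom, not ART code:

#include <cstdint>

// rd = (c == 0) ? rs : rt, built the R6 way: each select zeroes its
// result when the condition disagrees with it, so exactly one partial
// result survives and OR-ing the pair yields the selected value.
static uint32_t SelEqzSelNezOr(uint32_t c, uint32_t rs, uint32_t rt) {
  uint32_t from_rs = (c == 0) ? rs : 0u;  // SELEQZ from_rs, rs, c
  uint32_t from_rt = (c != 0) ? rt : 0u;  // SELNEZ from_rt, rt, c
  return from_rs | from_rt;               // OR merges the pair
}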
+// int java.lang.Math.min(int, int)
+void IntrinsicLocationsBuilderMIPS::VisitMathMinIntInt(HInvoke* invoke) {
+  CreateIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathMinIntInt(HInvoke* invoke) {
+  GenMinMax(invoke->GetLocations(),
+            /* is_min */ true,
+            Primitive::kPrimInt,
+            IsR6(),
+            GetAssembler());
+}
+
+// long java.lang.Math.min(long, long)
+void IntrinsicLocationsBuilderMIPS::VisitMathMinLongLong(HInvoke* invoke) {
+  CreateIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathMinLongLong(HInvoke* invoke) {
+  GenMinMax(invoke->GetLocations(),
+            /* is_min */ true,
+            Primitive::kPrimLong,
+            IsR6(),
+            GetAssembler());
+}
+
+// int java.lang.Math.max(int, int)
+void IntrinsicLocationsBuilderMIPS::VisitMathMaxIntInt(HInvoke* invoke) {
+  CreateIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathMaxIntInt(HInvoke* invoke) {
+  GenMinMax(invoke->GetLocations(),
+            /* is_min */ false,
+            Primitive::kPrimInt,
+            IsR6(),
+            GetAssembler());
+}
+
+// long java.lang.Math.max(long, long)
+void IntrinsicLocationsBuilderMIPS::VisitMathMaxLongLong(HInvoke* invoke) {
+  CreateIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathMaxLongLong(HInvoke* invoke) {
+  GenMinMax(invoke->GetLocations(),
+            /* is_min */ false,
+            Primitive::kPrimLong,
+            IsR6(),
+            GetAssembler());
+}
+
+// double java.lang.Math.sqrt(double)
+void IntrinsicLocationsBuilderMIPS::VisitMathSqrt(HInvoke* invoke) {
+  CreateFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathSqrt(HInvoke* invoke) {
+  LocationSummary* locations = invoke->GetLocations();
+  MipsAssembler* assembler = GetAssembler();
+  FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
+  FRegister out = locations->Out().AsFpuRegister<FRegister>();
+
+  __ SqrtD(out, in);
+}
+
 // byte libcore.io.Memory.peekByte(long address)
 void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
   CreateIntToIntLocations(arena_, invoke);
@@ -1140,101 +1502,72 @@
   __ Bind(&end);
 }
 
-// Unimplemented intrinsics.
+UNIMPLEMENTED_INTRINSIC(MIPS, IntegerBitCount)
+UNIMPLEMENTED_INTRINSIC(MIPS, LongBitCount)
 
-#define UNIMPLEMENTED_INTRINSIC(Name)                                                  \
-void IntrinsicLocationsBuilderMIPS::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-}                                                                                      \
-void IntrinsicCodeGeneratorMIPS::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {    \
-}
+UNIMPLEMENTED_INTRINSIC(MIPS, MathCeil)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathFloor)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathRint)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundFloat)
+UNIMPLEMENTED_INTRINSIC(MIPS, ThreadCurrentThread)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGet)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetVolatile)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLong)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLongVolatile)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetObject)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetObjectVolatile)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePut)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutOrdered)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutVolatile)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObject)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObjectOrdered)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObjectVolatile)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLong)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLongOrdered)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLongVolatile)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASInt)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASObject)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringCompareTo)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringIndexOf)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringIndexOfAfter)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringNewStringFromBytes)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringNewStringFromChars)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringNewStringFromString)
 
-UNIMPLEMENTED_INTRINSIC(IntegerBitCount)
-UNIMPLEMENTED_INTRINSIC(LongBitCount)
+UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringGetCharsNoCheck)
+UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopy)
 
-UNIMPLEMENTED_INTRINSIC(MathAbsDouble)
-UNIMPLEMENTED_INTRINSIC(MathAbsFloat)
-UNIMPLEMENTED_INTRINSIC(MathAbsInt)
-UNIMPLEMENTED_INTRINSIC(MathAbsLong)
-UNIMPLEMENTED_INTRINSIC(MathMinDoubleDouble)
-UNIMPLEMENTED_INTRINSIC(MathMinFloatFloat)
-UNIMPLEMENTED_INTRINSIC(MathMaxDoubleDouble)
-UNIMPLEMENTED_INTRINSIC(MathMaxFloatFloat)
-UNIMPLEMENTED_INTRINSIC(MathMinIntInt)
-UNIMPLEMENTED_INTRINSIC(MathMinLongLong)
-UNIMPLEMENTED_INTRINSIC(MathMaxIntInt)
-UNIMPLEMENTED_INTRINSIC(MathMaxLongLong)
-UNIMPLEMENTED_INTRINSIC(MathSqrt)
-UNIMPLEMENTED_INTRINSIC(MathCeil)
-UNIMPLEMENTED_INTRINSIC(MathFloor)
-UNIMPLEMENTED_INTRINSIC(MathRint)
-UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
-UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(ThreadCurrentThread)
-UNIMPLEMENTED_INTRINSIC(UnsafeGet)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetLong)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetLongVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetObject)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePut)
-UNIMPLEMENTED_INTRINSIC(UnsafePutOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObject)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObjectOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLong)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLongOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLongVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafeCASInt)
-UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)
-UNIMPLEMENTED_INTRINSIC(UnsafeCASObject)
-UNIMPLEMENTED_INTRINSIC(StringCompareTo)
-UNIMPLEMENTED_INTRINSIC(StringIndexOf)
-UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromBytes)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromChars)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromString)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathCos)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathSin)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathAcos)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathAsin)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathAtan)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathAtan2)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathCbrt)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathCosh)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathExp)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathExpm1)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathHypot)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathLog)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathLog10)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathNextAfter)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathSinh)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathTan)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathTanh)
 
-UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(MIPS, FloatIsInfinite)
+UNIMPLEMENTED_INTRINSIC(MIPS, DoubleIsInfinite)
 
-UNIMPLEMENTED_INTRINSIC(MathCos)
-UNIMPLEMENTED_INTRINSIC(MathSin)
-UNIMPLEMENTED_INTRINSIC(MathAcos)
-UNIMPLEMENTED_INTRINSIC(MathAsin)
-UNIMPLEMENTED_INTRINSIC(MathAtan)
-UNIMPLEMENTED_INTRINSIC(MathAtan2)
-UNIMPLEMENTED_INTRINSIC(MathCbrt)
-UNIMPLEMENTED_INTRINSIC(MathCosh)
-UNIMPLEMENTED_INTRINSIC(MathExp)
-UNIMPLEMENTED_INTRINSIC(MathExpm1)
-UNIMPLEMENTED_INTRINSIC(MathHypot)
-UNIMPLEMENTED_INTRINSIC(MathLog)
-UNIMPLEMENTED_INTRINSIC(MathLog10)
-UNIMPLEMENTED_INTRINSIC(MathNextAfter)
-UNIMPLEMENTED_INTRINSIC(MathSinh)
-UNIMPLEMENTED_INTRINSIC(MathTan)
-UNIMPLEMENTED_INTRINSIC(MathTanh)
+UNIMPLEMENTED_INTRINSIC(MIPS, IntegerHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(MIPS, LongHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(MIPS, IntegerLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(MIPS, LongLowestOneBit)
 
-UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-
-UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
-
-// Handled as HIR instructions.
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
-UNIMPLEMENTED_INTRINSIC(IntegerSignum)
-UNIMPLEMENTED_INTRINSIC(LongSignum)
-
-#undef UNIMPLEMENTED_INTRINSIC
+UNREACHABLE_INTRINSICS(MIPS)
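For readers without intrinsics.h at hand, the arch-qualified macro used above is presumably a shared, parameterized version of the per-file macro deleted in this hunk, along these lines (a sketch inferred from the deleted definition, not the verbatim header):

#define UNIMPLEMENTED_INTRINSIC(Arch, Name)                                                \
void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {} \
void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {}

UNREACHABLE_INTRINSICS(MIPS) presumably absorbs the entries previously kept under "Handled as HIR instructions", which are rewritten before code generation and so should never reach the backend.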
 
 #undef __
 
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index cf3a365..45611f0 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -87,7 +87,8 @@
 //       restored!
 class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  explicit IntrinsicSlowPathMIPS64(HInvoke* invoke) : invoke_(invoke) { }
+  explicit IntrinsicSlowPathMIPS64(HInvoke* invoke)
+      : SlowPathCodeMIPS64(invoke), invoke_(invoke) { }
 
   void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
     CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in);
@@ -339,130 +340,6 @@
   GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
 }
 
-static void GenRotateRight(HInvoke* invoke,
-                           Primitive::Type type,
-                           Mips64Assembler* assembler) {
-  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
-
-  LocationSummary* locations = invoke->GetLocations();
-  GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
-  if (invoke->InputAt(1)->IsIntConstant()) {
-    uint32_t shift = static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue());
-    if (type == Primitive::kPrimInt) {
-      shift &= 0x1f;
-      __ Rotr(out, in, shift);
-    } else {
-      shift &= 0x3f;
-      if (shift < 32) {
-        __ Drotr(out, in, shift);
-      } else {
-        shift &= 0x1f;
-        __ Drotr32(out, in, shift);
-      }
-    }
-  } else {
-    GpuRegister shamt = locations->InAt(1).AsRegister<GpuRegister>();
-    if (type == Primitive::kPrimInt) {
-      __ Rotrv(out, in, shamt);
-    } else {
-      __ Drotrv(out, in, shamt);
-    }
-  }
-}
-
-// int java.lang.Integer.rotateRight(int i, int distance)
-void IntrinsicLocationsBuilderMIPS64::VisitIntegerRotateRight(HInvoke* invoke) {
-  LocationSummary* locations = new (arena_) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
-                                                           kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitIntegerRotateRight(HInvoke* invoke) {
-  GenRotateRight(invoke, Primitive::kPrimInt, GetAssembler());
-}
-
-// long java.lang.Long.rotateRight(long i, int distance)
-void IntrinsicLocationsBuilderMIPS64::VisitLongRotateRight(HInvoke* invoke) {
-  LocationSummary* locations = new (arena_) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
-                                                           kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitLongRotateRight(HInvoke* invoke) {
-  GenRotateRight(invoke, Primitive::kPrimLong, GetAssembler());
-}
-
-static void GenRotateLeft(HInvoke* invoke,
-                           Primitive::Type type,
-                           Mips64Assembler* assembler) {
-  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
-
-  LocationSummary* locations = invoke->GetLocations();
-  GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
-  if (invoke->InputAt(1)->IsIntConstant()) {
-    int32_t shift = -static_cast<int32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue());
-    if (type == Primitive::kPrimInt) {
-      shift &= 0x1f;
-      __ Rotr(out, in, shift);
-    } else {
-      shift &= 0x3f;
-      if (shift < 32) {
-        __ Drotr(out, in, shift);
-      } else {
-        shift &= 0x1f;
-        __ Drotr32(out, in, shift);
-      }
-    }
-  } else {
-    GpuRegister shamt = locations->InAt(1).AsRegister<GpuRegister>();
-    if (type == Primitive::kPrimInt) {
-      __ Subu(TMP, ZERO, shamt);
-      __ Rotrv(out, in, TMP);
-    } else {
-      __ Dsubu(TMP, ZERO, shamt);
-      __ Drotrv(out, in, TMP);
-    }
-  }
-}
-
-// int java.lang.Integer.rotateLeft(int i, int distance)
-void IntrinsicLocationsBuilderMIPS64::VisitIntegerRotateLeft(HInvoke* invoke) {
-  LocationSummary* locations = new (arena_) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
-                                                           kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitIntegerRotateLeft(HInvoke* invoke) {
-  GenRotateLeft(invoke, Primitive::kPrimInt, GetAssembler());
-}
-
-// long java.lang.Long.rotateLeft(long i, int distance)
-void IntrinsicLocationsBuilderMIPS64::VisitLongRotateLeft(HInvoke* invoke) {
-  LocationSummary* locations = new (arena_) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
-                                                           kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitLongRotateLeft(HInvoke* invoke) {
-  GenRotateLeft(invoke, Primitive::kPrimLong, GetAssembler());
-}
-
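The deleted rotate helpers are not lost functionality: as the lists removed from the x86 files later in this patch indicate, rotations are handled as HIR (HRor) before code generation, so the backends now share one lowering. The negate-the-distance trick that GenRotateLeft used survives in that form; a hypothetical scalar model:

#include <cstdint>

// rotl(x, d) == rotr(x, -d mod 64): negating the distance turns a left
// rotation into a right one, which is what Dsubu + Drotrv emitted.
static uint64_t RotateLeft64(uint64_t x, int32_t d) {
  uint32_t r = (0u - static_cast<uint32_t>(d)) & 63u;  // well-defined negation
  return (x >> r) | (x << ((64u - r) & 63u));          // rotate right by r
}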
 static void GenReverse(LocationSummary* locations,
                        Primitive::Type type,
                        Mips64Assembler* assembler) {
@@ -580,25 +457,71 @@
 
 static void GenMinMaxFP(LocationSummary* locations,
                         bool is_min,
-                        bool is_double,
+                        Primitive::Type type,
                         Mips64Assembler* assembler) {
-  FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
+  FpuRegister a = locations->InAt(0).AsFpuRegister<FpuRegister>();
+  FpuRegister b = locations->InAt(1).AsFpuRegister<FpuRegister>();
   FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
 
-  if (is_double) {
+  Mips64Label noNaNs;
+  Mips64Label done;
+  FpuRegister ftmp = ((out != a) && (out != b)) ? out : FTMP;
+
+  // When Java computes min/max it prefers a NaN to a number; the
+  // behavior of MIPSR6 is to prefer numbers to NaNs, i.e., if one of
+  // the inputs is a NaN and the other is a valid number, the MIPS
+  // instruction will return the number; Java wants the NaN value
+  // returned. This is why there is extra logic preceding the use of
+  // the MIPS min.fmt/max.fmt instructions. If either a or b holds a
+  // NaN, return the NaN; otherwise return the min/max.
+  if (type == Primitive::kPrimDouble) {
+    __ CmpUnD(FTMP, a, b);
+    __ Bc1eqz(FTMP, &noNaNs);
+
+    // One of the inputs is a NaN.
+    __ CmpEqD(ftmp, a, a);
+    // If a == a then b is the NaN, otherwise a is the NaN.
+    __ SelD(ftmp, a, b);
+
+    if (ftmp != out) {
+      __ MovD(out, ftmp);
+    }
+
+    __ Bc(&done);
+
+    __ Bind(&noNaNs);
+
     if (is_min) {
-      __ MinD(out, lhs, rhs);
+      __ MinD(out, a, b);
     } else {
-      __ MaxD(out, lhs, rhs);
+      __ MaxD(out, a, b);
     }
   } else {
+    DCHECK_EQ(type, Primitive::kPrimFloat);
+    __ CmpUnS(FTMP, a, b);
+    __ Bc1eqz(FTMP, &noNaNs);
+
+    // One of the inputs is a NaN.
+    __ CmpEqS(ftmp, a, a);
+    // If a == a then b is the NaN, otherwise a is the NaN.
+    __ SelS(ftmp, a, b);
+
+    if (ftmp != out) {
+      __ MovS(out, ftmp);
+    }
+
+    __ Bc(&done);
+
+    __ Bind(&noNaNs);
+
     if (is_min) {
-      __ MinS(out, lhs, rhs);
+      __ MinS(out, a, b);
     } else {
-      __ MaxS(out, lhs, rhs);
+      __ MaxS(out, a, b);
     }
   }
+
+  __ Bind(&done);
 }
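The `ftmp` choice above is load-bearing: CmpEqD writes its destination before SelD has consumed both inputs, so `out` can only double as the scratch when it aliases neither `a` nor `b`; otherwise FTMP is used. A scalar model of the CmpEq/Sel pair, assuming (as the code does at this point) that at least one input is a NaN:

// Returns the NaN input: if a == a holds, a is an ordinary number, so b
// must be the NaN (SelD picks b when the condition is true, a otherwise).
static double SelectTheNaN(double a, double b) {
  bool a_is_a_number = (a == a);  // CmpEqD ftmp, a, a
  return a_is_a_number ? b : a;   // SelD ftmp, a, b
}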
 
 static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
@@ -616,7 +539,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, /* is_double */ true, GetAssembler());
+  GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, Primitive::kPrimDouble, GetAssembler());
 }
 
 // float java.lang.Math.min(float, float)
@@ -625,7 +548,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, /* is_double */ false, GetAssembler());
+  GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, Primitive::kPrimFloat, GetAssembler());
 }
 
 // double java.lang.Math.max(double, double)
@@ -634,7 +557,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, /* is_double */ true, GetAssembler());
+  GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, Primitive::kPrimDouble, GetAssembler());
 }
 
 // float java.lang.Math.max(float, float)
@@ -643,7 +566,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, /* is_double */ false, GetAssembler());
+  GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, Primitive::kPrimFloat, GetAssembler());
 }
 
 static void GenMinMax(LocationSummary* locations,
@@ -653,49 +576,55 @@
   GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
 
-  // Some architectures, such as ARM and MIPS (prior to r6), have a
-  // conditional move instruction which only changes the target
-  // (output) register if the condition is true (MIPS prior to r6 had
-  // MOVF, MOVT, and MOVZ). The SELEQZ and SELNEZ instructions always
-  // change the target (output) register.  If the condition is true the
-  // output register gets the contents of the "rs" register; otherwise,
-  // the output register is set to zero. One consequence of this is
-  // that to implement something like "rd = c==0 ? rs : rt" MIPS64r6
-  // needs to use a pair of SELEQZ/SELNEZ instructions.  After
-  // executing this pair of instructions one of the output registers
-  // from the pair will necessarily contain zero. Then the code ORs the
-  // output registers from the SELEQZ/SELNEZ instructions to get the
-  // final result.
-  //
-  // The initial test to see if the output register is same as the
-  // first input register is needed to make sure that value in the
-  // first input register isn't clobbered before we've finished
-  // computing the output value. The logic in the corresponding else
-  // clause performs the same task but makes sure the second input
-  // register isn't clobbered in the event that it's the same register
-  // as the output register; the else clause also handles the case
-  // where the output register is distinct from both the first, and the
-  // second input registers.
-  if (out == lhs) {
-    __ Slt(AT, rhs, lhs);
-    if (is_min) {
-      __ Seleqz(out, lhs, AT);
-      __ Selnez(AT, rhs, AT);
-    } else {
-      __ Selnez(out, lhs, AT);
-      __ Seleqz(AT, rhs, AT);
+  if (lhs == rhs) {
+    if (out != lhs) {
+      __ Move(out, lhs);
     }
   } else {
-    __ Slt(AT, lhs, rhs);
-    if (is_min) {
-      __ Seleqz(out, rhs, AT);
-      __ Selnez(AT, lhs, AT);
+    // Some architectures, such as ARM and MIPS (prior to r6), have a
+    // conditional move instruction which only changes the target
+    // (output) register if the condition is true (MIPS prior to r6 had
+    // MOVF, MOVT, MOVN, and MOVZ). The SELEQZ and SELNEZ instructions always
+    // change the target (output) register.  If the condition is true the
+    // output register gets the contents of the "rs" register; otherwise,
+    // the output register is set to zero. One consequence of this is
+    // that to implement something like "rd = c==0 ? rs : rt" MIPS64r6
+    // needs to use a pair of SELEQZ/SELNEZ instructions.  After
+    // executing this pair of instructions one of the output registers
+    // from the pair will necessarily contain zero. Then the code ORs the
+    // output registers from the SELEQZ/SELNEZ instructions to get the
+    // final result.
+    //
+    // The initial test to see if the output register is the same as the
+    // first input register is needed to make sure that the value in the
+    // first input register isn't clobbered before we've finished
+    // computing the output value. The logic in the corresponding else
+    // clause performs the same task but makes sure the second input
+    // register isn't clobbered in the event that it's the same register
+    // as the output register; the else clause also handles the case
+    // where the output register is distinct from both the first and the
+    // second input registers.
+    if (out == lhs) {
+      __ Slt(AT, rhs, lhs);
+      if (is_min) {
+        __ Seleqz(out, lhs, AT);
+        __ Selnez(AT, rhs, AT);
+      } else {
+        __ Selnez(out, lhs, AT);
+        __ Seleqz(AT, rhs, AT);
+      }
     } else {
-      __ Selnez(out, rhs, AT);
-      __ Seleqz(AT, lhs, AT);
+      __ Slt(AT, lhs, rhs);
+      if (is_min) {
+        __ Seleqz(out, rhs, AT);
+        __ Selnez(AT, lhs, AT);
+      } else {
+        __ Selnez(out, rhs, AT);
+        __ Seleqz(AT, lhs, AT);
+      }
     }
+    __ Or(out, out, AT);
   }
-  __ Or(out, out, AT);
 }
 
 static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
@@ -1583,6 +1512,7 @@
                     TMP,
                     TR,
                     QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pIndexOf).Int32Value());
+  CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
   __ Jalr(TMP);
   __ Nop();
 
@@ -1632,7 +1562,7 @@
       invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
 }
 
-// java.lang.String.String(byte[] bytes)
+// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
 void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
   LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                             LocationSummary::kCall,
@@ -1666,7 +1596,7 @@
   __ Bind(slow_path->GetExitLabel());
 }
 
-// java.lang.String.String(char[] value)
+// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
 void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
   LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                             LocationSummary::kCall,
@@ -1682,6 +1612,12 @@
 void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
   Mips64Assembler* assembler = GetAssembler();
 
+  // No need to emit code checking whether `locations->InAt(2)` is a null
+  // pointer, as callers of the native method
+  //
+  //   java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+  //
+  // all include a null check on `data` before calling that method.
   __ LoadFromOffset(kLoadDoubleword,
                     TMP,
                     TR,
@@ -1725,60 +1661,44 @@
   __ Bind(slow_path->GetExitLabel());
 }
 
-// Unimplemented intrinsics.
+UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerBitCount)
+UNIMPLEMENTED_INTRINSIC(MIPS64, LongBitCount)
 
-#define UNIMPLEMENTED_INTRINSIC(Name)                                                  \
-void IntrinsicLocationsBuilderMIPS64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-}                                                                                      \
-void IntrinsicCodeGeneratorMIPS64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {    \
-}
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathRoundFloat)
 
-UNIMPLEMENTED_INTRINSIC(IntegerBitCount)
-UNIMPLEMENTED_INTRINSIC(LongBitCount)
+UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringGetCharsNoCheck)
+UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
 
-UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
-UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathCos)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathSin)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathAcos)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathAsin)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathAtan)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathAtan2)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathCbrt)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathCosh)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathExp)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathExpm1)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathHypot)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathLog)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathLog10)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathNextAfter)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathSinh)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathTan)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathTanh)
 
-UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(MIPS64, FloatIsInfinite)
+UNIMPLEMENTED_INTRINSIC(MIPS64, DoubleIsInfinite)
 
-UNIMPLEMENTED_INTRINSIC(MathCos)
-UNIMPLEMENTED_INTRINSIC(MathSin)
-UNIMPLEMENTED_INTRINSIC(MathAcos)
-UNIMPLEMENTED_INTRINSIC(MathAsin)
-UNIMPLEMENTED_INTRINSIC(MathAtan)
-UNIMPLEMENTED_INTRINSIC(MathAtan2)
-UNIMPLEMENTED_INTRINSIC(MathCbrt)
-UNIMPLEMENTED_INTRINSIC(MathCosh)
-UNIMPLEMENTED_INTRINSIC(MathExp)
-UNIMPLEMENTED_INTRINSIC(MathExpm1)
-UNIMPLEMENTED_INTRINSIC(MathHypot)
-UNIMPLEMENTED_INTRINSIC(MathLog)
-UNIMPLEMENTED_INTRINSIC(MathLog10)
-UNIMPLEMENTED_INTRINSIC(MathNextAfter)
-UNIMPLEMENTED_INTRINSIC(MathSinh)
-UNIMPLEMENTED_INTRINSIC(MathTan)
-UNIMPLEMENTED_INTRINSIC(MathTanh)
+UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(MIPS64, LongHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(MIPS64, LongLowestOneBit)
 
-UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-
-UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
-
-// Handled as HIR instructions.
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
-UNIMPLEMENTED_INTRINSIC(IntegerSignum)
-UNIMPLEMENTED_INTRINSIC(LongSignum)
-
-#undef UNIMPLEMENTED_INTRINSIC
+UNREACHABLE_INTRINSICS(MIPS64)
 
 #undef __
 
diff --git a/compiler/optimizing/intrinsics_utils.h b/compiler/optimizing/intrinsics_utils.h
index e70afd2..c1f9ae6 100644
--- a/compiler/optimizing/intrinsics_utils.h
+++ b/compiler/optimizing/intrinsics_utils.h
@@ -39,7 +39,7 @@
 template <typename TDexCallingConvention>
 class IntrinsicSlowPath : public SlowPathCode {
  public:
-  explicit IntrinsicSlowPath(HInvoke* invoke) : invoke_(invoke) { }
+  explicit IntrinsicSlowPath(HInvoke* invoke) : SlowPathCode(invoke), invoke_(invoke) { }
 
   Location MoveArguments(CodeGenerator* codegen) {
     TDexCallingConvention calling_convention_visitor;
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 260a877..9a2dc41 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1564,6 +1564,12 @@
 void IntrinsicCodeGeneratorX86::VisitStringNewStringFromChars(HInvoke* invoke) {
   X86Assembler* assembler = GetAssembler();
 
+  // No need to emit code checking whether `locations->InAt(2)` is a null
+  // pointer, as callers of the native method
+  //
+  //   java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+  //
+  // all include a null check on `data` before calling that method.
   __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromChars)));
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
@@ -2621,39 +2627,17 @@
   GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
 }
 
-// Unimplemented intrinsics.
+UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(X86, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(X86, SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
+UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
+UNIMPLEMENTED_INTRINSIC(X86, IntegerHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(X86, LongHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(X86, IntegerLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(X86, LongLowestOneBit)
 
-#define UNIMPLEMENTED_INTRINSIC(Name)                                                   \
-void IntrinsicLocationsBuilderX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-}                                                                                       \
-void IntrinsicCodeGeneratorX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {    \
-}
-
-UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
-UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
-
-UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-
-UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
-
-// Handled as HIR instructions.
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
-UNIMPLEMENTED_INTRINSIC(LongRotateRight)
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
-UNIMPLEMENTED_INTRINSIC(IntegerSignum)
-UNIMPLEMENTED_INTRINSIC(LongSignum)
-
-#undef UNIMPLEMENTED_INTRINSIC
+UNREACHABLE_INTRINSICS(X86)
 
 #undef __
 
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 93e8c00..75204b4 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1659,6 +1659,12 @@
 void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromChars(HInvoke* invoke) {
   X86_64Assembler* assembler = GetAssembler();
 
+  // No need to emit code checking whether `locations->InAt(2)` is a null
+  // pointer, as callers of the native method
+  //
+  //   java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+  //
+  // all include a null check on `data` before calling that method.
   __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromChars),
                                   /* no_rip */ true));
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -2705,32 +2711,11 @@
   GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
 }
 
-// Unimplemented intrinsics.
+UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
+UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
 
-#define UNIMPLEMENTED_INTRINSIC(Name)                                                   \
-void IntrinsicLocationsBuilderX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-}                                                                                       \
-void IntrinsicCodeGeneratorX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {    \
-}
-
-UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-
-UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-
-// Handled as HIR instructions.
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
-UNIMPLEMENTED_INTRINSIC(LongRotateRight)
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
-UNIMPLEMENTED_INTRINSIC(IntegerSignum)
-UNIMPLEMENTED_INTRINSIC(LongSignum)
-
-#undef UNIMPLEMENTED_INTRINSIC
+UNREACHABLE_INTRINSICS(X86_64)
 
 #undef __
 
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index f36dc6e..77ded29 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -127,6 +127,9 @@
       // Remove the block from the list of blocks, so that further analyses
       // never see it.
       blocks_[i] = nullptr;
+      if (block->IsExitBlock()) {
+        SetExitBlock(nullptr);
+      }
     }
   }
 }
@@ -1178,19 +1181,19 @@
 }
 
 HConstant* HBinaryOperation::TryStaticEvaluation() const {
-  if (GetLeft()->IsIntConstant()) {
-    if (GetRight()->IsIntConstant()) {
-      return Evaluate(GetLeft()->AsIntConstant(), GetRight()->AsIntConstant());
-    } else if (GetRight()->IsLongConstant()) {
-      return Evaluate(GetLeft()->AsIntConstant(), GetRight()->AsLongConstant());
-    }
+  if (GetLeft()->IsIntConstant() && GetRight()->IsIntConstant()) {
+    return Evaluate(GetLeft()->AsIntConstant(), GetRight()->AsIntConstant());
   } else if (GetLeft()->IsLongConstant()) {
     if (GetRight()->IsIntConstant()) {
+      // The binop(long, int) case is only valid for shifts and rotations.
+      DCHECK(IsShl() || IsShr() || IsUShr() || IsRor()) << DebugName();
       return Evaluate(GetLeft()->AsLongConstant(), GetRight()->AsIntConstant());
     } else if (GetRight()->IsLongConstant()) {
       return Evaluate(GetLeft()->AsLongConstant(), GetRight()->AsLongConstant());
     }
   } else if (GetLeft()->IsNullConstant() && GetRight()->IsNullConstant()) {
+    // The binop(null, null) case is only valid for equal and not-equal conditions.
+    DCHECK(IsEqual() || IsNotEqual()) << DebugName();
     return Evaluate(GetLeft()->AsNullConstant(), GetRight()->AsNullConstant());
   } else if (kEnableFloatingPointStaticEvaluation) {
     if (GetLeft()->IsFloatConstant() && GetRight()->IsFloatConstant()) {
@@ -1870,13 +1873,49 @@
   DCHECK(block->GetPhis().IsEmpty());
 
   if (block->IsExitBlock()) {
-    exit_block_ = nullptr;
+    SetExitBlock(nullptr);
   }
 
   RemoveElement(reverse_post_order_, block);
   blocks_[block->GetBlockId()] = nullptr;
 }
 
+void HGraph::UpdateLoopAndTryInformationOfNewBlock(HBasicBlock* block,
+                                                   HBasicBlock* reference,
+                                                   bool replace_if_back_edge) {
+  if (block->IsLoopHeader()) {
+    // Clear the information of which blocks are contained in that loop. Since the
+    // information is stored as a bit vector based on block ids, we have to update
+    // it, as those block ids were specific to the callee graph and we are now adding
+    // these blocks to the caller graph.
+    block->GetLoopInformation()->ClearAllBlocks();
+  }
+
+  // If not already in a loop, update the loop information.
+  if (!block->IsInLoop()) {
+    block->SetLoopInformation(reference->GetLoopInformation());
+  }
+
+  // If the block is in a loop, update all its outward loops.
+  HLoopInformation* loop_info = block->GetLoopInformation();
+  if (loop_info != nullptr) {
+    for (HLoopInformationOutwardIterator loop_it(*block);
+         !loop_it.Done();
+         loop_it.Advance()) {
+      loop_it.Current()->Add(block);
+    }
+    if (replace_if_back_edge && loop_info->IsBackEdge(*reference)) {
+      loop_info->ReplaceBackEdge(reference, block);
+    }
+  }
+
+  // Copy TryCatchInformation if `reference` is a try block, not if it is a catch block.
+  TryCatchInformation* try_catch_info = reference->IsTryBlock()
+      ? reference->GetTryCatchInformation()
+      : nullptr;
+  block->SetTryCatchInformation(try_catch_info);
+}
+
 HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
   DCHECK(HasExitBlock()) << "Unimplemented scenario";
   // Update the environments in this graph to have the invoke's environment
@@ -1940,34 +1979,6 @@
     at->MergeWithInlined(first);
     exit_block_->ReplaceWith(to);
 
-    // Update all predecessors of the exit block (now the `to` block)
-    // to not `HReturn` but `HGoto` instead.
-    bool returns_void = to->GetPredecessors()[0]->GetLastInstruction()->IsReturnVoid();
-    if (to->GetPredecessors().size() == 1) {
-      HBasicBlock* predecessor = to->GetPredecessors()[0];
-      HInstruction* last = predecessor->GetLastInstruction();
-      if (!returns_void) {
-        return_value = last->InputAt(0);
-      }
-      predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
-      predecessor->RemoveInstruction(last);
-    } else {
-      if (!returns_void) {
-        // There will be multiple returns.
-        return_value = new (allocator) HPhi(
-            allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()), to->GetDexPc());
-        to->AddPhi(return_value->AsPhi());
-      }
-      for (HBasicBlock* predecessor : to->GetPredecessors()) {
-        HInstruction* last = predecessor->GetLastInstruction();
-        if (!returns_void) {
-          return_value->AsPhi()->AddInput(last->InputAt(0));
-        }
-        predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
-        predecessor->RemoveInstruction(last);
-      }
-    }
-
     // Update the meta information surrounding blocks:
     // (1) the graph they are now in,
     // (2) the reverse post order of that graph,
@@ -1991,10 +2002,6 @@
     size_t index_of_at = IndexOfElement(outer_graph->reverse_post_order_, at);
     MakeRoomFor(&outer_graph->reverse_post_order_, blocks_added, index_of_at);
 
-    HLoopInformation* loop_info = at->GetLoopInformation();
-    // Copy TryCatchInformation if `at` is a try block, not if it is a catch block.
-    TryCatchInformation* try_catch_info = at->IsTryBlock() ? at->GetTryCatchInformation() : nullptr;
-
     // Do a reverse post order of the blocks in the callee and do (1), (2), (3)
     // and (4) to the blocks that apply.
     for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
@@ -2005,23 +2012,7 @@
         current->SetGraph(outer_graph);
         outer_graph->AddBlock(current);
         outer_graph->reverse_post_order_[++index_of_at] = current;
-        if (!current->IsInLoop()) {
-          current->SetLoopInformation(loop_info);
-        } else if (current->IsLoopHeader()) {
-          // Clear the information of which blocks are contained in that loop. Since the
-          // information is stored as a bit vector based on block ids, we have to update
-          // it, as those block ids were specific to the callee graph and we are now adding
-          // these blocks to the caller graph.
-          current->GetLoopInformation()->ClearAllBlocks();
-        }
-        if (current->IsInLoop()) {
-          for (HLoopInformationOutwardIterator loop_it(*current);
-               !loop_it.Done();
-               loop_it.Advance()) {
-            loop_it.Current()->Add(current);
-          }
-        }
-        current->SetTryCatchInformation(try_catch_info);
+        UpdateLoopAndTryInformationOfNewBlock(current, at, /* replace_if_back_edge */ false);
       }
     }
 
@@ -2029,26 +2020,40 @@
     to->SetGraph(outer_graph);
     outer_graph->AddBlock(to);
     outer_graph->reverse_post_order_[++index_of_at] = to;
-    if (loop_info != nullptr) {
-      if (!to->IsInLoop()) {
-        to->SetLoopInformation(loop_info);
+    // Only `to` can become a back edge, as the inlined blocks
+    // are predecessors of `to`.
+    UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge */ true);
+
+    // Update all predecessors of the exit block (now the `to` block)
+    // to end with an `HGoto` instead of an `HReturn`.
+    bool returns_void = to->GetPredecessors()[0]->GetLastInstruction()->IsReturnVoid();
+    if (to->GetPredecessors().size() == 1) {
+      HBasicBlock* predecessor = to->GetPredecessors()[0];
+      HInstruction* last = predecessor->GetLastInstruction();
+      if (!returns_void) {
+        return_value = last->InputAt(0);
       }
-      for (HLoopInformationOutwardIterator loop_it(*at); !loop_it.Done(); loop_it.Advance()) {
-        loop_it.Current()->Add(to);
+      predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
+      predecessor->RemoveInstruction(last);
+    } else {
+      if (!returns_void) {
+        // There will be multiple returns.
+        return_value = new (allocator) HPhi(
+            allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()), to->GetDexPc());
+        to->AddPhi(return_value->AsPhi());
       }
-      if (loop_info->IsBackEdge(*at)) {
-        // Only `to` can become a back edge, as the inlined blocks
-        // are predecessors of `to`.
-        loop_info->ReplaceBackEdge(at, to);
+      for (HBasicBlock* predecessor : to->GetPredecessors()) {
+        HInstruction* last = predecessor->GetLastInstruction();
+        if (!returns_void) {
+          DCHECK(last->IsReturn());
+          return_value->AsPhi()->AddInput(last->InputAt(0));
+        }
+        predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
+        predecessor->RemoveInstruction(last);
       }
     }
-    to->SetTryCatchInformation(try_catch_info);
   }
 
-  // Update the next instruction id of the outer graph, so that instructions
-  // added later get bigger ids than those in the inner graph.
-  outer_graph->SetCurrentInstructionId(GetNextInstructionId());
-
   // Walk over the entry block and:
   // - Move constants from the entry block to the outer_graph's entry block,
   // - Replace HParameterValue instructions with their real value.
@@ -2157,32 +2162,17 @@
   reverse_post_order_[index_of_header++] = false_block;
   reverse_post_order_[index_of_header++] = new_pre_header;
 
-  // Fix loop information.
-  HLoopInformation* loop_info = old_pre_header->GetLoopInformation();
-  if (loop_info != nullptr) {
-    if_block->SetLoopInformation(loop_info);
-    true_block->SetLoopInformation(loop_info);
-    false_block->SetLoopInformation(loop_info);
-    new_pre_header->SetLoopInformation(loop_info);
-    // Add blocks to all enveloping loops.
-    for (HLoopInformationOutwardIterator loop_it(*old_pre_header);
-         !loop_it.Done();
-         loop_it.Advance()) {
-      loop_it.Current()->Add(if_block);
-      loop_it.Current()->Add(true_block);
-      loop_it.Current()->Add(false_block);
-      loop_it.Current()->Add(new_pre_header);
-    }
-  }
-
-  // Fix try/catch information.
-  TryCatchInformation* try_catch_info = old_pre_header->IsTryBlock()
-      ? old_pre_header->GetTryCatchInformation()
-      : nullptr;
-  if_block->SetTryCatchInformation(try_catch_info);
-  true_block->SetTryCatchInformation(try_catch_info);
-  false_block->SetTryCatchInformation(try_catch_info);
-  new_pre_header->SetTryCatchInformation(try_catch_info);
+  // The pre_header can never be a back edge of a loop.
+  DCHECK((old_pre_header->GetLoopInformation() == nullptr) ||
+         !old_pre_header->GetLoopInformation()->IsBackEdge(*old_pre_header));
+  UpdateLoopAndTryInformationOfNewBlock(
+      if_block, old_pre_header, /* replace_if_back_edge */ false);
+  UpdateLoopAndTryInformationOfNewBlock(
+      true_block, old_pre_header, /* replace_if_back_edge */ false);
+  UpdateLoopAndTryInformationOfNewBlock(
+      false_block, old_pre_header, /* replace_if_back_edge */ false);
+  UpdateLoopAndTryInformationOfNewBlock(
+      new_pre_header, old_pre_header, /* replace_if_back_edge */ false);
 }
 
 static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo upper_bound_rti)
@@ -2206,7 +2196,8 @@
       CheckAgainstUpperBound(rti, AsBoundType()->GetUpperBound());
     }
   }
-  reference_type_info_ = rti;
+  reference_type_handle_ = rti.GetTypeHandle();
+  SetPackedFlag<kFlagReferenceTypeIsExact>(rti.IsExact());
 }
 
 void HBoundType::SetUpperBound(const ReferenceTypeInfo& upper_bound, bool can_be_null) {
@@ -2217,17 +2208,15 @@
     CheckAgainstUpperBound(GetReferenceTypeInfo(), upper_bound);
   }
   upper_bound_ = upper_bound;
-  upper_can_be_null_ = can_be_null;
+  SetPackedFlag<kFlagUpperCanBeNull>(can_be_null);
 }
 
-ReferenceTypeInfo::ReferenceTypeInfo() : type_handle_(TypeHandle()), is_exact_(false) {}
-
-ReferenceTypeInfo::ReferenceTypeInfo(TypeHandle type_handle, bool is_exact)
-    : type_handle_(type_handle), is_exact_(is_exact) {
+ReferenceTypeInfo ReferenceTypeInfo::Create(TypeHandle type_handle, bool is_exact) {
   if (kIsDebugBuild) {
     ScopedObjectAccess soa(Thread::Current());
     DCHECK(IsValidHandle(type_handle));
   }
+  return ReferenceTypeInfo(type_handle, is_exact);
 }
 
 std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 399afab..b684cc6 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -154,8 +154,9 @@
  public:
   typedef Handle<mirror::Class> TypeHandle;
 
-  static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact) {
-    // The constructor will check that the type_handle is valid.
+  static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact);
+
+  static ReferenceTypeInfo CreateUnchecked(TypeHandle type_handle, bool is_exact) {
     return ReferenceTypeInfo(type_handle, is_exact);
   }
 
@@ -254,8 +255,9 @@
   }
 
  private:
-  ReferenceTypeInfo();
-  ReferenceTypeInfo(TypeHandle type_handle, bool is_exact);
+  ReferenceTypeInfo() : type_handle_(TypeHandle()), is_exact_(false) {}
+  ReferenceTypeInfo(TypeHandle type_handle, bool is_exact)
+      : type_handle_(type_handle), is_exact_(is_exact) { }
 
   // The class of the object.
   TypeHandle type_handle_;
@@ -351,6 +353,13 @@
   // and removing the invoke instruction.
   HInstruction* InlineInto(HGraph* outer_graph, HInvoke* invoke);
 
+  // Update the loop and try membership of `block`, which was spawned from `reference`.
+  // In case `reference` is a back edge, `replace_if_back_edge` specifies whether `block`
+  // should be the new back edge.
+  void UpdateLoopAndTryInformationOfNewBlock(HBasicBlock* block,
+                                             HBasicBlock* reference,
+                                             bool replace_if_back_edge);
+
   // Need to add a couple of blocks to test if the loop body is entered and
   // put deoptimization instructions, etc.
   void TransformLoopHeaderForBCE(HBasicBlock* header);
@@ -378,6 +387,7 @@
   }
 
   void SetCurrentInstructionId(int32_t id) {
+    DCHECK_GE(id, current_instruction_id_);
     current_instruction_id_ = id;
   }
 
@@ -1247,6 +1257,16 @@
   M(UShr, BinaryOperation)                                              \
   M(Xor, BinaryOperation)                                               \
 
+/*
+ * Instructions shared across several (not all) architectures.
+ */
+#if !defined(ART_ENABLE_CODEGEN_arm) && !defined(ART_ENABLE_CODEGEN_arm64)
+#define FOR_EACH_CONCRETE_INSTRUCTION_SHARED(M)
+#else
+#define FOR_EACH_CONCRETE_INSTRUCTION_SHARED(M)                         \
+  M(MultiplyAccumulate, Instruction)
+#endif
+
 #ifndef ART_ENABLE_CODEGEN_arm
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM(M)
 #else
@@ -1258,9 +1278,9 @@
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
 #else
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)                          \
+  M(Arm64BitwiseNegatedRight, Instruction)                              \
   M(Arm64DataProcWithShifterOp, Instruction)                            \
-  M(Arm64IntermediateAddress, Instruction)                              \
-  M(Arm64MultiplyAccumulate, Instruction)
+  M(Arm64IntermediateAddress, Instruction)
 #endif
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)
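For readers less familiar with the X-macro idiom these lists rely on: each FOR_EACH_* macro takes another macro M and applies it to every instruction name, so a single list drives forward declarations, enum entries, visitor methods, and so on. A minimal sketch with illustrative names (not the real ART lists):

#define FOR_EACH_SHARED_INSTRUCTION_SKETCH(M) \
  M(MultiplyAccumulate)

// Expansion 1: forward declarations.
#define DECLARE_CLASS_SKETCH(name) class H##name;
FOR_EACH_SHARED_INSTRUCTION_SKETCH(DECLARE_CLASS_SKETCH)
#undef DECLARE_CLASS_SKETCH

// Expansion 2: one enumerator per instruction.
enum class InstructionKindSketch {
#define DECLARE_KIND_SKETCH(name) k##name,
  FOR_EACH_SHARED_INSTRUCTION_SKETCH(DECLARE_KIND_SKETCH)
#undef DECLARE_KIND_SKETCH
};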
@@ -1281,6 +1301,7 @@
 
 #define FOR_EACH_CONCRETE_INSTRUCTION(M)                                \
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(M)                               \
+  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(M)                               \
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(M)                                  \
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)                                \
   FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)                                 \
@@ -1837,13 +1858,15 @@
         dex_pc_(dex_pc),
         id_(-1),
         ssa_index_(-1),
-        emitted_at_use_site_(false),
+        packed_fields_(0u),
         environment_(nullptr),
         locations_(nullptr),
         live_interval_(nullptr),
         lifetime_position_(kNoLifetime),
         side_effects_(side_effects),
-        reference_type_info_(ReferenceTypeInfo::CreateInvalid()) {}
+        reference_type_handle_(ReferenceTypeInfo::CreateInvalid().GetTypeHandle()) {
+    SetPackedFlag<kFlagReferenceTypeIsExact>(ReferenceTypeInfo::CreateInvalid().IsExact());
+  }
 
   virtual ~HInstruction() {}
 
@@ -1912,7 +1935,8 @@
 
   ReferenceTypeInfo GetReferenceTypeInfo() const {
     DCHECK_EQ(GetType(), Primitive::kPrimNot);
-    return reference_type_info_;
+    return ReferenceTypeInfo::CreateUnchecked(reference_type_handle_,
+                                              GetPackedFlag<kFlagReferenceTypeIsExact>());
   }
 
   void AddUseAt(HInstruction* user, size_t index) {
@@ -2101,13 +2125,45 @@
   // The caller must ensure that this is safe to do.
   void RemoveEnvironmentUsers();
 
-  bool IsEmittedAtUseSite() const { return emitted_at_use_site_; }
-  void MarkEmittedAtUseSite() { emitted_at_use_site_ = true; }
+  bool IsEmittedAtUseSite() const { return GetPackedFlag<kFlagEmittedAtUseSite>(); }
+  void MarkEmittedAtUseSite() { SetPackedFlag<kFlagEmittedAtUseSite>(true); }
 
  protected:
+  // If set, the machine code for this instruction is assumed to be generated by
+  // its users. Used by liveness analysis to compute use positions accordingly.
+  static constexpr size_t kFlagEmittedAtUseSite = 0u;
+  static constexpr size_t kFlagReferenceTypeIsExact = kFlagEmittedAtUseSite + 1;
+  static constexpr size_t kNumberOfGenericPackedBits = kFlagReferenceTypeIsExact + 1;
+  static constexpr size_t kMaxNumberOfPackedBits = sizeof(uint32_t) * kBitsPerByte;
+
   virtual const HUserRecord<HInstruction*> InputRecordAt(size_t i) const = 0;
   virtual void SetRawInputRecordAt(size_t index, const HUserRecord<HInstruction*>& input) = 0;
 
+  uint32_t GetPackedFields() const {
+    return packed_fields_;
+  }
+
+  template <size_t flag>
+  bool GetPackedFlag() const {
+    return (packed_fields_ & (1u << flag)) != 0u;
+  }
+
+  template <size_t flag>
+  void SetPackedFlag(bool value = true) {
+    packed_fields_ = (packed_fields_ & ~(1u << flag)) | ((value ? 1u : 0u) << flag);
+  }
+
+  template <typename BitFieldType>
+  typename BitFieldType::value_type GetPackedField() const {
+    return BitFieldType::Decode(packed_fields_);
+  }
+
+  template <typename BitFieldType>
+  void SetPackedField(typename BitFieldType::value_type value) {
+    DCHECK(IsUint<BitFieldType::size>(static_cast<uintptr_t>(value)));
+    packed_fields_ = BitFieldType::Update(value, packed_fields_);
+  }
+
  private:
   void RemoveEnvironmentUser(HUseListNode<HEnvironment*>* use_node) { env_uses_.Remove(use_node); }
 
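The accessors above are the whole packing machinery: single-bit flags are addressed by position, and multi-bit fields go through BitField, which owns a (position, size) pair and does the masked read-modify-write. A self-contained sketch of both halves (simplified; ART's real BitField lives in base/bit_field.h):

#include <cstddef>
#include <cstdint>

// Multi-bit field at a fixed position; assumes kSize < 32.
template <typename T, size_t kPosition, size_t kSize>
struct BitFieldSketch {
  static T Decode(uint32_t storage) {
    return static_cast<T>((storage >> kPosition) & ((1u << kSize) - 1u));
  }
  static uint32_t Update(T value, uint32_t storage) {
    const uint32_t mask = ((1u << kSize) - 1u) << kPosition;
    return (storage & ~mask) | ((static_cast<uint32_t>(value) << kPosition) & mask);
  }
};

struct PackedSketch {
  uint32_t packed_fields_ = 0u;

  bool GetFlag(size_t flag) const { return (packed_fields_ & (1u << flag)) != 0u; }
  void SetFlag(size_t flag, bool value) {
    packed_fields_ = (packed_fields_ & ~(1u << flag)) | ((value ? 1u : 0u) << flag);
  }
};

Because the flag index in the real code is a template (compile-time) constant, each accessor folds down to a single and/or against an immediate mask.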
@@ -2124,9 +2180,8 @@
   // When doing liveness analysis, instructions that have uses get an SSA index.
   int ssa_index_;
 
-  // If set, the machine code for this instruction is assumed to be generated by
-  // its users. Used by liveness analysis to compute use positions accordingly.
-  bool emitted_at_use_site_;
+  // Packed fields.
+  uint32_t packed_fields_;
 
   // List of instructions that have this instruction as input.
   HUseList<HInstruction*> uses_;
@@ -2150,8 +2205,10 @@
 
   SideEffects side_effects_;
 
+  // The reference handle part of the reference type info.
+  // The IsExact() flag is stored in packed fields.
   // TODO: for primitive types this should be marked as invalid.
-  ReferenceTypeInfo reference_type_info_;
+  ReferenceTypeInfo::TypeHandle reference_type_handle_;
 
   friend class GraphChecker;
   friend class HBasicBlock;
@@ -2277,13 +2334,23 @@
 class HExpression : public HTemplateInstruction<N> {
  public:
   HExpression<N>(Primitive::Type type, SideEffects side_effects, uint32_t dex_pc)
-      : HTemplateInstruction<N>(side_effects, dex_pc), type_(type) {}
+      : HTemplateInstruction<N>(side_effects, dex_pc) {
+    this->template SetPackedField<TypeField>(type);
+  }
   virtual ~HExpression() {}
 
-  Primitive::Type GetType() const OVERRIDE { return type_; }
+  Primitive::Type GetType() const OVERRIDE {
+    return TypeField::Decode(this->GetPackedFields());
+  }
 
  protected:
-  Primitive::Type type_;
+  static constexpr size_t kFieldType = HInstruction::kNumberOfGenericPackedBits;
+  static constexpr size_t kFieldTypeSize =
+      MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast));
+  static constexpr size_t kNumberOfExpressionPackedBits = kFieldType + kFieldTypeSize;
+  static_assert(kNumberOfExpressionPackedBits <= HInstruction::kMaxNumberOfPackedBits,
+                "Too many packed fields.");
+  using TypeField = BitField<Primitive::Type, kFieldType, kFieldTypeSize>;
 };
 
 // Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow
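The HExpression constants above establish the allocation protocol used by every class in the rest of this change: a subclass starts its fields at the parent's kNumberOf*PackedBits, publishes its own running total, and static_asserts that the 32-bit budget still holds. A sketch of the chaining with made-up widths:

#include <cstddef>
#include <cstdint>

constexpr size_t kBitsPerByteSketch = 8;
constexpr size_t kMaxPackedBitsSketch = sizeof(uint32_t) * kBitsPerByteSketch;

// The base class hands out bits [0, 2).
constexpr size_t kNumberOfGenericBitsSketch = 2;

// An HExpression-like subclass appends a 4-bit type field...
constexpr size_t kFieldTypeSketch = kNumberOfGenericBitsSketch;
constexpr size_t kFieldTypeSizeSketch = 4;
constexpr size_t kNumberOfExpressionBitsSketch = kFieldTypeSketch + kFieldTypeSizeSketch;

// ...and the next level continues from there, so layouts never overlap.
constexpr size_t kFlagIsThisSketch = kNumberOfExpressionBitsSketch;

static_assert(kFlagIsThisSketch + 1 <= kMaxPackedBitsSketch, "Too many packed fields.");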
@@ -2573,13 +2640,16 @@
 // higher indices in no particular order.
 class HTryBoundary : public HTemplateInstruction<0> {
  public:
-  enum BoundaryKind {
+  enum class BoundaryKind {
     kEntry,
     kExit,
+    kLast = kExit
   };
 
   explicit HTryBoundary(BoundaryKind kind, uint32_t dex_pc = kNoDexPc)
-      : HTemplateInstruction(SideEffects::None(), dex_pc), kind_(kind) {}
+      : HTemplateInstruction(SideEffects::None(), dex_pc) {
+    SetPackedField<BoundaryKindField>(kind);
+  }
 
   bool IsControlFlow() const OVERRIDE { return true; }
 
@@ -2605,14 +2675,22 @@
     }
   }
 
-  bool IsEntry() const { return kind_ == BoundaryKind::kEntry; }
+  BoundaryKind GetBoundaryKind() const { return GetPackedField<BoundaryKindField>(); }
+  bool IsEntry() const { return GetBoundaryKind() == BoundaryKind::kEntry; }
 
   bool HasSameExceptionHandlersAs(const HTryBoundary& other) const;
 
   DECLARE_INSTRUCTION(TryBoundary);
 
  private:
-  const BoundaryKind kind_;
+  static constexpr size_t kFieldBoundaryKind = kNumberOfGenericPackedBits;
+  static constexpr size_t kFieldBoundaryKindSize =
+      MinimumBitsToStore(static_cast<size_t>(BoundaryKind::kLast));
+  static constexpr size_t kNumberOfTryBoundaryPackedBits =
+      kFieldBoundaryKind + kFieldBoundaryKindSize;
+  static_assert(kNumberOfTryBoundaryPackedBits <= kMaxNumberOfPackedBits,
+                "Too many packed fields.");
+  using BoundaryKindField = BitField<BoundaryKind, kFieldBoundaryKind, kFieldBoundaryKindSize>;
 
   DISALLOW_COPY_AND_ASSIGN(HTryBoundary);
 };
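HTryBoundary is also the first of many enums converted to enum class with a kLast alias: kLast names the highest enumerator, and MinimumBitsToStore(kLast) sizes the field, so adding an enumerator automatically widens the field (and trips the static_assert if the budget runs out). A constexpr stand-in for MinimumBitsToStore, which in ART comes from base/bit_utils.h:

#include <cstddef>

constexpr size_t MinimumBitsToStoreSketch(size_t value) {
  return value == 0 ? 0 : 1 + MinimumBitsToStoreSketch(value >> 1);
}

enum class BoundaryKindSketch { kEntry, kExit, kLast = kExit };

// kLast is 1 today, so the field is one bit wide; a third enumerator would make it two.
static_assert(
    MinimumBitsToStoreSketch(static_cast<size_t>(BoundaryKindSketch::kLast)) == 1,
    "one bit suffices");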
@@ -2658,9 +2736,10 @@
 // of a class.
 class HClassTableGet : public HExpression<1> {
  public:
-  enum TableKind {
+  enum class TableKind {
     kVTable,
     kIMTable,
+    kLast = kIMTable
   };
   HClassTableGet(HInstruction* cls,
                  Primitive::Type type,
@@ -2668,26 +2747,33 @@
                  size_t index,
                  uint32_t dex_pc)
       : HExpression(type, SideEffects::None(), dex_pc),
-        index_(index),
-        table_kind_(kind) {
+        index_(index) {
+    SetPackedField<TableKindField>(kind);
     SetRawInputAt(0, cls);
   }
 
   bool CanBeMoved() const OVERRIDE { return true; }
   bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     return other->AsClassTableGet()->GetIndex() == index_ &&
-        other->AsClassTableGet()->GetTableKind() == table_kind_;
+        other->AsClassTableGet()->GetPackedFields() == GetPackedFields();
   }
 
-  TableKind GetTableKind() const { return table_kind_; }
+  TableKind GetTableKind() const { return GetPackedField<TableKindField>(); }
   size_t GetIndex() const { return index_; }
 
   DECLARE_INSTRUCTION(ClassTableGet);
 
  private:
+  static constexpr size_t kFieldTableKind = kNumberOfExpressionPackedBits;
+  static constexpr size_t kFieldTableKindSize =
+      MinimumBitsToStore(static_cast<size_t>(TableKind::kLast));
+  static constexpr size_t kNumberOfClassTableGetPackedBits = kFieldTableKind + kFieldTableKindSize;
+  static_assert(kNumberOfClassTableGetPackedBits <= kMaxNumberOfPackedBits,
+                "Too many packed fields.");
+  using TableKindField = BitField<TableKind, kFieldTableKind, kFieldTableKindSize>;
+
   // The index of the ArtMethod in the table.
   const size_t index_;
-  const TableKind table_kind_;
 
   DISALLOW_COPY_AND_ASSIGN(HClassTableGet);
 };
@@ -2821,20 +2907,15 @@
   // Apply this operation to `x` and `y`.
   virtual HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
                               HNullConstant* y ATTRIBUTE_UNUSED) const {
-    VLOG(compiler) << DebugName() << " is not defined for the (null, null) case.";
-    return nullptr;
+    LOG(FATAL) << DebugName() << " is not defined for the (null, null) case.";
+    UNREACHABLE();
   }
   virtual HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const = 0;
   virtual HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const = 0;
-  virtual HConstant* Evaluate(HIntConstant* x ATTRIBUTE_UNUSED,
-                              HLongConstant* y ATTRIBUTE_UNUSED) const {
-    VLOG(compiler) << DebugName() << " is not defined for the (int, long) case.";
-    return nullptr;
-  }
   virtual HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED,
                               HIntConstant* y ATTRIBUTE_UNUSED) const {
-    VLOG(compiler) << DebugName() << " is not defined for the (long, int) case.";
-    return nullptr;
+    LOG(FATAL) << DebugName() << " is not defined for the (long, int) case.";
+    UNREACHABLE();
   }
   virtual HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const = 0;
   virtual HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const = 0;
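Worth noting in the hunk above: the undefined (null, null) and (long, int) folding cases are upgraded from a VLOG-and-return-nullptr to LOG(FATAL) plus UNREACHABLE(), i.e. hitting them is now treated as a compiler bug rather than a condition for callers to tolerate, and the unused (int, long) overload is deleted outright. A stand-in for the fail-fast pattern (ART's macros come from base/logging.h):

#include <cstdio>
#include <cstdlib>

[[noreturn]] inline void FatalUndefinedCase(const char* name) {
  std::fprintf(stderr, "%s is not defined for this operand pair.\n", name);
  std::abort();  // UNREACHABLE() likewise tells the compiler control never returns
}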
@@ -2859,6 +2940,7 @@
   kNoBias,  // bias is not applicable (i.e. for long operation)
   kGtBias,  // return 1 for NaN comparisons
   kLtBias,  // return -1 for NaN comparisons
+  kLast = kLtBias
 };
 
 std::ostream& operator<<(std::ostream& os, const ComparisonBias& rhs);
@@ -2866,8 +2948,9 @@
 class HCondition : public HBinaryOperation {
  public:
   HCondition(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
-      : HBinaryOperation(Primitive::kPrimBoolean, first, second, SideEffects::None(), dex_pc),
-        bias_(ComparisonBias::kNoBias) {}
+      : HBinaryOperation(Primitive::kPrimBoolean, first, second, SideEffects::None(), dex_pc) {
+    SetPackedField<ComparisonBiasField>(ComparisonBias::kNoBias);
+  }
 
   // For code generation purposes, returns whether this instruction is just before
   // `instruction`, and disregard moves in between.
@@ -2879,12 +2962,12 @@
 
   virtual IfCondition GetOppositeCondition() const = 0;
 
-  bool IsGtBias() const { return bias_ == ComparisonBias::kGtBias; }
-  ComparisonBias GetBias() const { return bias_; }
-  void SetBias(ComparisonBias bias) { bias_ = bias; }
+  bool IsGtBias() const { return GetBias() == ComparisonBias::kGtBias; }
+  ComparisonBias GetBias() const { return GetPackedField<ComparisonBiasField>(); }
+  void SetBias(ComparisonBias bias) { SetPackedField<ComparisonBiasField>(bias); }
 
   bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
-    return bias_ == other->AsCondition()->bias_;
+    return GetPackedFields() == other->AsCondition()->GetPackedFields();
   }
 
   bool IsFPConditionTrueIfNaN() const {
@@ -2900,6 +2983,16 @@
   }
 
  protected:
+  // Needed if we merge a HCompare into a HCondition.
+  static constexpr size_t kFieldComparisonBias = kNumberOfExpressionPackedBits;
+  static constexpr size_t kFieldComparisonBiasSize =
+      MinimumBitsToStore(static_cast<size_t>(ComparisonBias::kLast));
+  static constexpr size_t kNumberOfConditionPackedBits =
+      kFieldComparisonBias + kFieldComparisonBiasSize;
+  static_assert(kNumberOfConditionPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+  using ComparisonBiasField =
+      BitField<ComparisonBias, kFieldComparisonBias, kFieldComparisonBiasSize>;
+
   template <typename T>
   int32_t Compare(T x, T y) const { return x > y ? 1 : (x < y ? -1 : 0); }
 
@@ -2917,9 +3010,6 @@
   }
 
  private:
-  // Needed if we merge a HCompare into a HCondition.
-  ComparisonBias bias_;
-
   DISALLOW_COPY_AND_ASSIGN(HCondition);
 };
 
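A side benefit visible in InstructionDataEquals above: once all configuration lives in packed_fields_, data equality is one integer compare, and any packed field added later is covered automatically. An illustrative stand-in:

#include <cstdint>

struct ConditionSketch {
  uint32_t packed_fields_;
  bool DataEquals(const ConditionSketch& other) const {
    // Bias, type, and flags are all checked by this single compare.
    return packed_fields_ == other.packed_fields_;
  }
};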
@@ -3332,8 +3422,8 @@
                          first,
                          second,
                          SideEffectsForArchRuntimeCalls(type),
-                         dex_pc),
-        bias_(bias) {
+                         dex_pc) {
+    SetPackedField<ComparisonBiasField>(bias);
     DCHECK_EQ(type, first->GetType());
     DCHECK_EQ(type, second->GetType());
   }
@@ -3368,16 +3458,16 @@
   }
 
   bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
-    return bias_ == other->AsCompare()->bias_;
+    return GetPackedFields() == other->AsCompare()->GetPackedFields();
   }
 
-  ComparisonBias GetBias() const { return bias_; }
+  ComparisonBias GetBias() const { return GetPackedField<ComparisonBiasField>(); }
 
   // Does this compare instruction have a "gt bias" (vs an "lt bias")?
-  // Only meaninfgul for floating-point comparisons.
+  // Only meaningful for floating-point comparisons.
   bool IsGtBias() const {
     DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType();
-    return bias_ == ComparisonBias::kGtBias;
+    return GetBias() == ComparisonBias::kGtBias;
   }
 
   static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type type) {
@@ -3388,6 +3478,15 @@
   DECLARE_INSTRUCTION(Compare);
 
  protected:
+  static constexpr size_t kFieldComparisonBias = kNumberOfExpressionPackedBits;
+  static constexpr size_t kFieldComparisonBiasSize =
+      MinimumBitsToStore(static_cast<size_t>(ComparisonBias::kLast));
+  static constexpr size_t kNumberOfComparePackedBits =
+      kFieldComparisonBias + kFieldComparisonBiasSize;
+  static_assert(kNumberOfComparePackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+  using ComparisonBiasField =
+      BitField<ComparisonBias, kFieldComparisonBias, kFieldComparisonBiasSize>;
+
   // Return an integer constant containing the result of a comparison evaluated at compile time.
   HIntConstant* MakeConstantComparison(int32_t value, uint32_t dex_pc) const {
     DCHECK(value == -1 || value == 0 || value == 1) << value;
@@ -3395,8 +3494,6 @@
   }
 
  private:
-  const ComparisonBias bias_;
-
   DISALLOW_COPY_AND_ASSIGN(HCompare);
 };
 
@@ -3464,9 +3561,9 @@
       : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC(), dex_pc),
         type_index_(type_index),
         dex_file_(dex_file),
-        can_throw_(can_throw),
-        finalizable_(finalizable),
         entrypoint_(entrypoint) {
+    SetPackedFlag<kFlagCanThrow>(can_throw);
+    SetPackedFlag<kFlagFinalizable>(finalizable);
     SetRawInputAt(0, cls);
     SetRawInputAt(1, current_method);
   }
@@ -3480,9 +3577,9 @@
   // It may throw when called on type that's not instantiable/accessible.
   // It can throw OOME.
   // TODO: distinguish between the two cases so we can for example allow allocation elimination.
-  bool CanThrow() const OVERRIDE { return can_throw_ || true; }
+  bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>() || true; }
 
-  bool IsFinalizable() const { return finalizable_; }
+  bool IsFinalizable() const { return GetPackedFlag<kFlagFinalizable>(); }
 
   bool CanBeNull() const OVERRIDE { return false; }
 
@@ -3497,10 +3594,14 @@
   DECLARE_INSTRUCTION(NewInstance);
 
  private:
+  static constexpr size_t kFlagCanThrow = kNumberOfExpressionPackedBits;
+  static constexpr size_t kFlagFinalizable = kFlagCanThrow + 1;
+  static constexpr size_t kNumberOfNewInstancePackedBits = kFlagFinalizable + 1;
+  static_assert(kNumberOfNewInstancePackedBits <= kMaxNumberOfPackedBits,
+                "Too many packed fields.");
+
   const uint16_t type_index_;
   const DexFile& dex_file_;
-  const bool can_throw_;
-  const bool finalizable_;
   QuickEntrypointEnum entrypoint_;
 
   DISALLOW_COPY_AND_ASSIGN(HNewInstance);
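One subtlety of the conversion, here and in several classes below: const bool can_throw_ and const bool finalizable_ were immutable by type, while a packed flag is mutable storage, so write-once-ness is now a convention enforced by not exposing a setter. A stand-in showing the shape:

#include <cstdint>

class NewInstanceSketch {
 public:
  explicit NewInstanceSketch(bool finalizable) {
    packed_fields_ |= (finalizable ? 1u : 0u) << kFlagFinalizable;
  }
  bool IsFinalizable() const {
    return ((packed_fields_ >> kFlagFinalizable) & 1u) != 0u;
  }

 private:
  static constexpr uint32_t kFlagFinalizable = 0u;
  uint32_t packed_fields_ = 0u;  // set once in the constructor; no setter exposed
};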
@@ -3550,12 +3651,14 @@
   // inputs at the end of their list of inputs.
   uint32_t GetNumberOfArguments() const { return number_of_arguments_; }
 
-  Primitive::Type GetType() const OVERRIDE { return return_type_; }
+  Primitive::Type GetType() const OVERRIDE { return GetPackedField<ReturnTypeField>(); }
 
   uint32_t GetDexMethodIndex() const { return dex_method_index_; }
   const DexFile& GetDexFile() const { return GetEnvironment()->GetDexFile(); }
 
-  InvokeType GetOriginalInvokeType() const { return original_invoke_type_; }
+  InvokeType GetOriginalInvokeType() const {
+    return GetPackedField<OriginalInvokeTypeField>();
+  }
 
   Intrinsics GetIntrinsic() const {
     return intrinsic_;
@@ -3570,7 +3673,7 @@
     return GetEnvironment()->IsFromInlinedInvoke();
   }
 
-  bool CanThrow() const OVERRIDE { return can_throw_; }
+  bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>(); }
 
   bool CanBeMoved() const OVERRIDE { return IsIntrinsic(); }
 
@@ -3591,6 +3694,20 @@
   DECLARE_ABSTRACT_INSTRUCTION(Invoke);
 
  protected:
+  static constexpr size_t kFieldOriginalInvokeType = kNumberOfGenericPackedBits;
+  static constexpr size_t kFieldOriginalInvokeTypeSize =
+      MinimumBitsToStore(static_cast<size_t>(kMaxInvokeType));
+  static constexpr size_t kFieldReturnType =
+      kFieldOriginalInvokeType + kFieldOriginalInvokeTypeSize;
+  static constexpr size_t kFieldReturnTypeSize =
+      MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast));
+  static constexpr size_t kFlagCanThrow = kFieldReturnType + kFieldReturnTypeSize;
+  static constexpr size_t kNumberOfInvokePackedBits = kFlagCanThrow + 1;
+  static_assert(kNumberOfInvokePackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+  using OriginalInvokeTypeField =
+      BitField<InvokeType, kFieldOriginalInvokeType, kFieldOriginalInvokeTypeSize>;
+  using ReturnTypeField = BitField<Primitive::Type, kFieldReturnType, kFieldReturnTypeSize>;
+
   HInvoke(ArenaAllocator* arena,
           uint32_t number_of_arguments,
           uint32_t number_of_other_inputs,
@@ -3603,12 +3720,12 @@
       number_of_arguments_(number_of_arguments),
       inputs_(number_of_arguments + number_of_other_inputs,
               arena->Adapter(kArenaAllocInvokeInputs)),
-      return_type_(return_type),
       dex_method_index_(dex_method_index),
-      original_invoke_type_(original_invoke_type),
-      can_throw_(true),
       intrinsic_(Intrinsics::kNone),
       intrinsic_optimizations_(0) {
+    SetPackedField<ReturnTypeField>(return_type);
+    SetPackedField<OriginalInvokeTypeField>(original_invoke_type);
+    SetPackedFlag<kFlagCanThrow>(true);
   }
 
   const HUserRecord<HInstruction*> InputRecordAt(size_t index) const OVERRIDE {
@@ -3619,14 +3736,11 @@
     inputs_[index] = input;
   }
 
-  void SetCanThrow(bool can_throw) { can_throw_ = can_throw; }
+  void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); }
 
   uint32_t number_of_arguments_;
   ArenaVector<HUserRecord<HInstruction*>> inputs_;
-  const Primitive::Type return_type_;
   const uint32_t dex_method_index_;
-  const InvokeType original_invoke_type_;
-  bool can_throw_;
   Intrinsics intrinsic_;
 
   // A magic word holding optimizations for intrinsics. See intrinsics.h.
@@ -3667,6 +3781,7 @@
     kNone,      // Class already initialized.
     kExplicit,  // Static call having explicit clinit check as last input.
     kImplicit,  // Static call implicitly requiring a clinit check.
+    kLast = kImplicit
   };
 
   // Determines how to load the target ArtMethod*.
@@ -3687,7 +3802,7 @@
     // the image relocatable or not.
     kDirectAddressWithFixup,
 
-    // Load from resoved methods array in the dex cache using a PC-relative load.
+    // Load from resolved methods array in the dex cache using a PC-relative load.
     // Used when we need to use the dex cache, for example for invoke-static that
     // may cause class initialization (the entry may point to a resolution method),
     // and we know that we can access the dex cache arrays using a PC-relative load.
@@ -3759,10 +3874,11 @@
                 dex_pc,
                 method_index,
                 original_invoke_type),
-        optimized_invoke_type_(optimized_invoke_type),
-        clinit_check_requirement_(clinit_check_requirement),
         target_method_(target_method),
-        dispatch_info_(dispatch_info) { }
+        dispatch_info_(dispatch_info) {
+    SetPackedField<OptimizedInvokeTypeField>(optimized_invoke_type);
+    SetPackedField<ClinitCheckRequirementField>(clinit_check_requirement);
+  }
 
   void SetDispatchInfo(const DispatchInfo& dispatch_info) {
     bool had_current_method_input = HasCurrentMethodInput();
@@ -3794,7 +3910,7 @@
   }
 
   bool CanBeNull() const OVERRIDE {
-    return return_type_ == Primitive::kPrimNot && !IsStringInit();
+    return GetPackedField<ReturnTypeField>() == Primitive::kPrimNot && !IsStringInit();
   }
 
   // Get the index of the special input, if any.
@@ -3805,9 +3921,12 @@
   uint32_t GetSpecialInputIndex() const { return GetNumberOfArguments(); }
   bool HasSpecialInput() const { return GetNumberOfArguments() != InputCount(); }
 
-  InvokeType GetOptimizedInvokeType() const { return optimized_invoke_type_; }
+  InvokeType GetOptimizedInvokeType() const {
+    return GetPackedField<OptimizedInvokeTypeField>();
+  }
+
   void SetOptimizedInvokeType(InvokeType invoke_type) {
-    optimized_invoke_type_ = invoke_type;
+    SetPackedField<OptimizedInvokeTypeField>(invoke_type);
   }
 
   MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
@@ -3854,7 +3973,9 @@
     return dispatch_info_.direct_code_ptr;
   }
 
-  ClinitCheckRequirement GetClinitCheckRequirement() const { return clinit_check_requirement_; }
+  ClinitCheckRequirement GetClinitCheckRequirement() const {
+    return GetPackedField<ClinitCheckRequirementField>();
+  }
 
   // Is this instruction a call to a static method?
   bool IsStatic() const {
@@ -3872,7 +3993,7 @@
     DCHECK(last_input->IsLoadClass() || last_input->IsClinitCheck()) << last_input->DebugName();
     RemoveAsUserOfInput(last_input_index);
     inputs_.pop_back();
-    clinit_check_requirement_ = new_requirement;
+    SetPackedField<ClinitCheckRequirementField>(new_requirement);
     DCHECK(!IsStaticWithExplicitClinitCheck());
   }
 
@@ -3888,13 +4009,13 @@
   // Is this a call to a static method whose declaring class has an
   // explicit initialization check in the graph?
   bool IsStaticWithExplicitClinitCheck() const {
-    return IsStatic() && (clinit_check_requirement_ == ClinitCheckRequirement::kExplicit);
+    return IsStatic() && (GetClinitCheckRequirement() == ClinitCheckRequirement::kExplicit);
   }
 
   // Is this a call to a static method whose declaring class has an
   // implicit initialization check requirement?
   bool IsStaticWithImplicitClinitCheck() const {
-    return IsStatic() && (clinit_check_requirement_ == ClinitCheckRequirement::kImplicit);
+    return IsStatic() && (GetClinitCheckRequirement() == ClinitCheckRequirement::kImplicit);
   }
 
   // Does this method load kind need the current method as an input?
@@ -3923,8 +4044,23 @@
   void RemoveInputAt(size_t index);
 
  private:
-  InvokeType optimized_invoke_type_;
-  ClinitCheckRequirement clinit_check_requirement_;
+  static constexpr size_t kFieldOptimizedInvokeType = kNumberOfInvokePackedBits;
+  static constexpr size_t kFieldOptimizedInvokeTypeSize =
+      MinimumBitsToStore(static_cast<size_t>(kMaxInvokeType));
+  static constexpr size_t kFieldClinitCheckRequirement =
+      kFieldOptimizedInvokeType + kFieldOptimizedInvokeTypeSize;
+  static constexpr size_t kFieldClinitCheckRequirementSize =
+      MinimumBitsToStore(static_cast<size_t>(ClinitCheckRequirement::kLast));
+  static constexpr size_t kNumberOfInvokeStaticOrDirectPackedBits =
+      kFieldClinitCheckRequirement + kFieldClinitCheckRequirementSize;
+  static_assert(kNumberOfInvokeStaticOrDirectPackedBits <= kMaxNumberOfPackedBits,
+                "Too many packed fields.");
+  using OptimizedInvokeTypeField =
+      BitField<InvokeType, kFieldOptimizedInvokeType, kFieldOptimizedInvokeTypeSize>;
+  using ClinitCheckRequirementField = BitField<ClinitCheckRequirement,
+                                               kFieldClinitCheckRequirement,
+                                               kFieldClinitCheckRequirementSize>;
+
   // The target method may refer to a different dex file or method index than the
   // original invoke. This happens for sharpened calls and for calls where a method
   // was redeclared in a derived class to increase visibility.
@@ -4305,8 +4441,6 @@
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue), GetDexPc());
   }
-  // There is no `Evaluate(HIntConstant* x, HLongConstant* y)`, as this
-  // case is handled as `x << static_cast<int>(y)`.
   HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue), GetDexPc());
@@ -4351,8 +4485,6 @@
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue), GetDexPc());
   }
-  // There is no `Evaluate(HIntConstant* x, HLongConstant* y)`, as this
-  // case is handled as `x >> static_cast<int>(y)`.
   HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue), GetDexPc());
@@ -4398,8 +4530,6 @@
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue), GetDexPc());
   }
-  // There is no `Evaluate(HIntConstant* x, HLongConstant* y)`, as this
-  // case is handled as `x >>> static_cast<int>(y)`.
   HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue), GetDexPc());
@@ -4435,21 +4565,12 @@
 
   bool IsCommutative() const OVERRIDE { return true; }
 
-  template <typename T, typename U>
-  auto Compute(T x, U y) const -> decltype(x & y) { return x & y; }
+  template <typename T> T Compute(T x, T y) const { return x & y; }
 
   HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HIntConstant* x, HLongConstant* y) const OVERRIDE {
-    return GetBlock()->GetGraph()->GetLongConstant(
-        Compute(x->GetValue(), y->GetValue()), GetDexPc());
-  }
-  HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
-    return GetBlock()->GetGraph()->GetLongConstant(
-        Compute(x->GetValue(), y->GetValue()), GetDexPc());
-  }
   HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -4481,21 +4602,12 @@
 
   bool IsCommutative() const OVERRIDE { return true; }
 
-  template <typename T, typename U>
-  auto Compute(T x, U y) const -> decltype(x | y) { return x | y; }
+  template <typename T> T Compute(T x, T y) const { return x | y; }
 
   HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HIntConstant* x, HLongConstant* y) const OVERRIDE {
-    return GetBlock()->GetGraph()->GetLongConstant(
-        Compute(x->GetValue(), y->GetValue()), GetDexPc());
-  }
-  HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
-    return GetBlock()->GetGraph()->GetLongConstant(
-        Compute(x->GetValue(), y->GetValue()), GetDexPc());
-  }
   HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -4527,21 +4639,12 @@
 
   bool IsCommutative() const OVERRIDE { return true; }
 
-  template <typename T, typename U>
-  auto Compute(T x, U y) const -> decltype(x ^ y) { return x ^ y; }
+  template <typename T> T Compute(T x, T y) const { return x ^ y; }
 
   HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HIntConstant* x, HLongConstant* y) const OVERRIDE {
-    return GetBlock()->GetGraph()->GetLongConstant(
-        Compute(x->GetValue(), y->GetValue()), GetDexPc());
-  }
-  HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
-    return GetBlock()->GetGraph()->GetLongConstant(
-        Compute(x->GetValue(), y->GetValue()), GetDexPc());
-  }
   HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
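The And/Or/Xor hunks also drop the two-type Compute(T x, U y) template along with the (int, long) and (long, int) Evaluate overloads: dex bitwise operations are homogeneous, so a single-type template suffices, and mixed-width folding now fails to compile instead of silently deducing a result type. Illustrative:

template <typename T> T ComputeAndSketch(T x, T y) { return x & y; }
// ComputeAndSketch(1, 2L);           // error: deduction conflict (int vs long)
// ComputeAndSketch<long>(1, 2L);     // explicit width, if it were ever needed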
@@ -4623,32 +4726,35 @@
       : HExpression(parameter_type, SideEffects::None(), kNoDexPc),
         dex_file_(dex_file),
         type_index_(type_index),
-        index_(index),
-        is_this_(is_this),
-        can_be_null_(!is_this) {}
+        index_(index) {
+    SetPackedFlag<kFlagIsThis>(is_this);
+    SetPackedFlag<kFlagCanBeNull>(!is_this);
+  }
 
   const DexFile& GetDexFile() const { return dex_file_; }
   uint16_t GetTypeIndex() const { return type_index_; }
   uint8_t GetIndex() const { return index_; }
-  bool IsThis() const { return is_this_; }
+  bool IsThis() const { return GetPackedFlag<kFlagIsThis>(); }
 
-  bool CanBeNull() const OVERRIDE { return can_be_null_; }
-  void SetCanBeNull(bool can_be_null) { can_be_null_ = can_be_null; }
+  bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+  void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); }
 
   DECLARE_INSTRUCTION(ParameterValue);
 
  private:
+  // Whether or not the parameter value corresponds to the 'this' argument.
+  static constexpr size_t kFlagIsThis = kNumberOfExpressionPackedBits;
+  static constexpr size_t kFlagCanBeNull = kFlagIsThis + 1;
+  static constexpr size_t kNumberOfParameterValuePackedBits = kFlagCanBeNull + 1;
+  static_assert(kNumberOfParameterValuePackedBits <= kMaxNumberOfPackedBits,
+                "Too many packed fields.");
+
   const DexFile& dex_file_;
   const uint16_t type_index_;
   // The index of this parameter in the parameters list. Must be less
   // than HGraph::number_of_in_vregs_.
   const uint8_t index_;
 
-  // Whether or not the parameter value corresponds to 'this' argument.
-  const bool is_this_;
-
-  bool can_be_null_;
-
   DISALLOW_COPY_AND_ASSIGN(HParameterValue);
 };
 
@@ -4773,14 +4879,14 @@
        uint32_t dex_pc = kNoDexPc)
       : HInstruction(SideEffects::None(), dex_pc),
         inputs_(number_of_inputs, arena->Adapter(kArenaAllocPhiInputs)),
-        reg_number_(reg_number),
-        type_(ToPhiType(type)),
-        // Phis are constructed live and marked dead if conflicting or unused.
-        // Individual steps of SsaBuilder should assume that if a phi has been
-        // marked dead, it can be ignored and will be removed by SsaPhiElimination.
-        is_live_(true),
-        can_be_null_(true) {
-    DCHECK_NE(type_, Primitive::kPrimVoid);
+        reg_number_(reg_number) {
+    SetPackedField<TypeField>(ToPhiType(type));
+    DCHECK_NE(GetType(), Primitive::kPrimVoid);
+    // Phis are constructed live and marked dead if conflicting or unused.
+    // Individual steps of SsaBuilder should assume that if a phi has been
+    // marked dead, it can be ignored and will be removed by SsaPhiElimination.
+    SetPackedFlag<kFlagIsLive>(true);
+    SetPackedFlag<kFlagCanBeNull>(true);
   }
 
   // Returns a type equivalent to the given `type`, but that a `HPhi` can hold.
@@ -4803,27 +4909,27 @@
   void AddInput(HInstruction* input);
   void RemoveInputAt(size_t index);
 
-  Primitive::Type GetType() const OVERRIDE { return type_; }
+  Primitive::Type GetType() const OVERRIDE { return GetPackedField<TypeField>(); }
   void SetType(Primitive::Type new_type) {
     // Make sure that only valid type changes occur. The following are allowed:
     //  (1) int  -> float/ref (primitive type propagation),
     //  (2) long -> double (primitive type propagation).
-    DCHECK(type_ == new_type ||
-           (type_ == Primitive::kPrimInt && new_type == Primitive::kPrimFloat) ||
-           (type_ == Primitive::kPrimInt && new_type == Primitive::kPrimNot) ||
-           (type_ == Primitive::kPrimLong && new_type == Primitive::kPrimDouble));
-    type_ = new_type;
+    DCHECK(GetType() == new_type ||
+           (GetType() == Primitive::kPrimInt && new_type == Primitive::kPrimFloat) ||
+           (GetType() == Primitive::kPrimInt && new_type == Primitive::kPrimNot) ||
+           (GetType() == Primitive::kPrimLong && new_type == Primitive::kPrimDouble));
+    SetPackedField<TypeField>(new_type);
   }
 
-  bool CanBeNull() const OVERRIDE { return can_be_null_; }
-  void SetCanBeNull(bool can_be_null) { can_be_null_ = can_be_null; }
+  bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+  void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); }
 
   uint32_t GetRegNumber() const { return reg_number_; }
 
-  void SetDead() { is_live_ = false; }
-  void SetLive() { is_live_ = true; }
-  bool IsDead() const { return !is_live_; }
-  bool IsLive() const { return is_live_; }
+  void SetDead() { SetPackedFlag<kFlagIsLive>(false); }
+  void SetLive() { SetPackedFlag<kFlagIsLive>(true); }
+  bool IsDead() const { return !IsLive(); }
+  bool IsLive() const { return GetPackedFlag<kFlagIsLive>(); }
 
   bool IsVRegEquivalentOf(HInstruction* other) const {
     return other != nullptr
@@ -4858,11 +4964,17 @@
   }
 
  private:
+  static constexpr size_t kFieldType = HInstruction::kNumberOfGenericPackedBits;
+  static constexpr size_t kFieldTypeSize =
+      MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast));
+  static constexpr size_t kFlagIsLive = kFieldType + kFieldTypeSize;
+  static constexpr size_t kFlagCanBeNull = kFlagIsLive + 1;
+  static constexpr size_t kNumberOfPhiPackedBits = kFlagCanBeNull + 1;
+  static_assert(kNumberOfPhiPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+  using TypeField = BitField<Primitive::Type, kFieldType, kFieldTypeSize>;
+
   ArenaVector<HUserRecord<HInstruction*> > inputs_;
   const uint32_t reg_number_;
-  Primitive::Type type_;
-  bool is_live_;
-  bool can_be_null_;
 
   DISALLOW_COPY_AND_ASSIGN(HPhi);
 };
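HPhi shows the payoff concentrated in one class: a Primitive::Type plus two bools collapse into bits of the inherited word. On a typical LP64 ABI, separate bools round the object size up to the next 8-byte boundary, while packed bits ride in the existing uint32_t. A quick demonstration with stand-in structs (exact sizes are ABI-dependent):

#include <cstdint>
#include <cstdio>

struct UnpackedSketch { void* vptr_stand_in; uint32_t id; bool f1, f2, f3, f4, f5; };
struct PackedFieldsSketch { void* vptr_stand_in; uint32_t id; uint32_t packed_fields; };

int main() {
  // Typically prints 24 and 16 on LP64.
  std::printf("%zu %zu\n", sizeof(UnpackedSketch), sizeof(PackedFieldsSketch));
}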
@@ -5001,8 +5113,8 @@
                     field_idx,
                     declaring_class_def_index,
                     dex_file,
-                    dex_cache),
-        value_can_be_null_(true) {
+                    dex_cache) {
+    SetPackedFlag<kFlagValueCanBeNull>(true);
     SetRawInputAt(0, object);
     SetRawInputAt(1, value);
   }
@@ -5016,14 +5128,18 @@
   Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
   bool IsVolatile() const { return field_info_.IsVolatile(); }
   HInstruction* GetValue() const { return InputAt(1); }
-  bool GetValueCanBeNull() const { return value_can_be_null_; }
-  void ClearValueCanBeNull() { value_can_be_null_ = false; }
+  bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
+  void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); }
 
   DECLARE_INSTRUCTION(InstanceFieldSet);
 
  private:
+  static constexpr size_t kFlagValueCanBeNull = kNumberOfGenericPackedBits;
+  static constexpr size_t kNumberOfInstanceFieldSetPackedBits = kFlagValueCanBeNull + 1;
+  static_assert(kNumberOfInstanceFieldSetPackedBits <= kMaxNumberOfPackedBits,
+                "Too many packed fields.");
+
   const FieldInfo field_info_;
-  bool value_can_be_null_;
 
   DISALLOW_COPY_AND_ASSIGN(HInstanceFieldSet);
 };
@@ -5092,11 +5208,11 @@
             SideEffects::ArrayWriteOfType(expected_component_type).Union(
                 SideEffectsForArchRuntimeCalls(value->GetType())).Union(
                     additional_side_effects),
-            dex_pc),
-        expected_component_type_(expected_component_type),
-        needs_type_check_(value->GetType() == Primitive::kPrimNot),
-        value_can_be_null_(true),
-        static_type_of_array_is_object_array_(false) {
+            dex_pc) {
+    SetPackedField<ExpectedComponentTypeField>(expected_component_type);
+    SetPackedFlag<kFlagNeedsTypeCheck>(value->GetType() == Primitive::kPrimNot);
+    SetPackedFlag<kFlagValueCanBeNull>(true);
+    SetPackedFlag<kFlagStaticTypeOfArrayIsObjectArray>(false);
     SetRawInputAt(0, array);
     SetRawInputAt(1, index);
     SetRawInputAt(2, value);
@@ -5104,11 +5220,11 @@
 
   bool NeedsEnvironment() const OVERRIDE {
     // We call a runtime method to throw ArrayStoreException.
-    return needs_type_check_;
+    return NeedsTypeCheck();
   }
 
   // Can throw ArrayStoreException.
-  bool CanThrow() const OVERRIDE { return needs_type_check_; }
+  bool CanThrow() const OVERRIDE { return NeedsTypeCheck(); }
 
   bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
     // TODO: Same as for ArrayGet.
@@ -5116,20 +5232,22 @@
   }
 
   void ClearNeedsTypeCheck() {
-    needs_type_check_ = false;
+    SetPackedFlag<kFlagNeedsTypeCheck>(false);
   }
 
   void ClearValueCanBeNull() {
-    value_can_be_null_ = false;
+    SetPackedFlag<kFlagValueCanBeNull>(false);
   }
 
   void SetStaticTypeOfArrayIsObjectArray() {
-    static_type_of_array_is_object_array_ = true;
+    SetPackedFlag<kFlagStaticTypeOfArrayIsObjectArray>(true);
   }
 
-  bool GetValueCanBeNull() const { return value_can_be_null_; }
-  bool NeedsTypeCheck() const { return needs_type_check_; }
-  bool StaticTypeOfArrayIsObjectArray() const { return static_type_of_array_is_object_array_; }
+  bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
+  bool NeedsTypeCheck() const { return GetPackedFlag<kFlagNeedsTypeCheck>(); }
+  bool StaticTypeOfArrayIsObjectArray() const {
+    return GetPackedFlag<kFlagStaticTypeOfArrayIsObjectArray>();
+  }
 
   HInstruction* GetArray() const { return InputAt(0); }
   HInstruction* GetIndex() const { return InputAt(1); }
@@ -5143,11 +5261,11 @@
     Primitive::Type value_type = GetValue()->GetType();
     return ((value_type == Primitive::kPrimFloat) || (value_type == Primitive::kPrimDouble))
         ? value_type
-        : expected_component_type_;
+        : GetRawExpectedComponentType();
   }
 
   Primitive::Type GetRawExpectedComponentType() const {
-    return expected_component_type_;
+    return GetPackedField<ExpectedComponentTypeField>();
   }
 
   static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type value_type) {
@@ -5157,12 +5275,20 @@
   DECLARE_INSTRUCTION(ArraySet);
 
  private:
-  const Primitive::Type expected_component_type_;
-  bool needs_type_check_;
-  bool value_can_be_null_;
+  static constexpr size_t kFieldExpectedComponentType = kNumberOfGenericPackedBits;
+  static constexpr size_t kFieldExpectedComponentTypeSize =
+      MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast));
+  static constexpr size_t kFlagNeedsTypeCheck =
+      kFieldExpectedComponentType + kFieldExpectedComponentTypeSize;
+  static constexpr size_t kFlagValueCanBeNull = kFlagNeedsTypeCheck + 1;
   // Cached information for the reference type info so that codegen
   // does not need to inspect the static type.
-  bool static_type_of_array_is_object_array_;
+  static constexpr size_t kFlagStaticTypeOfArrayIsObjectArray = kFlagValueCanBeNull + 1;
+  static constexpr size_t kNumberOfArraySetPackedBits =
+      kFlagStaticTypeOfArrayIsObjectArray + 1;
+  static_assert(kNumberOfArraySetPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+  using ExpectedComponentTypeField =
+      BitField<Primitive::Type, kFieldExpectedComponentType, kFieldExpectedComponentTypeSize>;
 
   DISALLOW_COPY_AND_ASSIGN(HArraySet);
 };
@@ -5272,14 +5398,15 @@
       : HExpression(Primitive::kPrimNot, SideEffectsForArchRuntimeCalls(), dex_pc),
         type_index_(type_index),
         dex_file_(dex_file),
-        is_referrers_class_(is_referrers_class),
-        generate_clinit_check_(false),
-        needs_access_check_(needs_access_check),
-        is_in_dex_cache_(is_in_dex_cache),
         loaded_class_rti_(ReferenceTypeInfo::CreateInvalid()) {
     // Referrers class should not need access check. We never inline unverified
     // methods so we can't possibly end up in this situation.
-    DCHECK(!is_referrers_class_ || !needs_access_check_);
+    DCHECK(!is_referrers_class || !needs_access_check);
+
+    SetPackedFlag<kFlagIsReferrersClass>(is_referrers_class);
+    SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
+    SetPackedFlag<kFlagIsInDexCache>(is_in_dex_cache);
+    SetPackedFlag<kFlagGenerateClInitCheck>(false);
     SetRawInputAt(0, current_method);
   }
 
@@ -5290,39 +5417,31 @@
     // Whether or not we need to generate the clinit check is processed in
     // prepare_for_register_allocator based on existing HInvokes and HClinitChecks.
     return other->AsLoadClass()->type_index_ == type_index_ &&
-        other->AsLoadClass()->needs_access_check_ == needs_access_check_;
+        other->AsLoadClass()->GetPackedFields() == GetPackedFields();
   }
 
   size_t ComputeHashCode() const OVERRIDE { return type_index_; }
 
   uint16_t GetTypeIndex() const { return type_index_; }
-  bool IsReferrersClass() const { return is_referrers_class_; }
   bool CanBeNull() const OVERRIDE { return false; }
 
   bool NeedsEnvironment() const OVERRIDE {
     return CanCallRuntime();
   }
 
-  bool MustGenerateClinitCheck() const {
-    return generate_clinit_check_;
-  }
-
   void SetMustGenerateClinitCheck(bool generate_clinit_check) {
     // The entrypoint the code generator is going to call does not do
     // clinit of the class.
     DCHECK(!NeedsAccessCheck());
-    generate_clinit_check_ = generate_clinit_check;
+    SetPackedFlag<kFlagGenerateClInitCheck>(generate_clinit_check);
   }
 
   bool CanCallRuntime() const {
     return MustGenerateClinitCheck() ||
-           (!is_referrers_class_ && !is_in_dex_cache_) ||
-           needs_access_check_;
+           (!IsReferrersClass() && !IsInDexCache()) ||
+           NeedsAccessCheck();
   }
 
-  bool NeedsAccessCheck() const {
-    return needs_access_check_;
-  }
 
   bool CanThrow() const OVERRIDE {
     return CanCallRuntime();
@@ -5340,25 +5459,31 @@
 
   const DexFile& GetDexFile() { return dex_file_; }
 
-  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { return !is_referrers_class_; }
+  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { return !IsReferrersClass(); }
 
   static SideEffects SideEffectsForArchRuntimeCalls() {
     return SideEffects::CanTriggerGC();
   }
 
-  bool IsInDexCache() const { return is_in_dex_cache_; }
+  bool IsReferrersClass() const { return GetPackedFlag<kFlagIsReferrersClass>(); }
+  bool NeedsAccessCheck() const { return GetPackedFlag<kFlagNeedsAccessCheck>(); }
+  bool IsInDexCache() const { return GetPackedFlag<kFlagIsInDexCache>(); }
+  bool MustGenerateClinitCheck() const { return GetPackedFlag<kFlagGenerateClInitCheck>(); }
 
   DECLARE_INSTRUCTION(LoadClass);
 
  private:
-  const uint16_t type_index_;
-  const DexFile& dex_file_;
-  const bool is_referrers_class_;
+  static constexpr size_t kFlagIsReferrersClass    = kNumberOfExpressionPackedBits;
+  static constexpr size_t kFlagNeedsAccessCheck    = kFlagIsReferrersClass + 1;
+  static constexpr size_t kFlagIsInDexCache        = kFlagNeedsAccessCheck + 1;
   // Whether this instruction must generate the initialization check.
   // Used for code generation.
-  bool generate_clinit_check_;
-  const bool needs_access_check_;
-  const bool is_in_dex_cache_;
+  static constexpr size_t kFlagGenerateClInitCheck = kFlagIsInDexCache + 1;
+  static constexpr size_t kNumberOfLoadClassPackedBits = kFlagGenerateClInitCheck + 1;
+  static_assert(kNumberOfLoadClassPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+
+  const uint16_t type_index_;
+  const DexFile& dex_file_;
 
   ReferenceTypeInfo loaded_class_rti_;
 
@@ -5372,8 +5497,8 @@
               uint32_t dex_pc,
               bool is_in_dex_cache)
       : HExpression(Primitive::kPrimNot, SideEffectsForArchRuntimeCalls(), dex_pc),
-        string_index_(string_index),
-        is_in_dex_cache_(is_in_dex_cache) {
+        string_index_(string_index) {
+    SetPackedFlag<kFlagIsInDexCache>(is_in_dex_cache);
     SetRawInputAt(0, current_method);
   }
 
@@ -5392,18 +5517,22 @@
 
   bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { return true; }
   bool CanBeNull() const OVERRIDE { return false; }
-  bool IsInDexCache() const { return is_in_dex_cache_; }
   bool CanThrow() const OVERRIDE { return !IsInDexCache(); }
 
   static SideEffects SideEffectsForArchRuntimeCalls() {
     return SideEffects::CanTriggerGC();
   }
 
+  bool IsInDexCache() const { return GetPackedFlag<kFlagIsInDexCache>(); }
+
   DECLARE_INSTRUCTION(LoadString);
 
  private:
+  static constexpr size_t kFlagIsInDexCache = kNumberOfExpressionPackedBits;
+  static constexpr size_t kNumberOfLoadStringPackedBits = kFlagIsInDexCache + 1;
+  static_assert(kNumberOfLoadStringPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+
   const uint32_t string_index_;
-  const bool is_in_dex_cache_;
 
   DISALLOW_COPY_AND_ASSIGN(HLoadString);
 };
@@ -5510,8 +5639,8 @@
                     field_idx,
                     declaring_class_def_index,
                     dex_file,
-                    dex_cache),
-        value_can_be_null_(true) {
+                    dex_cache) {
+    SetPackedFlag<kFlagValueCanBeNull>(true);
     SetRawInputAt(0, cls);
     SetRawInputAt(1, value);
   }
@@ -5522,14 +5651,18 @@
   bool IsVolatile() const { return field_info_.IsVolatile(); }
 
   HInstruction* GetValue() const { return InputAt(1); }
-  bool GetValueCanBeNull() const { return value_can_be_null_; }
-  void ClearValueCanBeNull() { value_can_be_null_ = false; }
+  bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
+  void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); }
 
   DECLARE_INSTRUCTION(StaticFieldSet);
 
  private:
+  static constexpr size_t kFlagValueCanBeNull = kNumberOfGenericPackedBits;
+  static constexpr size_t kNumberOfStaticFieldSetPackedBits = kFlagValueCanBeNull + 1;
+  static_assert(kNumberOfStaticFieldSetPackedBits <= kMaxNumberOfPackedBits,
+                "Too many packed fields.");
+
   const FieldInfo field_info_;
-  bool value_can_be_null_;
 
   DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
 };
@@ -5567,8 +5700,8 @@
                               uint32_t field_index,
                               uint32_t dex_pc)
       : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc),
-        field_type_(field_type),
         field_index_(field_index) {
+    SetPackedField<FieldTypeField>(field_type);
     DCHECK_EQ(field_type, value->GetType());
     SetRawInputAt(0, obj);
     SetRawInputAt(1, value);
@@ -5577,13 +5710,21 @@
   bool NeedsEnvironment() const OVERRIDE { return true; }
   bool CanThrow() const OVERRIDE { return true; }
 
-  Primitive::Type GetFieldType() const { return field_type_; }
+  Primitive::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); }
   uint32_t GetFieldIndex() const { return field_index_; }
 
   DECLARE_INSTRUCTION(UnresolvedInstanceFieldSet);
 
  private:
-  const Primitive::Type field_type_;
+  static constexpr size_t kFieldFieldType = HInstruction::kNumberOfGenericPackedBits;
+  static constexpr size_t kFieldFieldTypeSize =
+      MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast));
+  static constexpr size_t kNumberOfUnresolvedInstanceFieldSetPackedBits =
+      kFieldFieldType + kFieldFieldTypeSize;
+  static_assert(
+      kNumberOfUnresolvedInstanceFieldSetPackedBits <= HInstruction::kMaxNumberOfPackedBits,
+      "Too many packed fields.");
+  using FieldTypeField = BitField<Primitive::Type, kFieldFieldType, kFieldFieldTypeSize>;
+
   const uint32_t field_index_;
 
   DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldSet);
@@ -5619,8 +5760,8 @@
                             uint32_t field_index,
                             uint32_t dex_pc)
       : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc),
-        field_type_(field_type),
         field_index_(field_index) {
+    SetPackedField<FieldTypeField>(field_type);
     DCHECK_EQ(field_type, value->GetType());
     SetRawInputAt(0, value);
   }
@@ -5628,13 +5769,21 @@
   bool NeedsEnvironment() const OVERRIDE { return true; }
   bool CanThrow() const OVERRIDE { return true; }
 
-  Primitive::Type GetFieldType() const { return field_type_; }
+  Primitive::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); }
   uint32_t GetFieldIndex() const { return field_index_; }
 
   DECLARE_INSTRUCTION(UnresolvedStaticFieldSet);
 
  private:
-  const Primitive::Type field_type_;
+  static constexpr size_t kFieldFieldType = HInstruction::kNumberOfGenericPackedBits;
+  static constexpr size_t kFieldFieldTypeSize =
+      MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast));
+  static constexpr size_t kNumberOfUnresolvedStaticFieldSetPackedBits =
+      kFieldFieldType + kFieldFieldTypeSize;
+  static_assert(kNumberOfUnresolvedStaticFieldSetPackedBits <= HInstruction::kMaxNumberOfPackedBits,
+                "Too many packed fields.");
+  using FieldTypeField = BitField<Primitive::Type, kFieldFieldType, kFieldFieldTypeSize>;
+
   const uint32_t field_index_;
 
   DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldSet);
@@ -5698,7 +5847,8 @@
   kAbstractClassCheck,    // Can just walk the super class chain, starting one up.
   kInterfaceCheck,        // No optimization yet when checking against an interface.
   kArrayObjectCheck,      // Can just check if the array is not primitive.
-  kArrayCheck             // No optimization yet when checking against a generic array.
+  kArrayCheck,            // No optimization yet when checking against a generic array.
+  kLast = kArrayCheck
 };
 
 std::ostream& operator<<(std::ostream& os, TypeCheckKind rhs);
@@ -5711,9 +5861,9 @@
               uint32_t dex_pc)
       : HExpression(Primitive::kPrimBoolean,
                     SideEffectsForArchRuntimeCalls(check_kind),
-                    dex_pc),
-        check_kind_(check_kind),
-        must_do_null_check_(true) {
+                    dex_pc) {
+    SetPackedField<TypeCheckKindField>(check_kind);
+    SetPackedFlag<kFlagMustDoNullCheck>(true);
     SetRawInputAt(0, object);
     SetRawInputAt(1, constant);
   }
@@ -5725,16 +5875,14 @@
   }
 
   bool NeedsEnvironment() const OVERRIDE {
-    return CanCallRuntime(check_kind_);
+    return CanCallRuntime(GetTypeCheckKind());
   }
 
-  bool IsExactCheck() const { return check_kind_ == TypeCheckKind::kExactCheck; }
-
-  TypeCheckKind GetTypeCheckKind() const { return check_kind_; }
-
   // Used only in code generation.
-  bool MustDoNullCheck() const { return must_do_null_check_; }
-  void ClearMustDoNullCheck() { must_do_null_check_ = false; }
+  bool MustDoNullCheck() const { return GetPackedFlag<kFlagMustDoNullCheck>(); }
+  void ClearMustDoNullCheck() { SetPackedFlag<kFlagMustDoNullCheck>(false); }
+  TypeCheckKind GetTypeCheckKind() const { return GetPackedField<TypeCheckKindField>(); }
+  bool IsExactCheck() const { return GetTypeCheckKind() == TypeCheckKind::kExactCheck; }
 
   static bool CanCallRuntime(TypeCheckKind check_kind) {
     // Mips currently does runtime calls for any other checks.
@@ -5748,8 +5896,13 @@
   DECLARE_INSTRUCTION(InstanceOf);
 
  private:
-  const TypeCheckKind check_kind_;
-  bool must_do_null_check_;
+  static constexpr size_t kFieldTypeCheckKind = kNumberOfExpressionPackedBits;
+  static constexpr size_t kFieldTypeCheckKindSize =
+      MinimumBitsToStore(static_cast<size_t>(TypeCheckKind::kLast));
+  static constexpr size_t kFlagMustDoNullCheck = kFieldTypeCheckKind + kFieldTypeCheckKindSize;
+  static constexpr size_t kNumberOfInstanceOfPackedBits = kFlagMustDoNullCheck + 1;
+  static_assert(kNumberOfInstanceOfPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+  using TypeCheckKindField = BitField<TypeCheckKind, kFieldTypeCheckKind, kFieldTypeCheckKindSize>;
 
   DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
 };
@@ -5758,28 +5911,35 @@
  public:
   HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HExpression(Primitive::kPrimNot, SideEffects::None(), dex_pc),
-        upper_bound_(ReferenceTypeInfo::CreateInvalid()),
-        upper_can_be_null_(true),
-        can_be_null_(true) {
+        upper_bound_(ReferenceTypeInfo::CreateInvalid()) {
+    SetPackedFlag<kFlagUpperCanBeNull>(true);
+    SetPackedFlag<kFlagCanBeNull>(true);
     DCHECK_EQ(input->GetType(), Primitive::kPrimNot);
     SetRawInputAt(0, input);
   }
 
   // {Get,Set}Upper* should only be used in reference type propagation.
   const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; }
-  bool GetUpperCanBeNull() const { return upper_can_be_null_; }
+  bool GetUpperCanBeNull() const { return GetPackedFlag<kFlagUpperCanBeNull>(); }
   void SetUpperBound(const ReferenceTypeInfo& upper_bound, bool can_be_null);
 
   void SetCanBeNull(bool can_be_null) {
-    DCHECK(upper_can_be_null_ || !can_be_null);
-    can_be_null_ = can_be_null;
+    DCHECK(GetUpperCanBeNull() || !can_be_null);
+    SetPackedFlag<kFlagCanBeNull>(can_be_null);
   }
 
-  bool CanBeNull() const OVERRIDE { return can_be_null_; }
+  bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
 
   DECLARE_INSTRUCTION(BoundType);
 
  private:
+  // Represents the top constraint that can_be_null_ cannot exceed (i.e. if this
+  // is false then CanBeNull() cannot be true).
+  static constexpr size_t kFlagUpperCanBeNull = kNumberOfExpressionPackedBits;
+  static constexpr size_t kFlagCanBeNull = kFlagUpperCanBeNull + 1;
+  static constexpr size_t kNumberOfBoundTypePackedBits = kFlagCanBeNull + 1;
+  static_assert(kNumberOfBoundTypePackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+
   // Encodes the most upper class that this instruction can have. In other words
   // it is always the case that GetUpperBound().IsSupertypeOf(GetReferenceType()).
   // It is used to bound the type in cases like:
@@ -5787,10 +5947,6 @@
   //     // upper_bound_ will be ClassX
   //   }
   ReferenceTypeInfo upper_bound_;
-  // Represents the top constraint that can_be_null_ cannot exceed (i.e. if this
-  // is false then can_be_null_ cannot be true).
-  bool upper_can_be_null_;
-  bool can_be_null_;
 
   DISALLOW_COPY_AND_ASSIGN(HBoundType);
 };
@@ -5801,9 +5957,9 @@
              HLoadClass* constant,
              TypeCheckKind check_kind,
              uint32_t dex_pc)
-      : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc),
-        check_kind_(check_kind),
-        must_do_null_check_(true) {
+      : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc) {
+    SetPackedField<TypeCheckKindField>(check_kind);
+    SetPackedFlag<kFlagMustDoNullCheck>(true);
     SetRawInputAt(0, object);
     SetRawInputAt(1, constant);
   }
@@ -5821,17 +5977,21 @@
 
   bool CanThrow() const OVERRIDE { return true; }
 
-  bool MustDoNullCheck() const { return must_do_null_check_; }
-  void ClearMustDoNullCheck() { must_do_null_check_ = false; }
-  TypeCheckKind GetTypeCheckKind() const { return check_kind_; }
-
-  bool IsExactCheck() const { return check_kind_ == TypeCheckKind::kExactCheck; }
+  bool MustDoNullCheck() const { return GetPackedFlag<kFlagMustDoNullCheck>(); }
+  void ClearMustDoNullCheck() { SetPackedFlag<kFlagMustDoNullCheck>(false); }
+  TypeCheckKind GetTypeCheckKind() const { return GetPackedField<TypeCheckKindField>(); }
+  bool IsExactCheck() const { return GetTypeCheckKind() == TypeCheckKind::kExactCheck; }
 
   DECLARE_INSTRUCTION(CheckCast);
 
  private:
-  const TypeCheckKind check_kind_;
-  bool must_do_null_check_;
+  static constexpr size_t kFieldTypeCheckKind = kNumberOfGenericPackedBits;
+  static constexpr size_t kFieldTypeCheckKindSize =
+      MinimumBitsToStore(static_cast<size_t>(TypeCheckKind::kLast));
+  static constexpr size_t kFlagMustDoNullCheck = kFieldTypeCheckKind + kFieldTypeCheckKindSize;
+  static constexpr size_t kNumberOfCheckCastPackedBits = kFlagMustDoNullCheck + 1;
+  static_assert(kNumberOfCheckCastPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+  using TypeCheckKindField = BitField<TypeCheckKind, kFieldTypeCheckKind, kFieldTypeCheckKindSize>;
 
   DISALLOW_COPY_AND_ASSIGN(HCheckCast);
 };
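
Each packed field reserves only as many bits as the enum's kLast value needs, via MinimumBitsToStore. A hedged sketch of what that helper computes (the real one lives in base/bit_utils.h and is built on MostSignificantBit):

#include <cstddef>

// Number of bits needed to represent any value in [0, max_value];
// a simplified analogue of ART's MinimumBitsToStore.
constexpr size_t MinimumBitsToStoreSketch(size_t max_value) {
  return max_value == 0 ? 0 : 1 + MinimumBitsToStoreSketch(max_value >> 1);
}

static_assert(MinimumBitsToStoreSketch(0) == 0, "zero needs no bits");
static_assert(MinimumBitsToStoreSketch(1) == 1, "{0,1} fits in one bit");
static_assert(MinimumBitsToStoreSketch(3) == 2, "{0..3} fits in two bits");
static_assert(MinimumBitsToStoreSketch(4) == 3, "{0..4} needs three bits");

int main() { return 0; }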
@@ -5840,30 +6000,40 @@
  public:
   explicit HMemoryBarrier(MemBarrierKind barrier_kind, uint32_t dex_pc = kNoDexPc)
       : HTemplateInstruction(
-            SideEffects::AllWritesAndReads(), dex_pc),  // Assume write/read on all fields/arrays.
-        barrier_kind_(barrier_kind) {}
+            SideEffects::AllWritesAndReads(), dex_pc) {  // Assume write/read on all fields/arrays.
+    SetPackedField<BarrierKindField>(barrier_kind);
+  }
 
-  MemBarrierKind GetBarrierKind() { return barrier_kind_; }
+  MemBarrierKind GetBarrierKind() { return GetPackedField<BarrierKindField>(); }
 
   DECLARE_INSTRUCTION(MemoryBarrier);
 
  private:
-  const MemBarrierKind barrier_kind_;
+  static constexpr size_t kFieldBarrierKind = HInstruction::kNumberOfGenericPackedBits;
+  static constexpr size_t kFieldBarrierKindSize =
+      MinimumBitsToStore(static_cast<size_t>(kLastBarrierKind));
+  static constexpr size_t kNumberOfMemoryBarrierPackedBits =
+      kFieldBarrierKind + kFieldBarrierKindSize;
+  static_assert(kNumberOfMemoryBarrierPackedBits <= kMaxNumberOfPackedBits,
+                "Too many packed fields.");
+  using BarrierKindField = BitField<MemBarrierKind, kFieldBarrierKind, kFieldBarrierKindSize>;
 
   DISALLOW_COPY_AND_ASSIGN(HMemoryBarrier);
 };
 
 class HMonitorOperation : public HTemplateInstruction<1> {
  public:
-  enum OperationKind {
+  enum class OperationKind {
     kEnter,
     kExit,
+    kLast = kExit
   };
 
   HMonitorOperation(HInstruction* object, OperationKind kind, uint32_t dex_pc)
     : HTemplateInstruction(
-          SideEffects::AllExceptGCDependency(), dex_pc),  // Assume write/read on all fields/arrays.
-      kind_(kind) {
+          SideEffects::AllExceptGCDependency(),  // Assume write/read on all fields/arrays.
+          dex_pc) {
+    SetPackedField<OperationKindField>(kind);
     SetRawInputAt(0, object);
   }
 
@@ -5877,13 +6047,20 @@
     return IsEnter();
   }
 
-
-  bool IsEnter() const { return kind_ == kEnter; }
+  OperationKind GetOperationKind() const { return GetPackedField<OperationKindField>(); }
+  bool IsEnter() const { return GetOperationKind() == OperationKind::kEnter; }
 
   DECLARE_INSTRUCTION(MonitorOperation);
 
  private:
-  const OperationKind kind_;
+  static constexpr size_t kFieldOperationKind = HInstruction::kNumberOfGenericPackedBits;
+  static constexpr size_t kFieldOperationKindSize =
+      MinimumBitsToStore(static_cast<size_t>(OperationKind::kLast));
+  static constexpr size_t kNumberOfMonitorOperationPackedBits =
+      kFieldOperationKind + kFieldOperationKindSize;
+  static_assert(kNumberOfMonitorOperationPackedBits <= HInstruction::kMaxNumberOfPackedBits,
+                "Too many packed fields.");
+  using OperationKindField = BitField<OperationKind, kFieldOperationKind, kFieldOperationKindSize>;
 
   DISALLOW_COPY_AND_ASSIGN(HMonitorOperation);
@@ -6060,6 +6237,9 @@
 
 }  // namespace art
 
+#if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
+#include "nodes_shared.h"
+#endif
 #ifdef ART_ENABLE_CODEGEN_arm
 #include "nodes_arm.h"
 #endif
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
index 445cdab..75a71e7 100644
--- a/compiler/optimizing/nodes_arm64.h
+++ b/compiler/optimizing/nodes_arm64.h
@@ -118,38 +118,64 @@
   DISALLOW_COPY_AND_ASSIGN(HArm64IntermediateAddress);
 };
 
-class HArm64MultiplyAccumulate : public HExpression<3> {
+class HArm64BitwiseNegatedRight : public HBinaryOperation {
  public:
-  HArm64MultiplyAccumulate(Primitive::Type type,
-                           InstructionKind op,
-                           HInstruction* accumulator,
-                           HInstruction* mul_left,
-                           HInstruction* mul_right,
-                           uint32_t dex_pc = kNoDexPc)
-      : HExpression(type, SideEffects::None(), dex_pc), op_kind_(op) {
-    SetRawInputAt(kInputAccumulatorIndex, accumulator);
-    SetRawInputAt(kInputMulLeftIndex, mul_left);
-    SetRawInputAt(kInputMulRightIndex, mul_right);
+  HArm64BitwiseNegatedRight(Primitive::Type result_type,
+                            InstructionKind op,
+                            HInstruction* left,
+                            HInstruction* right,
+                            uint32_t dex_pc = kNoDexPc)
+    : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc),
+      op_kind_(op) {
+    DCHECK(op == HInstruction::kAnd || op == HInstruction::kOr || op == HInstruction::kXor) << op;
   }
 
-  static constexpr int kInputAccumulatorIndex = 0;
-  static constexpr int kInputMulLeftIndex = 1;
-  static constexpr int kInputMulRightIndex = 2;
+  template <typename T, typename U>
+  auto Compute(T x, U y) const -> decltype(x & ~y) {
+    static_assert(std::is_same<decltype(x & ~y), decltype(x | ~y)>::value &&
+                  std::is_same<decltype(x & ~y), decltype(x ^ ~y)>::value,
+                  "Inconsistent negated bitwise types");
+    switch (op_kind_) {
+      case HInstruction::kAnd:
+        return x & ~y;
+      case HInstruction::kOr:
+        return x | ~y;
+      case HInstruction::kXor:
+        return x ^ ~y;
+      default:
+        LOG(FATAL) << "Unreachable";
+        UNREACHABLE();
+    }
+  }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
-    return op_kind_ == other->AsArm64MultiplyAccumulate()->op_kind_;
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+    return GetBlock()->GetGraph()->GetIntConstant(
+        Compute(x->GetValue(), y->GetValue()), GetDexPc());
+  }
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+    return GetBlock()->GetGraph()->GetLongConstant(
+        Compute(x->GetValue(), y->GetValue()), GetDexPc());
+  }
+  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+    LOG(FATAL) << DebugName() << " is not defined for float values";
+    UNREACHABLE();
+  }
+  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+    LOG(FATAL) << DebugName() << " is not defined for double values";
+    UNREACHABLE();
   }
 
   InstructionKind GetOpKind() const { return op_kind_; }
 
-  DECLARE_INSTRUCTION(Arm64MultiplyAccumulate);
+  DECLARE_INSTRUCTION(Arm64BitwiseNegatedRight);
 
  private:
-  // Indicates if this is a MADD or MSUB.
-  InstructionKind op_kind_;
+  // Specifies the bitwise operation applied to the negated right input.
+  const InstructionKind op_kind_;
 
-  DISALLOW_COPY_AND_ASSIGN(HArm64MultiplyAccumulate);
+  DISALLOW_COPY_AND_ASSIGN(HArm64BitwiseNegatedRight);
 };
 
 }  // namespace art
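
The new Compute above folds constants for the and-not/or-not/xor-not patterns that map to ARM64's BIC, ORN and EON instructions. A standalone rendering of the same semantics (names below are illustrative, not ART's API):

#include <cassert>
#include <cstdint>

enum class NegatedOp { kAnd, kOr, kXor };

// x OP ~y: the right operand is bitwise-negated before the operation,
// matching BIC (kAnd), ORN (kOr) and EON (kXor) on ARM64.
uint32_t ComputeNegatedRight(NegatedOp op, uint32_t x, uint32_t y) {
  switch (op) {
    case NegatedOp::kAnd: return x & ~y;
    case NegatedOp::kOr:  return x | ~y;
    case NegatedOp::kXor: return x ^ ~y;
  }
  return 0u;  // unreachable
}

int main() {
  assert(ComputeNegatedRight(NegatedOp::kAnd, 0xF0u, 0x30u) == 0xC0u);   // BIC
  assert(ComputeNegatedRight(NegatedOp::kOr, 0x00u, ~0x0Fu) == 0x0Fu);   // ORN
  assert(ComputeNegatedRight(NegatedOp::kXor, 0x0Fu, 0xF0u) == ~0xFFu);  // EON
  return 0;
}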
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
new file mode 100644
index 0000000..b04b622
--- /dev/null
+++ b/compiler/optimizing/nodes_shared.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_NODES_SHARED_H_
+#define ART_COMPILER_OPTIMIZING_NODES_SHARED_H_
+
+namespace art {
+
+class HMultiplyAccumulate : public HExpression<3> {
+ public:
+  HMultiplyAccumulate(Primitive::Type type,
+                      InstructionKind op,
+                      HInstruction* accumulator,
+                      HInstruction* mul_left,
+                      HInstruction* mul_right,
+                      uint32_t dex_pc = kNoDexPc)
+      : HExpression(type, SideEffects::None(), dex_pc), op_kind_(op) {
+    SetRawInputAt(kInputAccumulatorIndex, accumulator);
+    SetRawInputAt(kInputMulLeftIndex, mul_left);
+    SetRawInputAt(kInputMulRightIndex, mul_right);
+  }
+
+  static constexpr int kInputAccumulatorIndex = 0;
+  static constexpr int kInputMulLeftIndex = 1;
+  static constexpr int kInputMulRightIndex = 2;
+
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+    return op_kind_ == other->AsMultiplyAccumulate()->op_kind_;
+  }
+
+  InstructionKind GetOpKind() const { return op_kind_; }
+
+  DECLARE_INSTRUCTION(MultiplyAccumulate);
+
+ private:
+  // Indicates if this is a MADD or MSUB.
+  const InstructionKind op_kind_;
+
+  DISALLOW_COPY_AND_ASSIGN(HMultiplyAccumulate);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_NODES_SHARED_H_
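
HMultiplyAccumulate exists so both the ARM and ARM64 simplifiers can fuse a multiply feeding an add or subtract into one node (MLA/MLS, MADD/MSUB). A hedged sketch of the arithmetic being fused; the recognizer itself walks HAdd/HSub users of an HMul and is not reproduced here:

#include <cassert>
#include <cstdint>

// acc + (left * right) and acc - (left * right): the two shapes that
// HMultiplyAccumulate encodes via its kAdd/kSub op kind.
int64_t MultiplyAdd(int64_t acc, int64_t left, int64_t right) {
  return acc + left * right;  // MADD / MLA
}
int64_t MultiplySub(int64_t acc, int64_t left, int64_t right) {
  return acc - left * right;  // MSUB / MLS
}

int main() {
  assert(MultiplyAdd(10, 3, 4) == 22);
  assert(MultiplySub(10, 3, 4) == -2);
  return 0;
}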
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index b1891c9..c1b4d24 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -60,6 +60,7 @@
 #include "induction_var_analysis.h"
 #include "inliner.h"
 #include "instruction_simplifier.h"
+#include "instruction_simplifier_arm.h"
 #include "intrinsics.h"
 #include "jit/debugger_interface.h"
 #include "jit/jit_code_cache.h"
@@ -438,7 +439,10 @@
     case kThumb2:
     case kArm: {
       arm::DexCacheArrayFixups* fixups = new (arena) arm::DexCacheArrayFixups(graph, stats);
+      arm::InstructionSimplifierArm* simplifier =
+          new (arena) arm::InstructionSimplifierArm(graph, stats);
       HOptimization* arm_optimizations[] = {
+        simplifier,
         fixups
       };
       RunOptimizations(arm_optimizations, arraysize(arm_optimizations), pass_observer);
@@ -479,7 +483,11 @@
 static void AllocateRegisters(HGraph* graph,
                               CodeGenerator* codegen,
                               PassObserver* pass_observer) {
-  PrepareForRegisterAllocation(graph).Run();
+  {
+    PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName,
+                    pass_observer);
+    PrepareForRegisterAllocation(graph).Run();
+  }
   SsaLivenessAnalysis liveness(graph, codegen);
   {
     PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
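
Wrapping PrepareForRegisterAllocation in a PassScope (with the pass-name constant added below in prepare_for_register_allocation.h) makes that pass visible to the timing and visualizer machinery like every other pass. The underlying idiom is a small RAII guard; a hedged standalone analogue, with ScopedPass as an illustrative name rather than ART's class:

#include <chrono>
#include <iostream>
#include <string>

// Constructor marks the pass start; the destructor reports the end, so the
// pass is accounted for even on early returns out of the scope.
class ScopedPass {
 public:
  explicit ScopedPass(std::string name)
      : name_(std::move(name)), start_(std::chrono::steady_clock::now()) {}
  ~ScopedPass() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - start_).count();
    std::cout << name_ << " took " << us << "us\n";
  }
 private:
  std::string name_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  {
    ScopedPass scope("prepare_for_register_allocation");
    // ... run the pass ...
  }
  return 0;
}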
@@ -857,7 +865,7 @@
   const uint32_t access_flags = method->GetAccessFlags();
   const InvokeType invoke_type = method->GetInvokeType();
 
-  ArenaAllocator arena(Runtime::Current()->GetArenaPool());
+  ArenaAllocator arena(Runtime::Current()->GetJitArenaPool());
   CodeVectorAllocator code_allocator(&arena);
   std::unique_ptr<CodeGenerator> codegen;
   {
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index c8b8b0d..c90724c 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -32,6 +32,9 @@
 
   void Run();
 
+  static constexpr const char* kPrepareForRegisterAllocationPassName =
+      "prepare_for_register_allocation";
+
  private:
   void VisitNullCheck(HNullCheck* check) OVERRIDE;
   void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE;
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 4784de1..54cbdf8 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -63,8 +63,7 @@
 void StackMapStream::AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value) {
   if (kind != DexRegisterLocation::Kind::kNone) {
     // Ensure we only use non-compressed location kind at this stage.
-    DCHECK(DexRegisterLocation::IsShortLocationKind(kind))
-        << DexRegisterLocation::PrettyDescriptor(kind);
+    DCHECK(DexRegisterLocation::IsShortLocationKind(kind)) << kind;
     DexRegisterLocation location(kind, value);
 
     // Look for Dex register `location` in the location catalog (using the
@@ -257,6 +256,7 @@
   // Ensure we reached the end of the Dex registers location_catalog.
   DCHECK_EQ(location_catalog_offset, dex_register_location_catalog_region.size());
 
+  ArenaBitVector empty_bitmask(allocator_, 0, /* expandable */ false);
   uintptr_t next_dex_register_map_offset = 0;
   uintptr_t next_inline_info_offset = 0;
   for (size_t i = 0, e = stack_maps_.size(); i < e; ++i) {
@@ -268,6 +268,9 @@
     stack_map.SetRegisterMask(stack_map_encoding_, entry.register_mask);
     if (entry.sp_mask != nullptr) {
       stack_map.SetStackMask(stack_map_encoding_, *entry.sp_mask);
+    } else {
+      // The MemoryRegion is not guaranteed to be zeroed, so clear the bits explicitly.
+      stack_map.SetStackMask(stack_map_encoding_, empty_bitmask);
     }
 
     if (entry.num_dex_registers == 0 || (entry.live_dex_registers_mask->NumSetBits() == 0)) {
@@ -344,6 +347,11 @@
       }
     }
   }
+
+  // Verify all written data in debug build.
+  if (kIsDebugBuild) {
+    CheckCodeInfo(region);
+  }
 }
 
 void StackMapStream::FillInDexRegisterMap(DexRegisterMap dex_register_map,
@@ -423,4 +431,90 @@
   return true;
 }
 
+// Helper for CheckCodeInfo: checks that the register map has the expected content.
+void StackMapStream::CheckDexRegisterMap(const CodeInfo& code_info,
+                                         const DexRegisterMap& dex_register_map,
+                                         size_t num_dex_registers,
+                                         BitVector* live_dex_registers_mask,
+                                         size_t dex_register_locations_index) const {
+  StackMapEncoding encoding = code_info.ExtractEncoding();
+  for (size_t reg = 0; reg < num_dex_registers; reg++) {
+    // Find the location we tried to encode.
+    DexRegisterLocation expected = DexRegisterLocation::None();
+    if (live_dex_registers_mask->IsBitSet(reg)) {
+      size_t catalog_index = dex_register_locations_[dex_register_locations_index++];
+      expected = location_catalog_entries_[catalog_index];
+    }
+    // Compare to the seen location.
+    if (expected.GetKind() == DexRegisterLocation::Kind::kNone) {
+      DCHECK(!dex_register_map.IsValid() || !dex_register_map.IsDexRegisterLive(reg));
+    } else {
+      DCHECK(dex_register_map.IsDexRegisterLive(reg));
+      DexRegisterLocation seen = dex_register_map.GetDexRegisterLocation(
+          reg, num_dex_registers, code_info, encoding);
+      DCHECK_EQ(expected.GetKind(), seen.GetKind());
+      DCHECK_EQ(expected.GetValue(), seen.GetValue());
+    }
+  }
+  if (num_dex_registers == 0) {
+    DCHECK(!dex_register_map.IsValid());
+  }
+}
+
+// Check that all StackMapStream inputs are correctly encoded by trying to read them back.
+void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
+  CodeInfo code_info(region);
+  StackMapEncoding encoding = code_info.ExtractEncoding();
+  DCHECK_EQ(code_info.GetNumberOfStackMaps(), stack_maps_.size());
+  for (size_t s = 0; s < stack_maps_.size(); ++s) {
+    const StackMap stack_map = code_info.GetStackMapAt(s, encoding);
+    StackMapEntry entry = stack_maps_[s];
+
+    // Check main stack map fields.
+    DCHECK_EQ(stack_map.GetNativePcOffset(encoding), entry.native_pc_offset);
+    DCHECK_EQ(stack_map.GetDexPc(encoding), entry.dex_pc);
+    DCHECK_EQ(stack_map.GetRegisterMask(encoding), entry.register_mask);
+    MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
+    if (entry.sp_mask != nullptr) {
+      DCHECK_GE(stack_mask.size_in_bits(), entry.sp_mask->GetNumberOfBits());
+      for (size_t b = 0; b < stack_mask.size_in_bits(); b++) {
+        DCHECK_EQ(stack_mask.LoadBit(b), entry.sp_mask->IsBitSet(b));
+      }
+    } else {
+      for (size_t b = 0; b < stack_mask.size_in_bits(); b++) {
+        DCHECK_EQ(stack_mask.LoadBit(b), 0u);
+      }
+    }
+
+    CheckDexRegisterMap(code_info,
+                        code_info.GetDexRegisterMapOf(
+                            stack_map, encoding, entry.num_dex_registers),
+                        entry.num_dex_registers,
+                        entry.live_dex_registers_mask,
+                        entry.dex_register_locations_start_index);
+
+    // Check inline info.
+    DCHECK_EQ(stack_map.HasInlineInfo(encoding), (entry.inlining_depth != 0));
+    if (entry.inlining_depth != 0) {
+      InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+      DCHECK_EQ(inline_info.GetDepth(), entry.inlining_depth);
+      for (size_t d = 0; d < entry.inlining_depth; ++d) {
+        size_t inline_info_index = entry.inline_infos_start_index + d;
+        DCHECK_LT(inline_info_index, inline_infos_.size());
+        InlineInfoEntry inline_entry = inline_infos_[inline_info_index];
+        DCHECK_EQ(inline_info.GetDexPcAtDepth(d), inline_entry.dex_pc);
+        DCHECK_EQ(inline_info.GetMethodIndexAtDepth(d), inline_entry.method_index);
+        DCHECK_EQ(inline_info.GetInvokeTypeAtDepth(d), inline_entry.invoke_type);
+
+        CheckDexRegisterMap(code_info,
+                            code_info.GetDexRegisterMapAtDepth(
+                                d, inline_info, encoding, inline_entry.num_dex_registers),
+                            inline_entry.num_dex_registers,
+                            inline_entry.live_dex_registers_mask,
+                            inline_entry.dex_register_locations_start_index);
+      }
+    }
+  }
+}
+
 }  // namespace art
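
CheckCodeInfo follows a write-then-reread discipline: encode everything, then decode it with the production readers and DCHECK each field, in debug builds only. A minimal hedged analogue of the pattern (the encoding below is invented for illustration):

#include <cassert>
#include <cstdint>
#include <vector>

static constexpr bool kDebugBuild = true;  // stand-in for ART's kIsDebugBuild

// Encode 16-bit values little-endian, then read them back and verify.
std::vector<uint8_t> Encode(const std::vector<uint16_t>& values) {
  std::vector<uint8_t> out;
  for (uint16_t v : values) {
    out.push_back(static_cast<uint8_t>(v & 0xFF));
    out.push_back(static_cast<uint8_t>(v >> 8));
  }
  if (kDebugBuild) {
    // Re-read through the same layout the consumer will use.
    for (size_t i = 0; i < values.size(); ++i) {
      uint16_t seen = static_cast<uint16_t>(out[2 * i] | (out[2 * i + 1] << 8));
      assert(seen == values[i]);
    }
  }
  return out;
}

int main() {
  Encode({1, 256, 0xFFFF});
  return 0;
}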
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index fc27a2b..016a911 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -167,6 +167,13 @@
                             const BitVector& live_dex_registers_mask,
                             uint32_t start_index_in_dex_register_locations) const;
 
+  void CheckDexRegisterMap(const CodeInfo& code_info,
+                           const DexRegisterMap& dex_register_map,
+                           size_t num_dex_registers,
+                           BitVector* live_dex_registers_mask,
+                           size_t dex_register_locations_index) const;
+  void CheckCodeInfo(MemoryRegion region) const;
+
   ArenaAllocator* allocator_;
   ArenaVector<StackMapEntry> stack_maps_;
 
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index f96376d..a894565 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -545,6 +545,9 @@
   virtual void movw(Register rd, uint16_t imm16, Condition cond = AL) = 0;
   virtual void movt(Register rd, uint16_t imm16, Condition cond = AL) = 0;
   virtual void rbit(Register rd, Register rm, Condition cond = AL) = 0;
+  virtual void rev(Register rd, Register rm, Condition cond = AL) = 0;
+  virtual void rev16(Register rd, Register rm, Condition cond = AL) = 0;
+  virtual void revsh(Register rd, Register rm, Condition cond = AL) = 0;
 
   // Multiply instructions.
   virtual void mul(Register rd, Register rn, Register rm, Condition cond = AL) = 0;
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index ebca25b..0a227b2 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -750,6 +750,35 @@
 }
 
 
+void Arm32Assembler::EmitMiscellaneous(Condition cond, uint8_t op1,
+                                       uint8_t op2, uint32_t a_part,
+                                       uint32_t rest) {
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                      B26 | B25 | B23 |
+                      (op1 << 20) |
+                      (a_part << 16) |
+                      (op2 << 5) |
+                      B4 |
+                      rest;
+  Emit(encoding);
+}
+
+
+void Arm32Assembler::EmitReverseBytes(Register rd, Register rm, Condition cond,
+                                      uint8_t op1, uint8_t op2) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(rm, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  CHECK_NE(rd, PC);
+  CHECK_NE(rm, PC);
+
+  int32_t encoding = (static_cast<int32_t>(rd) << kRdShift) |
+                     (0b1111 << 8) |
+                     static_cast<int32_t>(rm);
+  EmitMiscellaneous(cond, op1, op2, 0b1111, encoding);
+}
+
+
 void Arm32Assembler::rbit(Register rd, Register rm, Condition cond) {
   CHECK_NE(rd, kNoRegister);
   CHECK_NE(rm, kNoRegister);
@@ -764,6 +793,21 @@
 }
 
 
+void Arm32Assembler::rev(Register rd, Register rm, Condition cond) {
+  EmitReverseBytes(rd, rm, cond, 0b011, 0b001);
+}
+
+
+void Arm32Assembler::rev16(Register rd, Register rm, Condition cond) {
+  EmitReverseBytes(rd, rm, cond, 0b011, 0b101);
+}
+
+
+void Arm32Assembler::revsh(Register rd, Register rm, Condition cond) {
+  EmitReverseBytes(rd, rm, cond, 0b111, 0b101);
+}
+
+
 void Arm32Assembler::EmitMulOp(Condition cond, int32_t opcode,
                                Register rd, Register rn,
                                Register rm, Register rs) {
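
The field layout in EmitMiscellaneous/EmitReverseBytes can be cross-checked against the ARM ARM by hand. A hedged sketch reproducing the emitter's shifts for rev r1, r0 with cond = AL (op1 = 0b011, op2 = 0b001, a_part = 0b1111, as in rev() above) and checking them against 0xE6BF1F30, the documented A1 encoding:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t cond = 0xEu;               // AL
  const uint32_t op1 = 0b011, op2 = 0b001;  // rev
  const uint32_t rd = 1, rm = 0;            // r1, r0
  uint32_t encoding = (cond << 28) |
                      (1u << 26) | (1u << 25) | (1u << 23) |  // B26 | B25 | B23
                      (op1 << 20) |
                      (0b1111u << 16) |                       // a_part
                      (rd << 12) | (0b1111u << 8) |           // "rest" bits
                      (op2 << 5) | (1u << 4) |                // B4
                      rm;
  assert(encoding == 0xE6BF1F30u);  // ARM ARM A1 listing for REV
  return 0;
}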
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index bf332fe..e3e05ca 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -91,6 +91,9 @@
   void movw(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
   void movt(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
   void rbit(Register rd, Register rm, Condition cond = AL) OVERRIDE;
+  void rev(Register rd, Register rm, Condition cond = AL) OVERRIDE;
+  void rev16(Register rd, Register rm, Condition cond = AL) OVERRIDE;
+  void revsh(Register rd, Register rm, Condition cond = AL) OVERRIDE;
 
   // Multiply instructions.
   void mul(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
@@ -388,6 +391,11 @@
 
   void EmitVPushPop(uint32_t reg, int nregs, bool push, bool dbl, Condition cond);
 
+  void EmitMiscellaneous(Condition cond, uint8_t op1, uint8_t op2,
+                         uint32_t a_part, uint32_t rest);
+  void EmitReverseBytes(Register rd, Register rm, Condition cond,
+                        uint8_t op1, uint8_t op2);
+
   void EmitBranch(Condition cond, Label* label, bool link);
   static int32_t EncodeBranchOffset(int offset, int32_t inst);
   static int DecodeBranchOffset(int32_t inst);
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
index 4380596..e570e22 100644
--- a/compiler/utils/arm/assembler_arm32_test.cc
+++ b/compiler/utils/arm/assembler_arm32_test.cc
@@ -887,4 +887,16 @@
   T3Helper(&arm::Arm32Assembler::rbit, true, "rbit{cond} {reg1}, {reg2}", "rbit");
 }
 
+TEST_F(AssemblerArm32Test, rev) {
+  T3Helper(&arm::Arm32Assembler::rev, true, "rev{cond} {reg1}, {reg2}", "rev");
+}
+
+TEST_F(AssemblerArm32Test, rev16) {
+  T3Helper(&arm::Arm32Assembler::rev16, true, "rev16{cond} {reg1}, {reg2}", "rev16");
+}
+
+TEST_F(AssemblerArm32Test, revsh) {
+  T3Helper(&arm::Arm32Assembler::revsh, true, "revsh{cond} {reg1}, {reg2}", "revsh");
+}
+
 }  // namespace art
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 52023a6..15298b3 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2569,20 +2569,36 @@
 }
 
 
+void Thumb2Assembler::Emit32Miscellaneous(uint8_t op1,
+                                          uint8_t op2,
+                                          uint32_t rest_encoding) {
+  int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B23 |
+      op1 << 20 |
+      0xf << 12 |
+      B7 |
+      op2 << 4 |
+      rest_encoding;
+  Emit32(encoding);
+}
+
+
+void Thumb2Assembler::Emit16Miscellaneous(uint32_t rest_encoding) {
+  int16_t encoding = B15 | B13 | B12 |
+      rest_encoding;
+  Emit16(encoding);
+}
+
 void Thumb2Assembler::clz(Register rd, Register rm, Condition cond) {
   CHECK_NE(rd, kNoRegister);
   CHECK_NE(rm, kNoRegister);
   CheckCondition(cond);
   CHECK_NE(rd, PC);
   CHECK_NE(rm, PC);
-  int32_t encoding = B31 | B30 | B29 | B28 | B27 |
-      B25 | B23 | B21 | B20 |
+  int32_t encoding =
       static_cast<uint32_t>(rm) << 16 |
-      0xf << 12 |
       static_cast<uint32_t>(rd) << 8 |
-      B7 |
       static_cast<uint32_t>(rm);
-  Emit32(encoding);
+  Emit32Miscellaneous(0b11, 0b00, encoding);
 }
 
 
@@ -2630,14 +2646,55 @@
   CHECK_NE(rm, PC);
   CHECK_NE(rd, SP);
   CHECK_NE(rm, SP);
-  int32_t encoding = B31 | B30 | B29 | B28 | B27 |
-      B25 | B23 | B20 |
+  int32_t encoding =
       static_cast<uint32_t>(rm) << 16 |
-      0xf << 12 |
       static_cast<uint32_t>(rd) << 8 |
-      B7 | B5 |
       static_cast<uint32_t>(rm);
-  Emit32(encoding);
+
+  Emit32Miscellaneous(0b01, 0b10, encoding);
+}
+
+
+void Thumb2Assembler::EmitReverseBytes(Register rd, Register rm,
+                                       uint32_t op) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(rm, kNoRegister);
+  CHECK_NE(rd, PC);
+  CHECK_NE(rm, PC);
+  CHECK_NE(rd, SP);
+  CHECK_NE(rm, SP);
+
+  if (!IsHighRegister(rd) && !IsHighRegister(rm) && !force_32bit_) {
+    uint16_t t1_op = B11 | B9 | (op << 6);
+    int16_t encoding = t1_op |
+        static_cast<uint16_t>(rm) << 3 |
+        static_cast<uint16_t>(rd);
+    Emit16Miscellaneous(encoding);
+  } else {
+    int32_t encoding =
+        static_cast<uint32_t>(rm) << 16 |
+        static_cast<uint32_t>(rd) << 8 |
+        static_cast<uint32_t>(rm);
+    Emit32Miscellaneous(0b01, op, encoding);
+  }
+}
+
+
+void Thumb2Assembler::rev(Register rd, Register rm, Condition cond) {
+  CheckCondition(cond);
+  EmitReverseBytes(rd, rm, 0b00);
+}
+
+
+void Thumb2Assembler::rev16(Register rd, Register rm, Condition cond) {
+  CheckCondition(cond);
+  EmitReverseBytes(rd, rm, 0b01);
+}
+
+
+void Thumb2Assembler::revsh(Register rd, Register rm, Condition cond) {
+  CheckCondition(cond);
+  EmitReverseBytes(rd, rm, 0b11);
 }
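
Similarly, the 16-bit path composes B15|B13|B12 from Emit16Miscellaneous with B11|B9|(op << 6) from EmitReverseBytes. A hedged sketch checking that this yields 0xBA01, the documented T1 encoding of rev r1, r0:

#include <cassert>
#include <cstdint>

int main() {
  const uint16_t op = 0b00;       // rev (rev16 = 0b01, revsh = 0b11)
  const uint16_t rd = 1, rm = 0;  // r1, r0 (low registers only on this path)
  uint16_t encoding = (1u << 15) | (1u << 13) | (1u << 12) |  // Emit16Miscellaneous
                      (1u << 11) | (1u << 9) | (op << 6) |    // EmitReverseBytes t1_op
                      (rm << 3) | rd;
  assert(encoding == 0xBA01u);  // ARM ARM T1 listing for REV
  return 0;
}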
 
 
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index bf07b2d..6b61aca 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -117,6 +117,9 @@
   void movw(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
   void movt(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
   void rbit(Register rd, Register rm, Condition cond = AL) OVERRIDE;
+  void rev(Register rd, Register rm, Condition cond = AL) OVERRIDE;
+  void rev16(Register rd, Register rm, Condition cond = AL) OVERRIDE;
+  void revsh(Register rd, Register rm, Condition cond = AL) OVERRIDE;
 
   // Multiply instructions.
   void mul(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
@@ -644,6 +647,17 @@
                           Register rd,
                           const ShifterOperand& so);
 
+  // Emit a single 32 bit miscellaneous instruction.
+  void Emit32Miscellaneous(uint8_t op1,
+                           uint8_t op2,
+                           uint32_t rest_encoding);
+
+  // Emit reverse byte instructions: rev, rev16, revsh.
+  void EmitReverseBytes(Register rd, Register rm, uint32_t op);
+
+  // Emit a single 16 bit miscellaneous instruction.
+  void Emit16Miscellaneous(uint32_t rest_encoding);
+
   // Must the instruction be 32 bits or can it possibly be encoded
   // in 16 bits?
   bool Is32BitDataProcessing(Condition cond,
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index 7b32b0f..650b089 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -1331,4 +1331,28 @@
   DriverStr(expected, "rbit");
 }
 
+TEST_F(AssemblerThumb2Test, rev) {
+  __ rev(arm::R1, arm::R0);
+
+  const char* expected = "rev r1, r0\n";
+
+  DriverStr(expected, "rev");
+}
+
+TEST_F(AssemblerThumb2Test, rev16) {
+  __ rev16(arm::R1, arm::R0);
+
+  const char* expected = "rev16 r1, r0\n";
+
+  DriverStr(expected, "rev16");
+}
+
+TEST_F(AssemblerThumb2Test, revsh) {
+  __ revsh(arm::R1, arm::R0);
+
+  const char* expected = "revsh r1, r0\n";
+
+  DriverStr(expected, "revsh");
+}
+
 }  // namespace art
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 6fd65ee..7c41813 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -537,12 +537,20 @@
   EmitI(0x7, rt, static_cast<Register>(0), imm16);
 }
 
+void MipsAssembler::Bc1f(uint16_t imm16) {
+  Bc1f(0, imm16);
+}
+
 void MipsAssembler::Bc1f(int cc, uint16_t imm16) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitI(0x11, static_cast<Register>(0x8), static_cast<Register>(cc << 2), imm16);
 }
 
+void MipsAssembler::Bc1t(uint16_t imm16) {
+  Bc1t(0, imm16);
+}
+
 void MipsAssembler::Bc1t(int cc, uint16_t imm16) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
@@ -843,6 +851,22 @@
   EmitFR(0x11, 0x11, ft, fs, fd, 0x3);
 }
 
+void MipsAssembler::SqrtS(FRegister fd, FRegister fs) {
+  EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x4);
+}
+
+void MipsAssembler::SqrtD(FRegister fd, FRegister fs) {
+  EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x4);
+}
+
+void MipsAssembler::AbsS(FRegister fd, FRegister fs) {
+  EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x5);
+}
+
+void MipsAssembler::AbsD(FRegister fd, FRegister fs) {
+  EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x5);
+}
+
 void MipsAssembler::MovS(FRegister fd, FRegister fs) {
   EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x6);
 }
@@ -859,84 +883,140 @@
   EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x7);
 }
 
+void MipsAssembler::CunS(FRegister fs, FRegister ft) {
+  CunS(0, fs, ft);
+}
+
 void MipsAssembler::CunS(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x31);
 }
 
+void MipsAssembler::CeqS(FRegister fs, FRegister ft) {
+  CeqS(0, fs, ft);
+}
+
 void MipsAssembler::CeqS(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x32);
 }
 
+void MipsAssembler::CueqS(FRegister fs, FRegister ft) {
+  CueqS(0, fs, ft);
+}
+
 void MipsAssembler::CueqS(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x33);
 }
 
+void MipsAssembler::ColtS(FRegister fs, FRegister ft) {
+  ColtS(0, fs, ft);
+}
+
 void MipsAssembler::ColtS(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x34);
 }
 
+void MipsAssembler::CultS(FRegister fs, FRegister ft) {
+  CultS(0, fs, ft);
+}
+
 void MipsAssembler::CultS(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x35);
 }
 
+void MipsAssembler::ColeS(FRegister fs, FRegister ft) {
+  ColeS(0, fs, ft);
+}
+
 void MipsAssembler::ColeS(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x36);
 }
 
+void MipsAssembler::CuleS(FRegister fs, FRegister ft) {
+  CuleS(0, fs, ft);
+}
+
 void MipsAssembler::CuleS(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x37);
 }
 
+void MipsAssembler::CunD(FRegister fs, FRegister ft) {
+  CunD(0, fs, ft);
+}
+
 void MipsAssembler::CunD(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x31);
 }
 
+void MipsAssembler::CeqD(FRegister fs, FRegister ft) {
+  CeqD(0, fs, ft);
+}
+
 void MipsAssembler::CeqD(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x32);
 }
 
+void MipsAssembler::CueqD(FRegister fs, FRegister ft) {
+  CueqD(0, fs, ft);
+}
+
 void MipsAssembler::CueqD(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x33);
 }
 
+void MipsAssembler::ColtD(FRegister fs, FRegister ft) {
+  ColtD(0, fs, ft);
+}
+
 void MipsAssembler::ColtD(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x34);
 }
 
+void MipsAssembler::CultD(FRegister fs, FRegister ft) {
+  CultD(0, fs, ft);
+}
+
 void MipsAssembler::CultD(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x35);
 }
 
+void MipsAssembler::ColeD(FRegister fs, FRegister ft) {
+  ColeD(0, fs, ft);
+}
+
 void MipsAssembler::ColeD(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
   EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x36);
 }
 
+void MipsAssembler::CuleD(FRegister fs, FRegister ft) {
+  CuleD(0, fs, ft);
+}
+
 void MipsAssembler::CuleD(int cc, FRegister fs, FRegister ft) {
   CHECK(!IsR6());
   CHECK(IsUint<3>(cc)) << cc;
@@ -1055,6 +1135,70 @@
   EmitR(0, rs, static_cast<Register>((cc << 2) | 1), rd, 0, 0x01);
 }
 
+void MipsAssembler::MovfS(FRegister fd, FRegister fs, int cc) {
+  CHECK(!IsR6());
+  CHECK(IsUint<3>(cc)) << cc;
+  EmitFR(0x11, 0x10, static_cast<FRegister>(cc << 2), fs, fd, 0x11);
+}
+
+void MipsAssembler::MovfD(FRegister fd, FRegister fs, int cc) {
+  CHECK(!IsR6());
+  CHECK(IsUint<3>(cc)) << cc;
+  EmitFR(0x11, 0x11, static_cast<FRegister>(cc << 2), fs, fd, 0x11);
+}
+
+void MipsAssembler::MovtS(FRegister fd, FRegister fs, int cc) {
+  CHECK(!IsR6());
+  CHECK(IsUint<3>(cc)) << cc;
+  EmitFR(0x11, 0x10, static_cast<FRegister>((cc << 2) | 1), fs, fd, 0x11);
+}
+
+void MipsAssembler::MovtD(FRegister fd, FRegister fs, int cc) {
+  CHECK(!IsR6());
+  CHECK(IsUint<3>(cc)) << cc;
+  EmitFR(0x11, 0x11, static_cast<FRegister>((cc << 2) | 1), fs, fd, 0x11);
+}
+
+void MipsAssembler::SelS(FRegister fd, FRegister fs, FRegister ft) {
+  CHECK(IsR6());
+  EmitFR(0x11, 0x10, ft, fs, fd, 0x10);
+}
+
+void MipsAssembler::SelD(FRegister fd, FRegister fs, FRegister ft) {
+  CHECK(IsR6());
+  EmitFR(0x11, 0x11, ft, fs, fd, 0x10);
+}
+
+void MipsAssembler::ClassS(FRegister fd, FRegister fs) {
+  CHECK(IsR6());
+  EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x1b);
+}
+
+void MipsAssembler::ClassD(FRegister fd, FRegister fs) {
+  CHECK(IsR6());
+  EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x1b);
+}
+
+void MipsAssembler::MinS(FRegister fd, FRegister fs, FRegister ft) {
+  CHECK(IsR6());
+  EmitFR(0x11, 0x10, ft, fs, fd, 0x1c);
+}
+
+void MipsAssembler::MinD(FRegister fd, FRegister fs, FRegister ft) {
+  CHECK(IsR6());
+  EmitFR(0x11, 0x11, ft, fs, fd, 0x1c);
+}
+
+void MipsAssembler::MaxS(FRegister fd, FRegister fs, FRegister ft) {
+  CHECK(IsR6());
+  EmitFR(0x11, 0x10, ft, fs, fd, 0x1e);
+}
+
+void MipsAssembler::MaxD(FRegister fd, FRegister fs, FRegister ft) {
+  CHECK(IsR6());
+  EmitFR(0x11, 0x11, ft, fs, fd, 0x1e);
+}
+
 void MipsAssembler::TruncLS(FRegister fd, FRegister fs) {
   EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x09);
 }
@@ -1095,6 +1239,14 @@
   EmitFR(0x11, 0x15, static_cast<FRegister>(0), fs, fd, 0x21);
 }
 
+void MipsAssembler::FloorWS(FRegister fd, FRegister fs) {
+  EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0xf);
+}
+
+void MipsAssembler::FloorWD(FRegister fd, FRegister fs) {
+  EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0xf);
+}
+
 void MipsAssembler::Mfc1(Register rt, FRegister fs) {
   EmitFR(0x11, 0x00, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
 }
@@ -2062,11 +2214,19 @@
   }
 }
 
+void MipsAssembler::Bc1f(MipsLabel* label) {
+  Bc1f(0, label);
+}
+
 void MipsAssembler::Bc1f(int cc, MipsLabel* label) {
   CHECK(IsUint<3>(cc)) << cc;
   Bcond(label, kCondF, static_cast<Register>(cc), ZERO);
 }
 
+void MipsAssembler::Bc1t(MipsLabel* label) {
+  Bc1t(0, label);
+}
+
 void MipsAssembler::Bc1t(int cc, MipsLabel* label) {
   CHECK(IsUint<3>(cc)) << cc;
   Bcond(label, kCondT, static_cast<Register>(cc), ZERO);
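
The FP helpers above all funnel through EmitFR's fixed field layout (opcode, fmt, ft, fs, fd, funct). A hedged sketch assembling sqrt.s $f2, $f4 from the same fields; 0x46002084 is the standard MIPS32 encoding:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t opcode = 0x11;  // COP1
  const uint32_t fmt = 0x10;     // single precision ("S"); 0x11 is "D"
  const uint32_t ft = 0, fs = 4, fd = 2, funct = 0x4;  // sqrt
  uint32_t encoding = (opcode << 26) | (fmt << 21) | (ft << 16) |
                      (fs << 11) | (fd << 6) | funct;
  assert(encoding == 0x46002084u);  // sqrt.s $f2, $f4
  return 0;
}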
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 2262af4..a7179fd 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -51,6 +51,20 @@
   kStoreDoubleword
 };
 
+// Used to test the values returned by ClassS/ClassD.
+enum FPClassMaskType {
+  kSignalingNaN      = 0x001,
+  kQuietNaN          = 0x002,
+  kNegativeInfinity  = 0x004,
+  kNegativeNormal    = 0x008,
+  kNegativeSubnormal = 0x010,
+  kNegativeZero      = 0x020,
+  kPositiveInfinity  = 0x040,
+  kPositiveNormal    = 0x080,
+  kPositiveSubnormal = 0x100,
+  kPositiveZero      = 0x200,
+};
+
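
ClassS/ClassD produce a bit set drawn from FPClassMaskType, so callers test membership with a mask rather than with equality. A small hedged usage sketch (only a subset of the enum is repeated here):

#include <cassert>
#include <cstdint>

enum FPClassMaskType {
  kQuietNaN     = 0x002,
  kNegativeZero = 0x020,
  kPositiveZero = 0x200,
};

// The classification result is a set of bits; membership in a category is
// an AND against the combined mask.
bool IsAnyZero(uint32_t class_bits) {
  return (class_bits & (kNegativeZero | kPositiveZero)) != 0;
}

int main() {
  assert(IsAnyZero(kPositiveZero));
  assert(!IsAnyZero(kQuietNaN));
  return 0;
}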
 class MipsLabel : public Label {
  public:
   MipsLabel() : prev_branch_id_plus_one_(0) {}
@@ -191,7 +205,9 @@
   void Bgez(Register rt, uint16_t imm16);
   void Blez(Register rt, uint16_t imm16);
   void Bgtz(Register rt, uint16_t imm16);
+  void Bc1f(uint16_t imm16);  // R2
   void Bc1f(int cc, uint16_t imm16);  // R2
+  void Bc1t(uint16_t imm16);  // R2
   void Bc1t(int cc, uint16_t imm16);  // R2
   void J(uint32_t addr26);
   void Jal(uint32_t addr26);
@@ -227,24 +243,42 @@
   void SubD(FRegister fd, FRegister fs, FRegister ft);
   void MulD(FRegister fd, FRegister fs, FRegister ft);
   void DivD(FRegister fd, FRegister fs, FRegister ft);
+  void SqrtS(FRegister fd, FRegister fs);
+  void SqrtD(FRegister fd, FRegister fs);
+  void AbsS(FRegister fd, FRegister fs);
+  void AbsD(FRegister fd, FRegister fs);
   void MovS(FRegister fd, FRegister fs);
   void MovD(FRegister fd, FRegister fs);
   void NegS(FRegister fd, FRegister fs);
   void NegD(FRegister fd, FRegister fs);
 
+  void CunS(FRegister fs, FRegister ft);  // R2
   void CunS(int cc, FRegister fs, FRegister ft);  // R2
+  void CeqS(FRegister fs, FRegister ft);  // R2
   void CeqS(int cc, FRegister fs, FRegister ft);  // R2
+  void CueqS(FRegister fs, FRegister ft);  // R2
   void CueqS(int cc, FRegister fs, FRegister ft);  // R2
+  void ColtS(FRegister fs, FRegister ft);  // R2
   void ColtS(int cc, FRegister fs, FRegister ft);  // R2
+  void CultS(FRegister fs, FRegister ft);  // R2
   void CultS(int cc, FRegister fs, FRegister ft);  // R2
+  void ColeS(FRegister fs, FRegister ft);  // R2
   void ColeS(int cc, FRegister fs, FRegister ft);  // R2
+  void CuleS(FRegister fs, FRegister ft);  // R2
   void CuleS(int cc, FRegister fs, FRegister ft);  // R2
+  void CunD(FRegister fs, FRegister ft);  // R2
   void CunD(int cc, FRegister fs, FRegister ft);  // R2
+  void CeqD(FRegister fs, FRegister ft);  // R2
   void CeqD(int cc, FRegister fs, FRegister ft);  // R2
+  void CueqD(FRegister fs, FRegister ft);  // R2
   void CueqD(int cc, FRegister fs, FRegister ft);  // R2
+  void ColtD(FRegister fs, FRegister ft);  // R2
   void ColtD(int cc, FRegister fs, FRegister ft);  // R2
+  void CultD(FRegister fs, FRegister ft);  // R2
   void CultD(int cc, FRegister fs, FRegister ft);  // R2
+  void ColeD(FRegister fs, FRegister ft);  // R2
   void ColeD(int cc, FRegister fs, FRegister ft);  // R2
+  void CuleD(FRegister fs, FRegister ft);  // R2
   void CuleD(int cc, FRegister fs, FRegister ft);  // R2
   void CmpUnS(FRegister fd, FRegister fs, FRegister ft);  // R6
   void CmpEqS(FRegister fd, FRegister fs, FRegister ft);  // R6
@@ -266,8 +300,20 @@
   void CmpOrD(FRegister fd, FRegister fs, FRegister ft);  // R6
   void CmpUneD(FRegister fd, FRegister fs, FRegister ft);  // R6
   void CmpNeD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void Movf(Register rd, Register rs, int cc);  // R2
-  void Movt(Register rd, Register rs, int cc);  // R2
+  void Movf(Register rd, Register rs, int cc = 0);  // R2
+  void Movt(Register rd, Register rs, int cc = 0);  // R2
+  void MovfS(FRegister fd, FRegister fs, int cc = 0);  // R2
+  void MovfD(FRegister fd, FRegister fs, int cc = 0);  // R2
+  void MovtS(FRegister fd, FRegister fs, int cc = 0);  // R2
+  void MovtD(FRegister fd, FRegister fs, int cc = 0);  // R2
+  void SelS(FRegister fd, FRegister fs, FRegister ft);  // R6
+  void SelD(FRegister fd, FRegister fs, FRegister ft);  // R6
+  void ClassS(FRegister fd, FRegister fs);  // R6
+  void ClassD(FRegister fd, FRegister fs);  // R6
+  void MinS(FRegister fd, FRegister fs, FRegister ft);  // R6
+  void MinD(FRegister fd, FRegister fs, FRegister ft);  // R6
+  void MaxS(FRegister fd, FRegister fs, FRegister ft);  // R6
+  void MaxD(FRegister fd, FRegister fs, FRegister ft);  // R6
 
   void TruncLS(FRegister fd, FRegister fs);  // R2+, FR=1
   void TruncLD(FRegister fd, FRegister fs);  // R2+, FR=1
@@ -279,6 +325,8 @@
   void Cvtds(FRegister fd, FRegister fs);
   void Cvtsl(FRegister fd, FRegister fs);  // R2+, FR=1
   void Cvtdl(FRegister fd, FRegister fs);  // R2+, FR=1
+  void FloorWS(FRegister fd, FRegister fs);
+  void FloorWD(FRegister fd, FRegister fs);
 
   void Mfc1(Register rt, FRegister fs);
   void Mtc1(Register rt, FRegister fs);
@@ -322,7 +370,9 @@
   void Bge(Register rs, Register rt, MipsLabel* label);
   void Bltu(Register rs, Register rt, MipsLabel* label);
   void Bgeu(Register rs, Register rt, MipsLabel* label);
+  void Bc1f(MipsLabel* label);  // R2
   void Bc1f(int cc, MipsLabel* label);  // R2
+  void Bc1t(MipsLabel* label);  // R2
   void Bc1t(int cc, MipsLabel* label);  // R2
   void Bc1eqz(FRegister ft, MipsLabel* label);  // R6
   void Bc1nez(FRegister ft, MipsLabel* label);  // R6
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index f7efdc5..d9a2f30 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -66,6 +66,7 @@
 #include "interpreter/unstarted_runtime.h"
 #include "jit/offline_profiling_info.h"
 #include "leb128.h"
+#include "linker/multi_oat_relative_patcher.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/object-inl.h"
@@ -216,7 +217,7 @@
   UsageError("  --image=<file.art>: specifies an output image filename.");
   UsageError("      Example: --image=/system/framework/boot.art");
   UsageError("");
-  UsageError("  --image-format=(uncompressed|lz4):");
+  UsageError("  --image-format=(uncompressed|lz4|lz4hc):");
   UsageError("      Which format to store the image.");
   UsageError("      Example: --image-format=lz4");
   UsageError("      Default: uncompressed");
@@ -680,6 +681,8 @@
     const StringPiece format_str = option.substr(substr.length());
     if (format_str == "lz4") {
       image_storage_mode_ = ImageHeader::kStorageModeLZ4;
+    } else if (format_str == "lz4hc") {
+      image_storage_mode_ = ImageHeader::kStorageModeLZ4HC;
     } else if (format_str == "uncompressed") {
       image_storage_mode_ = ImageHeader::kStorageModeUncompressed;
     } else {
@@ -695,11 +698,6 @@
       Usage("Can't have both --image and (--app-image-fd or --app-image-file)");
     }
 
-    if (IsBootImage()) {
-      // We need the boot image to always be debuggable.
-      compiler_options_->debuggable_ = true;
-    }
-
     if (oat_filenames_.empty() && oat_fd_ == -1) {
       Usage("Output must be supplied with either --oat-file or --oat-fd");
     }
@@ -1265,6 +1263,24 @@
     dex_caches_.clear();
   }
 
+  void LoadClassProfileDescriptors() {
+    if (profile_compilation_info_ != nullptr && app_image_) {
+      Runtime* runtime = Runtime::Current();
+      CHECK(runtime != nullptr);
+      std::set<DexCacheResolvedClasses> resolved_classes(
+          profile_compilation_info_->GetResolvedClasses());
+      image_classes_.reset(new std::unordered_set<std::string>(
+          runtime->GetClassLinker()->GetClassDescriptorsForProfileKeys(resolved_classes)));
+      VLOG(compiler) << "Loaded " << image_classes_->size()
+                     << " image class descriptors from profile";
+      if (VLOG_IS_ON(compiler)) {
+        for (const std::string& s : *image_classes_) {
+          LOG(INFO) << "Image class " << s;
+        }
+      }
+    }
+  }
+
   // Set up the environment for compilation. Includes starting the runtime and loading/opening the
   // boot class path.
   bool Setup() {
@@ -1365,7 +1381,7 @@
         if (opened_dex_files_map != nullptr) {
           opened_dex_files_maps_.push_back(std::move(opened_dex_files_map));
           for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files) {
-            dex_file_oat_filename_map_.emplace(dex_file.get(), oat_filenames_[i]);
+            dex_file_oat_index_map_.emplace(dex_file.get(), i);
             opened_dex_files_.push_back(std::move(dex_file));
           }
         } else {
@@ -1514,13 +1530,12 @@
                                      IsBootImage(),
                                      image_classes_.release(),
                                      compiled_classes_.release(),
-                                     nullptr,
+                                     /* compiled_methods */ nullptr,
                                      thread_count_,
                                      dump_stats_,
                                      dump_passes_,
                                      compiler_phases_timings_.get(),
                                      swap_fd_,
-                                     &dex_file_oat_filename_map_,
                                      profile_compilation_info_.get()));
     driver_->SetDexFilesForOatFile(dex_files_);
     driver_->CompileAll(class_loader_, dex_files_, timings_);
@@ -1613,7 +1628,10 @@
         // The non moving space is right after the oat file. Put the preferred app image location
         // right after the non moving space so that we ideally get a continuous immune region for
         // the GC.
-        const size_t non_moving_space_capacity = heap->GetNonMovingSpace()->Capacity();
+        // Use the default non moving space capacity since dex2oat does not have a separate non-
+        // moving space. This means the runtime's non moving space size will be as large
+        // as the growth limit for dex2oat, but smaller in the zygote.
+        const size_t non_moving_space_capacity = gc::Heap::kDefaultNonMovingSpaceCapacity;
         image_base_ += non_moving_space_capacity;
         VLOG(compiler) << "App image base=" << reinterpret_cast<void*>(image_base_);
       }
@@ -1624,7 +1642,7 @@
                                           IsAppImage(),
                                           image_storage_mode_,
                                           oat_filenames_,
-                                          dex_file_oat_filename_map_));
+                                          dex_file_oat_index_map_));
 
       // We need to prepare method offsets in the image address space for direct method patching.
       TimingLogger::ScopedTiming t2("dex2oat Prepare image address space", timings_);
@@ -1634,21 +1652,39 @@
       }
     }
 
+    linker::MultiOatRelativePatcher patcher(instruction_set_, instruction_set_features_.get());
     {
       TimingLogger::ScopedTiming t2("dex2oat Write ELF", timings_);
       for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
+        std::unique_ptr<ElfWriter>& elf_writer = elf_writers_[i];
+        std::unique_ptr<OatWriter>& oat_writer = oat_writers_[i];
+
+        std::vector<const DexFile*>& dex_files = dex_files_per_oat_file_[i];
+        oat_writer->PrepareLayout(driver_.get(), image_writer_.get(), dex_files, &patcher);
+
+        size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
+        size_t text_size = oat_writer->GetSize() - rodata_size;
+        elf_writer->SetLoadedSectionSizes(rodata_size, text_size, oat_writer->GetBssSize());
+
+        if (IsImage()) {
+          // Update oat layout.
+          DCHECK(image_writer_ != nullptr);
+          DCHECK_LT(i, oat_filenames_.size());
+          image_writer_->UpdateOatFileLayout(i,
+                                             elf_writer->GetLoadedSize(),
+                                             oat_writer->GetOatDataOffset(),
+                                             oat_writer->GetSize());
+        }
+      }
+
+      for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
         std::unique_ptr<File>& oat_file = oat_files_[i];
         std::unique_ptr<ElfWriter>& elf_writer = elf_writers_[i];
         std::unique_ptr<OatWriter>& oat_writer = oat_writers_[i];
 
-        std::vector<const DexFile*>& dex_files = dex_files_per_oat_file_[i];
-        oat_writer->PrepareLayout(driver_.get(), image_writer_.get(), dex_files);
-
         // We need to mirror the layout of the ELF file in the compressed debug-info.
-        // Therefore we need to propagate the sizes of at least those sections.
-        size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
-        size_t text_size = oat_writer->GetSize() - rodata_size;
-        elf_writer->PrepareDebugInfo(rodata_size, text_size, oat_writer->GetMethodDebugInfo());
+        // Therefore PrepareDebugInfo() relies on the SetLoadedSectionSizes() call further above.
+        elf_writer->PrepareDebugInfo(oat_writer->GetMethodDebugInfo());
 
         OutputStream*& rodata = rodata_[i];
         DCHECK(rodata != nullptr);
@@ -1674,7 +1710,13 @@
           return false;
         }
 
-        elf_writer->SetBssSize(oat_writer->GetBssSize());
+        if (IsImage()) {
+          // Update oat header information.
+          DCHECK(image_writer_ != nullptr);
+          DCHECK_LT(i, oat_filenames_.size());
+          image_writer_->UpdateOatFileHeader(i, oat_writer->GetOatHeader());
+        }
+
         elf_writer->WriteDynamicSection();
         elf_writer->WriteDebugInfo(oat_writer->GetMethodDebugInfo());
         elf_writer->WritePatchLocations(oat_writer->GetAbsolutePatchLocations());
@@ -1688,19 +1730,10 @@
         if (oat_files_[i] != nullptr) {
           if (oat_files_[i]->Flush() != 0) {
             PLOG(ERROR) << "Failed to flush oat file: " << oat_filenames_[i];
-            oat_files_[i]->Erase();
             return false;
           }
         }
 
-        if (IsImage()) {
-          // Update oat estimates.
-          DCHECK(image_writer_ != nullptr);
-          DCHECK_LT(i, oat_filenames_.size());
-
-          image_writer_->UpdateOatFile(oat_file.get(), oat_filenames_[i]);
-        }
-
         VLOG(compiler) << "Oat file written successfully: " << oat_filenames_[i];
 
         oat_writer.reset();
@@ -2194,22 +2227,21 @@
     }
     if (!image_writer_->Write(app_image_fd_,
                               image_filenames_,
-                              oat_fd_,
-                              oat_filenames_,
-                              oat_location_)) {
+                              oat_filenames_)) {
       LOG(ERROR) << "Failure during image file creation";
       return false;
     }
 
     // We need the OatDataBegin entries.
-    std::map<const char*, uintptr_t> oat_data_begins;
-    for (const char* oat_filename : oat_filenames_) {
-      oat_data_begins.emplace(oat_filename, image_writer_->GetOatDataBegin(oat_filename));
+    dchecked_vector<uintptr_t> oat_data_begins;
+    for (size_t i = 0, size = oat_filenames_.size(); i != size; ++i) {
+      oat_data_begins.push_back(image_writer_->GetOatDataBegin(i));
     }
     // Destroy ImageWriter before doing FixupElf.
     image_writer_.reset();
 
-    for (const char* oat_filename : oat_filenames_) {
+    for (size_t i = 0, size = oat_filenames_.size(); i != size; ++i) {
+      const char* oat_filename = oat_filenames_[i];
       // Do not fix up the ELF file if we are --compile-pic or compiling the app image
       if (!compiler_options_->GetCompilePic() && IsBootImage()) {
         std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename));
@@ -2218,9 +2250,7 @@
           return false;
         }
 
-        uintptr_t oat_data_begin = oat_data_begins.find(oat_filename)->second;
-
-        if (!ElfWriter::Fixup(oat_file.get(), oat_data_begin)) {
+        if (!ElfWriter::Fixup(oat_file.get(), oat_data_begins[i])) {
           oat_file->Erase();
           LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath();
           return false;
@@ -2434,7 +2464,7 @@
   TimingLogger* timings_;
   std::unique_ptr<CumulativeLogger> compiler_phases_timings_;
   std::vector<std::vector<const DexFile*>> dex_files_per_oat_file_;
-  std::unordered_map<const DexFile*, const char*> dex_file_oat_filename_map_;
+  std::unordered_map<const DexFile*, size_t> dex_file_oat_index_map_;
 
   // Backing storage.
   std::vector<std::string> char_backing_storage_;
@@ -2466,6 +2496,7 @@
 }
 
 static int CompileImage(Dex2Oat& dex2oat) {
+  dex2oat.LoadClassProfileDescriptors();
   dex2oat.Compile();
 
   if (!dex2oat.WriteOatFiles()) {
@@ -2550,16 +2581,20 @@
 
   TimingLogger timings("compiler", false, false);
 
-  Dex2Oat dex2oat(&timings);
+  // Allocate `dex2oat` on the heap instead of on the stack, as Clang
+  // might produce a stack frame too large for this function or for
+  // functions inlining it (such as main), which would exceed the
+  // limit set by the `-Wframe-larger-than` option.
+  std::unique_ptr<Dex2Oat> dex2oat = MakeUnique<Dex2Oat>(&timings);
 
   // Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
-  dex2oat.ParseArgs(argc, argv);
+  dex2oat->ParseArgs(argc, argv);
 
   // Process profile information and assess if we need to do a profile guided compilation.
   // This operation involves I/O.
-  if (dex2oat.UseProfileGuidedCompilation()) {
-    if (dex2oat.LoadProfile()) {
-      if (!dex2oat.ShouldCompileBasedOnProfiles()) {
+  if (dex2oat->UseProfileGuidedCompilation()) {
+    if (dex2oat->LoadProfile()) {
+      if (!dex2oat->ShouldCompileBasedOnProfiles()) {
         LOG(INFO) << "Skipped compilation because of insignificant profile delta";
         return EXIT_SUCCESS;
       }
@@ -2570,7 +2605,7 @@
   }
 
   // Check early that the result of compilation can be written
-  if (!dex2oat.OpenFile()) {
+  if (!dex2oat->OpenFile()) {
     return EXIT_FAILURE;
   }
 
@@ -2580,25 +2615,25 @@
   //   3) Compiling with --host
   //   4) Compiling on the host (not a target build)
   // Otherwise, print a stripped command line.
-  if (kIsDebugBuild || dex2oat.IsBootImage() || dex2oat.IsHost() || !kIsTargetBuild) {
+  if (kIsDebugBuild || dex2oat->IsBootImage() || dex2oat->IsHost() || !kIsTargetBuild) {
     LOG(INFO) << CommandLine();
   } else {
     LOG(INFO) << StrippedCommandLine();
   }
 
-  if (!dex2oat.Setup()) {
-    dex2oat.EraseOatFiles();
+  if (!dex2oat->Setup()) {
+    dex2oat->EraseOatFiles();
     return EXIT_FAILURE;
   }
 
   bool result;
-  if (dex2oat.IsImage()) {
-    result = CompileImage(dex2oat);
+  if (dex2oat->IsImage()) {
+    result = CompileImage(*dex2oat);
   } else {
-    result = CompileApp(dex2oat);
+    result = CompileApp(*dex2oat);
   }
 
-  dex2oat.Shutdown();
+  dex2oat->Shutdown();
   return result;
 }
 }  // namespace art
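
The motivation in the comment above: an automatic Dex2Oat object counts fully against the enclosing frame, while a heap allocation leaves only a pointer there. A hedged illustration, with std::make_unique standing in for ART's MakeUnique helper and BigState as an invented placeholder type:

#include <memory>

struct BigState { char buffer[64 * 1024]; };  // imagine many large members

int Run() {
  // BigState on_stack;  // would add 64 KiB to this frame and could trip
  //                     // -Wframe-larger-than
  auto state = std::make_unique<BigState>();  // heap: frame holds one pointer
  state->buffer[0] = 1;
  return state->buffer[0];
}

int main() { return Run() == 1 ? 0 : 1; }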
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index af08fc4..c187536 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -145,7 +145,9 @@
       bss->WriteNoBitsSection(oat_file_->BssSize());
     }
 
-    builder_->WriteDynamicSection(elf_file->GetPath());
+    builder_->PrepareDynamicSection(
+        elf_file->GetPath(), rodata_size, text_size, oat_file_->BssSize());
+    builder_->WriteDynamicSection();
 
     Walk(&art::OatSymbolizer::RegisterForDedup);
 
@@ -359,6 +361,7 @@
                    const char* method_filter,
                    bool list_classes,
                    bool list_methods,
+                   bool dump_header_only,
                    const char* export_dex_location,
                    uint32_t addr2instr)
     : dump_raw_mapping_table_(dump_raw_mapping_table),
@@ -371,6 +374,7 @@
       method_filter_(method_filter),
       list_classes_(list_classes),
       list_methods_(list_methods),
+      dump_header_only_(dump_header_only),
       export_dex_location_(export_dex_location),
       addr2instr_(addr2instr),
       class_loader_(nullptr) {}
@@ -385,6 +389,7 @@
   const char* const method_filter_;
   const bool list_classes_;
   const bool list_methods_;
+  const bool dump_header_only_;
   const char* const export_dex_location_;
   uint32_t addr2instr_;
   Handle<mirror::ClassLoader>* class_loader_;
@@ -512,21 +517,24 @@
       os << StringPrintf("0x%08x\n\n", resolved_addr2instr_);
     }
 
-    for (size_t i = 0; i < oat_dex_files_.size(); i++) {
-      const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
-      CHECK(oat_dex_file != nullptr);
+    if (!options_.dump_header_only_) {
+      for (size_t i = 0; i < oat_dex_files_.size(); i++) {
+        const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
+        CHECK(oat_dex_file != nullptr);
 
-      // If file export selected skip file analysis
-      if (options_.export_dex_location_) {
-        if (!ExportDexFile(os, *oat_dex_file)) {
-          success = false;
-        }
-      } else {
-        if (!DumpOatDexFile(os, *oat_dex_file)) {
-          success = false;
+        // If file export is selected, skip the file analysis.
+        if (options_.export_dex_location_) {
+          if (!ExportDexFile(os, *oat_dex_file)) {
+            success = false;
+          }
+        } else {
+          if (!DumpOatDexFile(os, *oat_dex_file)) {
+            success = false;
+          }
         }
       }
     }
+
     os << std::flush;
     return success;
   }
@@ -2570,6 +2578,8 @@
       dump_code_info_stack_maps_ = true;
     } else if (option == "--no-disassemble") {
       disassemble_code_ = false;
+    } else if (option == "--header-only") {
+      dump_header_only_ = true;
     } else if (option.starts_with("--symbolize=")) {
       oat_filename_ = option.substr(strlen("--symbolize=")).data();
       symbolize_ = true;
@@ -2653,6 +2663,9 @@
         "  --no-disassemble may be used to disable disassembly.\n"
         "      Example: --no-disassemble\n"
         "\n"
+        "  --header-only may be used to print only the oat header.\n"
+        "      Example: --header-only\n"
+        "\n"
         "  --list-classes may be used to list target file classes (can be used with filters).\n"
         "      Example: --list-classes\n"
         "      Example: --list-classes --class-filter=com.example.foo\n"
@@ -2695,6 +2708,7 @@
   bool symbolize_ = false;
   bool list_classes_ = false;
   bool list_methods_ = false;
+  bool dump_header_only_ = false;
   uint32_t addr2instr_ = 0;
   const char* export_dex_location_ = nullptr;
 };
@@ -2717,6 +2731,7 @@
         args_->method_filter_,
         args_->list_classes_,
         args_->list_methods_,
+        args_->dump_header_only_,
         args_->export_dex_location_,
         args_->addr2instr_));
 
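The --header-only plumbing follows a common shape: parse a flag into an args struct, copy it into an options object, and gate the expensive per-dex-file work on it. A condensed sketch with hypothetical names, not oatdump's actual types:

#include <iostream>
#include <string>

struct Options {
  bool dump_header_only = false;
};

bool Dump(std::ostream& os, const Options& options) {
  bool success = true;
  os << "oat header\n";  // Stand-in for the header dump.
  if (!options.dump_header_only) {
    // Per-dex-file analysis runs only for a full dump.
    os << "dex file analysis\n";
  }
  os << std::flush;
  return success;
}

int main(int argc, char** argv) {
  Options options;
  for (int i = 1; i < argc; ++i) {
    if (std::string(argv[i]) == "--header-only") {
      options.dump_header_only = true;
    }
  }
  return Dump(std::cout, options) ? 0 : 1;
}
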
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 3faa8eb..b0d5df2 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -37,8 +37,8 @@
     std::string dex_location2 = "location2" + id;
     uint32_t dex_location_checksum2 = 10 * checksum;
     for (uint16_t i = start_method_index; i < start_method_index + number_of_methods; i++) {
-      ASSERT_TRUE(info->AddData(dex_location1, dex_location_checksum1, i));
-      ASSERT_TRUE(info->AddData(dex_location2, dex_location_checksum2, i));
+      ASSERT_TRUE(info->AddMethodIndex(dex_location1, dex_location_checksum1, i));
+      ASSERT_TRUE(info->AddMethodIndex(dex_location2, dex_location_checksum2, i));
     }
     ASSERT_TRUE(info->Save(GetFd(profile)));
     ASSERT_EQ(0, profile.GetFile()->Flush());
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 947de8a..500fa14 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -292,7 +292,8 @@
 # Note that the fault_handler_x86.cc is not a mistake.  This file is
 # shared between the x86 and x86_64 architectures.
 LIBART_SRC_FILES_x86_64 := \
-  interpreter/mterp/mterp_stub.cc \
+  interpreter/mterp/mterp.cc \
+  interpreter/mterp/out/mterp_x86_64.S \
   arch/x86_64/context_x86_64.cc \
   arch/x86_64/entrypoints_init_x86_64.cc \
   arch/x86_64/jni_entrypoints_x86_64.S \
@@ -306,7 +307,8 @@
   $(LIBART_SRC_FILES_x86_64) \
 
 LIBART_TARGET_SRC_FILES_mips := \
-  interpreter/mterp/mterp_stub.cc \
+  interpreter/mterp/mterp.cc \
+  interpreter/mterp/out/mterp_mips.S \
   arch/mips/context_mips.cc \
   arch/mips/entrypoints_init_mips.cc \
   arch/mips/jni_entrypoints_mips.S \
@@ -316,7 +318,8 @@
   arch/mips/fault_handler_mips.cc
 
 LIBART_TARGET_SRC_FILES_mips64 := \
-  interpreter/mterp/mterp_stub.cc \
+  interpreter/mterp/mterp.cc \
+  interpreter/mterp/out/mterp_mips64.S \
   arch/mips64/context_mips64.cc \
   arch/mips64/entrypoints_init_mips64.cc \
   arch/mips64/jni_entrypoints_mips64.S \
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index c4e314b..cfcef49 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -942,7 +942,86 @@
 
 // Generate the allocation entrypoints for each allocator.
 GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
+ENTRY art_quick_alloc_object_tlab
+    // Fast path tlab allocation.
+    // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current
+    // r2, r3, r12: free.
+#if defined(USE_READ_BARRIER)
+    eor    r0, r0, r0                                         // Read barrier not supported here.
+    sub    r0, r0, #1                                         // Return -1.
+    bx     lr
+#endif
+    ldr    r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32]    // Load dex cache resolved types array
+                                                              // Load the class (r2)
+    ldr    r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+    cbz    r2, .Lart_quick_alloc_object_tlab_slow_path        // Check null class
+                                                              // Check class status.
+    ldr    r3, [r2, #MIRROR_CLASS_STATUS_OFFSET]
+    cmp    r3, #MIRROR_CLASS_STATUS_INITIALIZED
+    bne    .Lart_quick_alloc_object_tlab_slow_path
+                                                              // Add a fake dependence from the
+                                                              // following access flag and size
+                                                              // loads to the status load.
+                                                              // This is to prevent those loads
+                                                              // from being reordered above the
+                                                              // status load and reading wrong
+                                                              // values (an alternative is to use
+                                                              // a load-acquire for the status).
+    eor    r3, r3, r3
+    add    r2, r2, r3
+                                                              // Check that access flags have
+                                                              // kAccClassIsFinalizable set.
+    ldr    r3, [r2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
+    tst    r3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
+    bne    .Lart_quick_alloc_object_tlab_slow_path
+                                                              // Load thread_local_pos (r12) and
+                                                              // thread_local_end (r3) with ldrd.
+                                                              // Check constraints for ldrd.
+#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
+#error "Thread::thread_local_pos/end must be consecutive and 8-byte aligned for performance"
+#endif
+    ldrd   r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET]
+    sub    r12, r3, r12                                       // Compute the remaining buf size.
+    ldr    r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET]         // Load the object size (r3).
+    cmp    r3, r12                                            // Check if it fits. OK to do this
+                                                              // before rounding up the object size
+                                                              // assuming the buf size alignment.
+    bhi    .Lart_quick_alloc_object_tlab_slow_path
+    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
+                                                              // Round up the object size by the
+                                                              // object alignment. (addr + 7) & ~7.
+    add    r3, r3, #OBJECT_ALIGNMENT_MASK
+    and    r3, r3, #OBJECT_ALIGNMENT_MASK_TOGGLED
+                                                              // Reload old thread_local_pos (r0)
+                                                              // for the return value.
+    ldr    r0, [r9, #THREAD_LOCAL_POS_OFFSET]
+    add    r1, r0, r3
+    str    r1, [r9, #THREAD_LOCAL_POS_OFFSET]                 // Store new thread_local_pos.
+    ldr    r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]             // Increment thread_local_objects.
+    add    r1, r1, #1
+    str    r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
+    POISON_HEAP_REF r2
+    str    r2, [r0, #MIRROR_OBJECT_CLASS_OFFSET]              // Store the class pointer.
+                                                              // Fence. This is "ish" not "ishst" so
+                                                              // that the code after this allocation
+                                                              // site will see the right values in
+                                                              // the fields of the class.
+                                                              // (Alternatively we could use
+                                                              // "ishst" if we use load-acquire
+                                                              // for the class status load.)
+    dmb    ish
+    bx     lr
+.Lart_quick_alloc_object_tlab_slow_path:
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  r2, r3                 // Save callee saves in case of GC.
+    mov    r2, r9                                             // Pass Thread::Current.
+    bl     artAllocObjectFromCodeTLAB    // (uint32_t type_idx, Method* method, Thread*)
+    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_alloc_object_tlab
+
+
 // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
 ENTRY art_quick_alloc_object_rosalloc
     // Fast path rosalloc allocation.
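
For readers not fluent in ARM assembly, the TLAB fast path above is a bump-pointer allocation. A rough C++ equivalent, with illustrative names and omitting the class-status check, the finalizable check, and the memory fence:

#include <cstddef>
#include <cstdint>

// Toy thread-local allocation buffer mirroring thread_local_pos/end/objects.
struct Tlab {
  uint8_t* pos;
  uint8_t* end;
  size_t objects;
};

constexpr size_t kObjectAlignment = 8;

// Returns nullptr when the request does not fit and the slow path
// (artAllocObjectFromCodeTLAB) must be taken.
void* AllocFast(Tlab* tlab, size_t object_size) {
  const size_t remaining = static_cast<size_t>(tlab->end - tlab->pos);
  // As in the assembly, the unrounded size is compared first; this is safe
  // because the buffer size is itself aligned.
  if (object_size > remaining) {
    return nullptr;
  }
  // Round up to the object alignment: (size + 7) & ~7.
  const size_t alloc_size =
      (object_size + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
  uint8_t* const result = tlab->pos;
  tlab->pos += alloc_size;
  ++tlab->objects;
  // The real entrypoint also stores the class pointer into the new object
  // and issues "dmb ish" so other threads observe initialized fields.
  return result;
}
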
diff --git a/runtime/arch/mips/registers_mips.h b/runtime/arch/mips/registers_mips.h
index 1096af0..ae01bd5 100644
--- a/runtime/arch/mips/registers_mips.h
+++ b/runtime/arch/mips/registers_mips.h
@@ -100,6 +100,7 @@
   F29 = 29,
   F30 = 30,
   F31 = 31,
+  FTMP = F8,  // scratch register
   kNumberOfFRegisters = 32,
   kNoFRegister = -1,
 };
diff --git a/runtime/arch/mips64/registers_mips64.h b/runtime/arch/mips64/registers_mips64.h
index b027c95..81fae72 100644
--- a/runtime/arch/mips64/registers_mips64.h
+++ b/runtime/arch/mips64/registers_mips64.h
@@ -101,6 +101,7 @@
   F29 = 29,
   F30 = 30,
   F31 = 31,
+  FTMP = F8,  // scratch register
   kNumberOfFpuRegisters = 32,
   kNoFpuRegister = -1,
 };
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index d4b873e..d5807e2 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -175,12 +175,16 @@
 #elif defined(__aarch64__)
     __asm__ __volatile__(
         // Spill x0-x7 which we say we don't clobber. May contain args.
-        "sub sp, sp, #64\n\t"
-        ".cfi_adjust_cfa_offset 64\n\t"
+        "sub sp, sp, #80\n\t"
+        ".cfi_adjust_cfa_offset 80\n\t"
         "stp x0, x1, [sp]\n\t"
         "stp x2, x3, [sp, #16]\n\t"
         "stp x4, x5, [sp, #32]\n\t"
         "stp x6, x7, [sp, #48]\n\t"
+        // To be extra defensive, store x20. We do this because some of the stubs might make a
+        // transition into the runtime via the blr instruction below and *not* save x20.
+        "str x20, [sp, #64]\n\t"
+        // 8 bytes of padding to keep sp 16-byte aligned.
 
         "sub sp, sp, #16\n\t"          // Reserve stack space, 16B aligned
         ".cfi_adjust_cfa_offset 16\n\t"
@@ -279,8 +283,9 @@
         "ldp x2, x3, [sp, #16]\n\t"
         "ldp x4, x5, [sp, #32]\n\t"
         "ldp x6, x7, [sp, #48]\n\t"
-        "add sp, sp, #64\n\t"         // Free stack space, now sp as on entry
-        ".cfi_adjust_cfa_offset -64\n\t"
+        "ldr x20, [sp, #64]\n\t"
+        "add sp, sp, #80\n\t"         // Free stack space, now sp as on entry
+        ".cfi_adjust_cfa_offset -80\n\t"
 
         "str x9, %[fpr_result]\n\t"   // Store the FPR comparison result
         "mov %[result], x8\n\t"              // Store the call result
@@ -298,13 +303,17 @@
           // Use the result from r0
         : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
           [referrer] "r"(referrer), [hidden] "r"(hidden), [fpr_result] "m" (fpr_result)
-        : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20",
+          // Leave one register unclobbered, which is needed for compiling with
+          // -fstack-protector-strong. According to AAPCS64, registers x9-x15 are
+          // caller-saved, so we instead leave one of the otherwise-unused
+          // callee-saved registers out of the clobber list. Here we use x20.
+        : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
           "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
           "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
           "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
           "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
           "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
-          "memory");  // clobber.
+          "memory");
 #elif defined(__mips__) && !defined(__LP64__)
     __asm__ __volatile__ (
         // Spill a0-a3 and t0-t7 which we say we don't clobber. May contain args.
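
The clobber-list change can be shown in isolation: an asm statement that clobbers every caller-saved register leaves the compiler nowhere to keep its -fstack-protector-strong state, so one callee-saved register is saved by hand instead of being listed. A minimal sketch, not ART's code:

#include <cstdint>

uint64_t FortyTwo() {
#if defined(__aarch64__)
  uint64_t result;
  __asm__ __volatile__(
      "str x20, [sp, #-16]!\n\t"  // Save x20 by hand; sp stays 16-byte aligned.
      "mov x20, #42\n\t"          // Use x20 as scratch.
      "mov x9, x20\n\t"
      "ldr x20, [sp], #16\n\t"    // Restore x20 before the asm ends.
      "mov %0, x9\n\t"
      : "=r"(result)
      :
      : "x9");                    // x20 deliberately absent from the clobbers.
  return result;
#else
  return 42;  // Portable fallback for other architectures.
#endif
}
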
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index cd38e16..a60f31e 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -411,7 +411,12 @@
       DCHECK(method_header->Contains(pc));
       return method_header;
     } else {
-      DCHECK(!code_cache->ContainsPc(reinterpret_cast<const void*>(pc))) << std::hex << pc;
+      DCHECK(!code_cache->ContainsPc(reinterpret_cast<const void*>(pc)))
+          << PrettyMethod(this)
+          << ", pc=" << std::hex << pc
+          << ", entry_point=" << std::hex << reinterpret_cast<uintptr_t>(existing_entry_point)
+          << ", copy=" << std::boolalpha << IsCopied()
+          << ", proxy=" << std::boolalpha << IsProxyMethod();
     }
   }
 
diff --git a/runtime/art_method.h b/runtime/art_method.h
index f3e8d6b..ec00a7b 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -132,9 +132,12 @@
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
-  // Returns true if this method might be copied from another class.
-  bool MightBeCopied() {
-    return IsMiranda() || IsDefault() || IsDefaultConflicting();
+  bool IsCopied() {
+    const bool copied = (GetAccessFlags() & kAccCopied) != 0;
+    // (IsMiranda() || IsDefaultConflicting()) implies copied
+    DCHECK(!(IsMiranda() || IsDefaultConflicting()) || copied)
+        << "Miranda or default-conflict methods must always be copied.";
+    return copied;
   }
 
   bool IsMiranda() {
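
The DCHECK above encodes a one-way implication. Stated as plain code (flag values illustrative, not ART's actual constants):

#include <cassert>
#include <cstdint>

constexpr uint32_t kAccCopied = 1u << 0;
constexpr uint32_t kAccMiranda = 1u << 1;
constexpr uint32_t kAccDefaultConflict = 1u << 2;

bool IsCopied(uint32_t access_flags) {
  const bool copied = (access_flags & kAccCopied) != 0;
  const bool miranda_or_conflict =
      (access_flags & (kAccMiranda | kAccDefaultConflict)) != 0;
  // "A implies B" is written "!A || B": every Miranda or default-conflict
  // method must also carry kAccCopied, but a copied method (e.g. a copied
  // default method) need not be either.
  assert(!miranda_or_conflict || copied);
  return copied;
}
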
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index eb3b7f3..879364e 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -121,32 +121,32 @@
 ADD_TEST_EQ(THREAD_SELF_OFFSET,
             art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
 
+// Offset of field Thread::tlsPtr_.thread_local_objects.
+#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_CARD_TABLE_OFFSET + 168 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
+            art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
 // Offset of field Thread::tlsPtr_.thread_local_pos.
-#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 169 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_POS_OFFSET (THREAD_LOCAL_OBJECTS_OFFSET + 2 * __SIZEOF_POINTER__)
 ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
             art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value())
 // Offset of field Thread::tlsPtr_.thread_local_end.
 #define THREAD_LOCAL_END_OFFSET (THREAD_LOCAL_POS_OFFSET + __SIZEOF_POINTER__)
 ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
             art::Thread::ThreadLocalEndOffset<__SIZEOF_POINTER__>().Int32Value())
-// Offset of field Thread::tlsPtr_.thread_local_objects.
-#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
-            art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
 // Offset of field Thread::tlsPtr_.mterp_current_ibase.
-#define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 3 * __SIZEOF_POINTER__)
+#define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__)
 ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
             art::Thread::MterpCurrentIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
 // Offset of field Thread::tlsPtr_.mterp_default_ibase.
-#define THREAD_DEFAULT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 4 * __SIZEOF_POINTER__)
+#define THREAD_DEFAULT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 3 * __SIZEOF_POINTER__)
 ADD_TEST_EQ(THREAD_DEFAULT_IBASE_OFFSET,
             art::Thread::MterpDefaultIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
 // Offset of field Thread::tlsPtr_.mterp_alt_ibase.
-#define THREAD_ALT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 5 * __SIZEOF_POINTER__)
+#define THREAD_ALT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 4 * __SIZEOF_POINTER__)
 ADD_TEST_EQ(THREAD_ALT_IBASE_OFFSET,
             art::Thread::MterpAltIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
 // Offset of field Thread::tlsPtr_.rosalloc_runs.
-#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_LOCAL_POS_OFFSET + 6 * __SIZEOF_POINTER__)
+#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_LOCAL_POS_OFFSET + 5 * __SIZEOF_POINTER__)
 ADD_TEST_EQ(THREAD_ROSALLOC_RUNS_OFFSET,
             art::Thread::RosAllocRunsOffset<__SIZEOF_POINTER__>().Int32Value())
 // Offset of field Thread::tlsPtr_.thread_local_alloc_stack_top.
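
These offset macros only stay honest because each is paired with a compile-time equality check; the mechanism reduces to offsetof plus static_assert. A standalone sketch with an illustrative struct standing in for Thread::tlsPtr_:

#include <cstddef>

// Illustrative layout, not ART's real type.
struct TlsPtr {
  void* card_table;
  void* thread_local_objects;
  void* thread_local_pos;
  void* thread_local_end;
};

// Assembly hard-codes this constant; keep it in sync with the C++ layout.
#define THREAD_LOCAL_POS_OFFSET (2 * sizeof(void*))

static_assert(offsetof(TlsPtr, thread_local_pos) == THREAD_LOCAL_POS_OFFSET,
              "thread_local_pos offset out of sync with assembly");
// The ldrd in the arm entrypoint additionally requires pos/end adjacency.
static_assert(offsetof(TlsPtr, thread_local_end) ==
                  offsetof(TlsPtr, thread_local_pos) + sizeof(void*),
              "thread_local_pos/end must be consecutive");
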
diff --git a/runtime/atomic.cc b/runtime/atomic.cc
index e766a8d..d5ae570 100644
--- a/runtime/atomic.cc
+++ b/runtime/atomic.cc
@@ -28,7 +28,7 @@
 }
 
 void QuasiAtomic::Startup() {
-  if (kNeedSwapMutexes) {
+  if (NeedSwapMutexes(kRuntimeISA)) {
     gSwapMutexes = new std::vector<Mutex*>;
     for (size_t i = 0; i < kSwapMutexCount; ++i) {
       gSwapMutexes->push_back(new Mutex("QuasiAtomic stripe", kSwapMutexesLock));
@@ -37,7 +37,7 @@
 }
 
 void QuasiAtomic::Shutdown() {
-  if (kNeedSwapMutexes) {
+  if (NeedSwapMutexes(kRuntimeISA)) {
     STLDeleteElements(gSwapMutexes);
     delete gSwapMutexes;
   }
diff --git a/runtime/atomic.h b/runtime/atomic.h
index d4a7f37..e2a7259 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -22,6 +22,7 @@
 #include <limits>
 #include <vector>
 
+#include "arch/instruction_set.h"
 #include "base/logging.h"
 #include "base/macros.h"
 
@@ -44,14 +45,10 @@
 // quasiatomic operations that are performed on partially-overlapping
 // memory.
 class QuasiAtomic {
-#if defined(__mips__) && !defined(__LP64__)
-  static constexpr bool kNeedSwapMutexes = true;
-#elif defined(__mips__) && defined(__LP64__)
-  // TODO - mips64 still need this for Cas64 ???
-  static constexpr bool kNeedSwapMutexes = true;
-#else
-  static constexpr bool kNeedSwapMutexes = false;
-#endif
+  static constexpr bool NeedSwapMutexes(InstructionSet isa) {
+    // TODO: Does mips64 still need this for Cas64?
+    return (isa == kMips) || (isa == kMips64);
+  }
 
  public:
   static void Startup();
@@ -60,7 +57,7 @@
 
   // Reads the 64-bit value at "addr" without tearing.
   static int64_t Read64(volatile const int64_t* addr) {
-    if (!kNeedSwapMutexes) {
+    if (!NeedSwapMutexes(kRuntimeISA)) {
       int64_t value;
 #if defined(__LP64__)
       value = *addr;
@@ -96,7 +93,7 @@
 
   // Writes to the 64-bit value at "addr" without tearing.
   static void Write64(volatile int64_t* addr, int64_t value) {
-    if (!kNeedSwapMutexes) {
+    if (!NeedSwapMutexes(kRuntimeISA)) {
 #if defined(__LP64__)
       *addr = value;
 #else
@@ -142,7 +139,7 @@
   // at some point during the execution of Cas64, *addr was not equal to
   // old_value.
   static bool Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
-    if (!kNeedSwapMutexes) {
+    if (!NeedSwapMutexes(kRuntimeISA)) {
       return __sync_bool_compare_and_swap(addr, old_value, new_value);
     } else {
       return SwapMutexCas64(old_value, new_value, addr);
@@ -150,8 +147,8 @@
   }
 
   // Does the architecture provide reasonable atomic long operations or do we fall back on mutexes?
-  static bool LongAtomicsUseMutexes() {
-    return kNeedSwapMutexes;
+  static bool LongAtomicsUseMutexes(InstructionSet isa) {
+    return NeedSwapMutexes(isa);
   }
 
   static void ThreadFenceAcquire() {
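
On ISAs where NeedSwapMutexes() is true, 64-bit accesses fall back to a table of mutexes striped by address. The core of that fallback looks roughly like this (a sketch, not ART's implementation):

#include <array>
#include <cstddef>
#include <cstdint>
#include <mutex>

class SwapMutex64 {
 public:
  // Compare-and-swap emulated under a mutex chosen by address, so that all
  // accesses to the same 64-bit location serialize on the same stripe.
  static bool Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
    std::lock_guard<std::mutex> lock(MutexFor(addr));
    if (*addr == old_value) {
      *addr = new_value;
      return true;
    }
    return false;
  }

 private:
  static constexpr size_t kStripes = 32;

  static std::mutex& MutexFor(volatile const int64_t* addr) {
    static std::array<std::mutex, kStripes> mutexes;
    // Drop the low 3 bits (the value is 8-byte aligned) before striping.
    return mutexes[(reinterpret_cast<uintptr_t>(addr) >> 3) % kStripes];
  }
};
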
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index a4b38ea..44af3f7 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -183,10 +183,10 @@
   free(reinterpret_cast<void*>(memory_));
 }
 
-MemMapArena::MemMapArena(size_t size, bool low_4gb) {
+MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name) {
   std::string error_msg;
   map_.reset(MemMap::MapAnonymous(
-      "LinearAlloc", nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
+      name, nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
   CHECK(map_.get() != nullptr) << error_msg;
   memory_ = map_->Begin();
   size_ = map_->Size();
@@ -210,9 +210,12 @@
   }
 }
 
-ArenaPool::ArenaPool(bool use_malloc, bool low_4gb)
-    : use_malloc_(use_malloc), lock_("Arena pool lock", kArenaPoolLock), free_arenas_(nullptr),
-      low_4gb_(low_4gb) {
+ArenaPool::ArenaPool(bool use_malloc, bool low_4gb, const char* name)
+    : use_malloc_(use_malloc),
+      lock_("Arena pool lock", kArenaPoolLock),
+      free_arenas_(nullptr),
+      low_4gb_(low_4gb),
+      name_(name) {
   if (low_4gb) {
     CHECK(!use_malloc) << "low4gb must use map implementation";
   }
@@ -250,7 +253,7 @@
   }
   if (ret == nullptr) {
     ret = use_malloc_ ? static_cast<Arena*>(new MallocArena(size)) :
-        new MemMapArena(size, low_4gb_);
+        new MemMapArena(size, low_4gb_, name_);
   }
   ret->Reset();
   return ret;
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 8a96571..728f897 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -261,7 +261,7 @@
 
 class MemMapArena FINAL : public Arena {
  public:
-  MemMapArena(size_t size, bool low_4gb);
+  MemMapArena(size_t size, bool low_4gb, const char* name);
   virtual ~MemMapArena();
   void Release() OVERRIDE;
 
@@ -271,7 +271,9 @@
 
 class ArenaPool {
  public:
-  explicit ArenaPool(bool use_malloc = true, bool low_4gb = false);
+  ArenaPool(bool use_malloc = true,
+            bool low_4gb = false,
+            const char* name = "LinearAlloc");
   ~ArenaPool();
   Arena* AllocArena(size_t size) REQUIRES(!lock_);
   void FreeArenaChain(Arena* first) REQUIRES(!lock_);
@@ -287,6 +289,7 @@
   mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   Arena* free_arenas_ GUARDED_BY(lock_);
   const bool low_4gb_;
+  const char* name_;
   DISALLOW_COPY_AND_ASSIGN(ArenaPool);
 };
 
diff --git a/runtime/base/bit_field.h b/runtime/base/bit_field.h
index fd65d50..a80ca28 100644
--- a/runtime/base/bit_field.h
+++ b/runtime/base/bit_field.h
@@ -26,9 +26,18 @@
 
 // BitField is a template for encoding and decoding a bit field inside
 // an unsigned machine word.
-template<typename T, int position, int size>
+template<typename T, size_t kPosition, size_t kSize>
 class BitField {
  public:
+  typedef T value_type;
+  static constexpr size_t position = kPosition;
+  static constexpr size_t size = kSize;
+
+  static_assert(position < sizeof(uintptr_t) * kBitsPerByte, "Invalid position.");
+  static_assert(size != 0u, "Invalid size.");
+  static_assert(size <= sizeof(uintptr_t) * kBitsPerByte, "Invalid size.");
+  static_assert(size + position <= sizeof(uintptr_t) * kBitsPerByte, "Invalid position + size.");
+
   // Tells whether the provided value fits into the bit field.
   static bool IsValid(T value) {
     return (static_cast<uintptr_t>(value) & ~((kUintPtrTOne << size) - 1)) == 0;
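
Using such a BitField is purely compile-time arithmetic. A self-contained miniature with two illustrative fields (the field layout here is made up, not ART's DexRegisterLocation encoding):

#include <cstddef>
#include <cstdint>

template <typename T, size_t kPosition, size_t kSize>
struct MiniBitField {
  static constexpr uintptr_t kMask =
      ((static_cast<uintptr_t>(1) << kSize) - 1) << kPosition;
  static constexpr uintptr_t Encode(T value) {
    return (static_cast<uintptr_t>(value) << kPosition) & kMask;
  }
  static constexpr T Decode(uintptr_t word) {
    return static_cast<T>((word & kMask) >> kPosition);
  }
};

// Pack a 3-bit kind and a 12-bit value into one word.
using KindField = MiniBitField<uint32_t, /*position*/ 0, /*size*/ 3>;
using ValueField = MiniBitField<uint32_t, /*position*/ 3, /*size*/ 12>;

static_assert(KindField::Decode(KindField::Encode(5) | ValueField::Encode(42)) == 5,
              "kind round-trips");
static_assert(ValueField::Decode(KindField::Encode(5) | ValueField::Encode(42)) == 42,
              "value round-trips");
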
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index 9b55e70..424ebb7 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -229,6 +229,11 @@
    */
   int GetHighestBitSet() const;
 
+  // Minimum number of bits required to store this vector, 0 if none are set.
+  size_t GetNumberOfBits() const {
+    return GetHighestBitSet() + 1;
+  }
+
   // Is bit set in storage. (No range check.)
   static bool IsBitSet(const uint32_t* storage, uint32_t idx) {
     return (storage[WordIndex(idx)] & BitMask(idx)) != 0;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index e72f2a2..293451c 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -66,8 +66,8 @@
   kRosAllocGlobalLock,
   kRosAllocBracketLock,
   kRosAllocBulkFreeLock,
-  kTransactionLogLock,
   kMarkSweepMarkStackLock,
+  kTransactionLogLock,
   kJniWeakGlobalsLock,
   kReferenceQueueSoftReferencesLock,
   kReferenceQueuePhantomReferencesLock,
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index b9ea475..fcf3326 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -100,8 +100,7 @@
           CHECK_EQ(location.GetValue(), 0);
           break;
         default:
-          LOG(FATAL) << "Unexpected location kind"
-                     << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
+          LOG(FATAL) << "Unexpected location kind " << location.GetInternalKind();
       }
     }
   }
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 936c988..5f26b5d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -58,6 +58,7 @@
 #include "interpreter/interpreter.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
+#include "jit/offline_profiling_info.h"
 #include "leb128.h"
 #include "linear_alloc.h"
 #include "mirror/class.h"
@@ -759,7 +760,7 @@
     SHARED_REQUIRES(Locks::mutator_lock_) {
   if (m->IsRuntimeMethod()) {
     CHECK(m->GetDeclaringClass() == nullptr) << PrettyMethod(m);
-  } else if (m->MightBeCopied()) {
+  } else if (m->IsCopied()) {
     CHECK(m->GetDeclaringClass() != nullptr) << PrettyMethod(m);
   } else if (expected_class != nullptr) {
     CHECK_EQ(m->GetDeclaringClassUnchecked(), expected_class) << PrettyMethod(m);
@@ -1137,18 +1138,18 @@
 
   virtual void Visit(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
     GcRoot<mirror::Class>* resolved_types = method->GetDexCacheResolvedTypes(sizeof(void*));
-    const bool maybe_copied = method->MightBeCopied();
+    const bool is_copied = method->IsCopied();
     if (resolved_types != nullptr) {
       bool in_image_space = false;
-      if (kIsDebugBuild || maybe_copied) {
+      if (kIsDebugBuild || is_copied) {
         in_image_space = header_.GetImageSection(ImageHeader::kSectionDexCacheArrays).Contains(
             reinterpret_cast<const uint8_t*>(resolved_types) - header_.GetImageBegin());
       }
       // Must be in image space for non-miranda method.
-      DCHECK(maybe_copied || in_image_space)
+      DCHECK(is_copied || in_image_space)
           << resolved_types << " is not in image starting at "
           << reinterpret_cast<void*>(header_.GetImageBegin());
-      if (!maybe_copied || in_image_space) {
+      if (!is_copied || in_image_space) {
         // Go through the array so that we don't need to do a slow map lookup.
         method->SetDexCacheResolvedTypes(*reinterpret_cast<GcRoot<mirror::Class>**>(resolved_types),
                                          sizeof(void*));
@@ -1157,15 +1158,15 @@
     ArtMethod** resolved_methods = method->GetDexCacheResolvedMethods(sizeof(void*));
     if (resolved_methods != nullptr) {
       bool in_image_space = false;
-      if (kIsDebugBuild || maybe_copied) {
+      if (kIsDebugBuild || is_copied) {
         in_image_space = header_.GetImageSection(ImageHeader::kSectionDexCacheArrays).Contains(
               reinterpret_cast<const uint8_t*>(resolved_methods) - header_.GetImageBegin());
       }
       // Must be in image space for non-miranda method.
-      DCHECK(maybe_copied || in_image_space)
+      DCHECK(is_copied || in_image_space)
           << resolved_methods << " is not in image starting at "
           << reinterpret_cast<void*>(header_.GetImageBegin());
-      if (!maybe_copied || in_image_space) {
+      if (!is_copied || in_image_space) {
         // Go through the array so that we don't need to do a slow map lookup.
         method->SetDexCacheResolvedMethods(*reinterpret_cast<ArtMethod***>(resolved_methods),
                                            sizeof(void*));
@@ -1615,7 +1616,8 @@
           VLOG(image) << name->ToModifiedUtf8();
         }
         *error_msg = "Rejecting application image due to class loader mismatch";
-        return false;
+        // Ignore class loader mismatch for now since these would just use possibly incorrect
+        // oat code anyway. The structural class check should be done in the parent.
       }
     }
   }
@@ -2608,18 +2610,6 @@
   return nullptr;
 }
 
-const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file,
-                                            uint16_t class_def_idx,
-                                            uint32_t method_idx) {
-  bool found;
-  OatFile::OatClass oat_class = FindOatClass(dex_file, class_def_idx, &found);
-  if (!found) {
-    return nullptr;
-  }
-  uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, class_def_idx, method_idx);
-  return oat_class.GetOatMethod(oat_method_idx).GetQuickCode();
-}
-
 bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* quick_code) {
   if (UNLIKELY(method->IsNative() || method->IsProxyMethod())) {
     return false;
@@ -2650,6 +2640,11 @@
     return true;
   }
 
+  if (Dbg::IsDebuggerActive()) {
+    // Boot image classes are AOT-compiled as non-debuggable.
+    return runtime->GetHeap()->IsInBootImageOatFile(quick_code);
+  }
+
   return false;
 }
 
@@ -2860,8 +2855,9 @@
   WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
   LinearAlloc* allocator = class_loader->GetAllocator();
   if (allocator == nullptr) {
-    allocator = Runtime::Current()->CreateLinearAlloc();
-    class_loader->SetAllocator(allocator);
+    RegisterClassLoader(class_loader);
+    allocator = class_loader->GetAllocator();
+    CHECK(allocator != nullptr);
   }
   return allocator;
 }
@@ -4822,24 +4818,31 @@
   Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(new_class);
 }
 
+void ClassLinker::RegisterClassLoader(mirror::ClassLoader* class_loader) {
+  CHECK(class_loader->GetAllocator() == nullptr);
+  CHECK(class_loader->GetClassTable() == nullptr);
+  Thread* const self = Thread::Current();
+  ClassLoaderData data;
+  data.weak_root = self->GetJniEnv()->vm->AddWeakGlobalRef(self, class_loader);
+  // Create and set the class table.
+  data.class_table = new ClassTable;
+  class_loader->SetClassTable(data.class_table);
+  // Create and set the linear allocator.
+  data.allocator = Runtime::Current()->CreateLinearAlloc();
+  class_loader->SetAllocator(data.allocator);
+  // Add to the list so that we know to free the data later.
+  class_loaders_.push_back(data);
+}
+
 ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* class_loader) {
   if (class_loader == nullptr) {
     return &boot_class_table_;
   }
   ClassTable* class_table = class_loader->GetClassTable();
   if (class_table == nullptr) {
-    class_table = new ClassTable;
-    Thread* const self = Thread::Current();
-    ClassLoaderData data;
-    data.weak_root = self->GetJniEnv()->vm->AddWeakGlobalRef(self, class_loader);
-    data.class_table = class_table;
-    // Don't already have a class table, add it to the class loader.
-    CHECK(class_loader->GetClassTable() == nullptr);
-    class_loader->SetClassTable(data.class_table);
-    // Should have been set when we registered the dex file.
-    data.allocator = class_loader->GetAllocator();
-    CHECK(data.allocator != nullptr);
-    class_loaders_.push_back(data);
+    RegisterClassLoader(class_loader);
+    class_table = class_loader->GetClassTable();
+    DCHECK(class_table != nullptr);
   }
   return class_table;
 }
@@ -6459,7 +6462,7 @@
     for (ArtMethod* mir_method : miranda_methods) {
       ArtMethod& new_method = *out;
       new_method.CopyFrom(mir_method, image_pointer_size_);
-      new_method.SetAccessFlags(new_method.GetAccessFlags() | kAccMiranda);
+      new_method.SetAccessFlags(new_method.GetAccessFlags() | kAccMiranda | kAccCopied);
       DCHECK_NE(new_method.GetAccessFlags() & kAccAbstract, 0u)
           << "Miranda method should be abstract!";
       move_table.emplace(mir_method, &new_method);
@@ -6478,7 +6481,9 @@
       // yet it shouldn't have methods that are skipping access checks.
       // TODO This is rather arbitrary. We should maybe support classes where only some of its
       // methods are skip_access_checks.
-      new_method.SetAccessFlags((new_method.GetAccessFlags() | kAccDefault) & ~kAccSkipAccessChecks);
+      constexpr uint32_t kSetFlags = kAccDefault | kAccCopied;
+      constexpr uint32_t kMaskFlags = ~kAccSkipAccessChecks;
+      new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags);
       move_table.emplace(def_method, &new_method);
       ++out;
     }
@@ -6489,7 +6494,7 @@
       // this as a default, non-abstract method, since thats what it is. Also clear the
       // kAccSkipAccessChecks bit since this class hasn't been verified yet it shouldn't have
       // methods that are skipping access checks.
-      constexpr uint32_t kSetFlags = kAccDefault | kAccDefaultConflict;
+      constexpr uint32_t kSetFlags = kAccDefault | kAccDefaultConflict | kAccCopied;
       constexpr uint32_t kMaskFlags = ~(kAccAbstract | kAccSkipAccessChecks);
       new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags);
       DCHECK(new_method.IsDefaultConflicting());
@@ -7633,6 +7638,116 @@
   }
 }
 
+std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_boot_classes) {
+  ScopedObjectAccess soa(Thread::Current());
+  ScopedAssertNoThreadSuspension ants(soa.Self(), __FUNCTION__);
+  std::set<DexCacheResolvedClasses> ret;
+  VLOG(class_linker) << "Collecting resolved classes";
+  const uint64_t start_time = NanoTime();
+  ReaderMutexLock mu(soa.Self(), *DexLock());
+  // Loop through all the dex caches and inspect resolved classes.
+  for (const ClassLinker::DexCacheData& data : GetDexCachesData()) {
+    if (soa.Self()->IsJWeakCleared(data.weak_root)) {
+      continue;
+    }
+    mirror::DexCache* dex_cache =
+        down_cast<mirror::DexCache*>(soa.Self()->DecodeJObject(data.weak_root));
+    if (dex_cache == nullptr) {
+      continue;
+    }
+    const DexFile* dex_file = dex_cache->GetDexFile();
+    const std::string& location = dex_file->GetLocation();
+    const size_t num_class_defs = dex_file->NumClassDefs();
+    // Use the resolved types, this will miss array classes.
+    const size_t num_types = dex_file->NumTypeIds();
+    VLOG(class_linker) << "Collecting class profile for dex file " << location
+                       << " types=" << num_types << " class_defs=" << num_class_defs;
+    DexCacheResolvedClasses resolved_classes(dex_file->GetLocation(),
+                                             dex_file->GetLocationChecksum());
+    size_t num_resolved = 0;
+    std::unordered_set<uint16_t> class_set;
+    CHECK_EQ(num_types, dex_cache->NumResolvedTypes());
+    for (size_t i = 0; i < num_types; ++i) {
+      mirror::Class* klass = dex_cache->GetResolvedType(i);
+      // Skip unresolved types and, when requested, boot classes (null class loader).
+      if (klass == nullptr || (ignore_boot_classes && klass->GetClassLoader() == nullptr)) {
+        continue;
+      }
+      ++num_resolved;
+      DCHECK(!klass->IsProxyClass());
+      DCHECK(klass->IsResolved());
+      mirror::DexCache* klass_dex_cache = klass->GetDexCache();
+      if (klass_dex_cache == dex_cache) {
+        const size_t class_def_idx = klass->GetDexClassDefIndex();
+        DCHECK(klass->IsResolved());
+        CHECK_LT(class_def_idx, num_class_defs);
+        class_set.insert(class_def_idx);
+      }
+    }
+
+    if (!class_set.empty()) {
+      auto it = ret.find(resolved_classes);
+      if (it != ret.end()) {
+        // Already have the key, union the class def idxs.
+        it->AddClasses(class_set.begin(), class_set.end());
+      } else {
+        resolved_classes.AddClasses(class_set.begin(), class_set.end());
+        ret.insert(resolved_classes);
+      }
+    }
+
+    VLOG(class_linker) << "Dex location " << location << " has " << num_resolved << " / "
+                       << num_class_defs << " resolved classes";
+  }
+  VLOG(class_linker) << "Collecting class profile took " << PrettyDuration(NanoTime() - start_time);
+  return ret;
+}
+
+std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys(
+    const std::set<DexCacheResolvedClasses>& classes) {
+  std::unordered_set<std::string> ret;
+  Thread* const self = Thread::Current();
+  std::unordered_map<std::string, const DexFile*> location_to_dex_file;
+  ScopedObjectAccess soa(self);
+  ScopedAssertNoThreadSuspension ants(soa.Self(), __FUNCTION__);
+  ReaderMutexLock mu(self, *DexLock());
+  for (const ClassLinker::DexCacheData& data : GetDexCachesData()) {
+    if (!self->IsJWeakCleared(data.weak_root)) {
+      mirror::DexCache* dex_cache =
+          down_cast<mirror::DexCache*>(soa.Self()->DecodeJObject(data.weak_root));
+      if (dex_cache != nullptr) {
+        const DexFile* dex_file = dex_cache->GetDexFile();
+        // There could be duplicates if two dex files with the same location are mapped.
+        location_to_dex_file.emplace(
+            ProfileCompilationInfo::GetProfileDexFileKey(dex_file->GetLocation()), dex_file);
+      }
+    }
+  }
+  for (const DexCacheResolvedClasses& info : classes) {
+    const std::string& profile_key = info.GetDexLocation();
+    auto found = location_to_dex_file.find(profile_key);
+    if (found != location_to_dex_file.end()) {
+      const DexFile* dex_file = found->second;
+      VLOG(profiler) << "Found opened dex file for " << dex_file->GetLocation() << " with "
+                     << info.GetClasses().size() << " classes";
+      DCHECK_EQ(dex_file->GetLocationChecksum(), info.GetLocationChecksum());
+      for (uint16_t class_def_idx : info.GetClasses()) {
+        if (class_def_idx >= dex_file->NumClassDefs()) {
+          LOG(WARNING) << "Class def index " << class_def_idx << " >= " << dex_file->NumClassDefs();
+          continue;
+        }
+        const DexFile::TypeId& type_id = dex_file->GetTypeId(
+            dex_file->GetClassDef(class_def_idx).class_idx_);
+        const char* descriptor = dex_file->GetTypeDescriptor(type_id);
+        ret.insert(descriptor);
+      }
+    } else {
+      VLOG(class_linker) << "Failed to find opened dex file for profile key " << profile_key;
+    }
+  }
+  return ret;
+}
+
 // Instantiate ResolveMethod.
 template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::kForceICCECheck>(
     const DexFile& dex_file,
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index a9448f7..0a75b27 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -17,8 +17,10 @@
 #ifndef ART_RUNTIME_CLASS_LINKER_H_
 #define ART_RUNTIME_CLASS_LINKER_H_
 
+#include <set>
 #include <string>
 #include <unordered_map>
+#include <unordered_set>
 #include <utility>
 #include <vector>
 
@@ -27,6 +29,7 @@
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "class_table.h"
+#include "dex_cache_resolved_classes.h"
 #include "dex_file.h"
 #include "gc_root.h"
 #include "jni.h"
@@ -472,12 +475,6 @@
   const void* GetQuickOatCodeFor(ArtMethod* method)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Get the oat code for a method from a method index.
-  const void* GetQuickOatCodeFor(const DexFile& dex_file,
-                                 uint16_t class_def_idx,
-                                 uint32_t method_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
   // Get compiled code for a method, return null if no code
   // exists. This is unlike Get..OatCodeFor which will return a bridge
   // or interpreter entrypoint.
@@ -579,12 +576,12 @@
 
   // Unlike GetOrCreateAllocatorForClassLoader, GetAllocatorForClassLoader asserts that the
   // allocator for this class loader is already created.
-  static LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader)
+  LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Return the linear alloc for a class loader if it is already allocated, otherwise allocate and
   // set it. TODO: Consider using a lock other than classlinker_classes_lock_.
-  static LinearAlloc* GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader)
+  LinearAlloc* GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader)
       REQUIRES(!Locks::classlinker_classes_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -595,6 +592,13 @@
   static bool ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* quick_code)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  std::set<DexCacheResolvedClasses> GetResolvedClasses(bool ignore_boot_classes)
+      REQUIRES(!dex_lock_);
+
+  std::unordered_set<std::string> GetClassDescriptorsForProfileKeys(
+      const std::set<DexCacheResolvedClasses>& classes)
+      REQUIRES(!dex_lock_);
+
   struct DexCacheData {
     // Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
     // not work properly.
@@ -976,9 +980,16 @@
   mirror::Class* LookupClassFromBootImage(const char* descriptor)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  // Register a class loader and create its class table and allocator. Should not be called if
+  // these are already created.
+  void RegisterClassLoader(mirror::ClassLoader* class_loader)
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::classlinker_classes_lock_);
+
   // Returns null if not found.
   ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader)
       SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_);
+
   // Insert a new class table if not found.
   ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader)
       SHARED_REQUIRES(Locks::mutator_lock_)
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 5c3029a..488826b 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -263,7 +263,7 @@
     for (ArtMethod& method : klass->GetCopiedMethods(sizeof(void*))) {
       AssertMethod(&method);
       EXPECT_FALSE(method.IsDirect());
-      EXPECT_TRUE(method.MightBeCopied());
+      EXPECT_TRUE(method.IsCopied());
       EXPECT_TRUE(method.GetDeclaringClass()->IsInterface())
           << "declaring class: " << PrettyClass(method.GetDeclaringClass());
       EXPECT_TRUE(method.GetDeclaringClass()->IsAssignableFrom(klass.Get()))
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 904490a..bc65893 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -28,6 +28,7 @@
 #include "class_linker-inl.h"
 #include "dex_file-inl.h"
 #include "dex_instruction.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/allocation_record.h"
 #include "gc/scoped_gc_critical_section.h"
@@ -570,6 +571,29 @@
   return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
 }
 
+// Used to patch boot image method entry points to the interpreter bridge.
+class UpdateEntryPointsClassVisitor : public ClassVisitor {
+ public:
+  explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
+      : instrumentation_(instrumentation) {}
+
+  bool operator()(mirror::Class* klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+    auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+    for (auto& m : klass->GetMethods(pointer_size)) {
+      const void* code = m.GetEntryPointFromQuickCompiledCode();
+      if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
+          !m.IsNative() &&
+          !m.IsProxyMethod()) {
+        instrumentation_->UpdateMethodsCode(&m, GetQuickToInterpreterBridge());
+      }
+    }
+    return true;
+  }
+
+ private:
+  instrumentation::Instrumentation* const instrumentation_;
+};
+
 void Dbg::GoActive() {
   // Enable all debugging features, including scans for breakpoints.
   // This is a no-op if we're already active.
@@ -598,6 +622,14 @@
   }
 
   Runtime* runtime = Runtime::Current();
+  // Since boot image code is AOT-compiled as non-debuggable, we need to patch the
+  // entry points of boot image methods to the interpreter bridge.
+  if (!runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
+    ScopedObjectAccess soa(self);
+    UpdateEntryPointsClassVisitor visitor(runtime->GetInstrumentation());
+    runtime->GetClassLinker()->VisitClasses(&visitor);
+  }
+
   ScopedSuspendAll ssa(__FUNCTION__);
   if (RequiresDeoptimization()) {
     runtime->GetInstrumentation()->EnableDeoptimization();
diff --git a/runtime/dex_cache_resolved_classes.h b/runtime/dex_cache_resolved_classes.h
new file mode 100644
index 0000000..80c12cb
--- /dev/null
+++ b/runtime/dex_cache_resolved_classes.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX_CACHE_RESOLVED_CLASSES_H_
+#define ART_RUNTIME_DEX_CACHE_RESOLVED_CLASSES_H_
+
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+namespace art {
+
+// Data structure for passing around the set of resolved classes belonging to a dex cache / dex file.
+class DexCacheResolvedClasses {
+ public:
+  DexCacheResolvedClasses(const std::string& dex_location, uint32_t location_checksum)
+      : dex_location_(dex_location),
+        location_checksum_(location_checksum) {}
+
+  // Only compare the key elements, ignore the resolved classes.
+  int Compare(const DexCacheResolvedClasses& other) const {
+    if (location_checksum_ != other.location_checksum_) {
+      return static_cast<int>(location_checksum_ - other.location_checksum_);
+    }
+    return dex_location_.compare(other.dex_location_);
+  }
+
+  template <class InputIt>
+  void AddClasses(InputIt begin, InputIt end) const {
+    classes_.insert(begin, end);
+  }
+
+  const std::string& GetDexLocation() const {
+    return dex_location_;
+  }
+
+  uint32_t GetLocationChecksum() const {
+    return location_checksum_;
+  }
+
+  const std::unordered_set<uint16_t>& GetClasses() const {
+    return classes_;
+  }
+
+ private:
+  const std::string dex_location_;
+  const uint32_t location_checksum_;
+  // Set of resolved class def indices.
+  mutable std::unordered_set<uint16_t> classes_;
+};
+
+inline bool operator<(const DexCacheResolvedClasses& a, const DexCacheResolvedClasses& b) {
+  return a.Compare(b) < 0;
+}
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_DEX_CACHE_RESOLVED_CLASSES_H_
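
The mutable classes_ member is what lets AddClasses() be const: the std::set ordering depends only on (checksum, location), so the payload of an element already inside the set may grow without disturbing its position. A condensed model of the pattern, mirroring the find-then-union logic in GetResolvedClasses():

#include <cstdint>
#include <set>
#include <string>
#include <tuple>
#include <unordered_set>

struct ResolvedClasses {
  std::string location;
  uint32_t checksum;
  mutable std::unordered_set<uint16_t> classes;  // Not part of the ordering key.

  bool operator<(const ResolvedClasses& other) const {
    return std::tie(checksum, location) < std::tie(other.checksum, other.location);
  }
};

void MergeInto(std::set<ResolvedClasses>* out, const ResolvedClasses& entry) {
  auto it = out->find(entry);
  if (it != out->end()) {
    // Same key already present: union into the existing element's payload.
    it->classes.insert(entry.classes.begin(), entry.classes.end());
  } else {
    out->insert(entry);
  }
}
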
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index fbf028d..4e01d80 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -93,7 +93,7 @@
 extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
 
 // Intrinsic entrypoints.
-extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t);
 extern "C" int32_t art_quick_string_compareto(void*, void*);
 extern "C" void* art_quick_memcpy(void*, const void*, size_t);
 
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index faa4747..79d1c13 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -119,7 +119,7 @@
   V(ShrLong, uint64_t, uint64_t, uint32_t) \
   V(UshrLong, uint64_t, uint64_t, uint32_t) \
 \
-  V(IndexOf, int32_t, void*, uint32_t, uint32_t, uint32_t) \
+  V(IndexOf, int32_t, void*, uint32_t, uint32_t) \
   V(StringCompareTo, int32_t, void*, void*) \
   V(Memcpy, void*, void*, const void*, size_t) \
 \
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index e72809b..c621672 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -119,10 +119,10 @@
 
     // Skip across the entrypoints structures.
 
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, thread_local_start, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*));
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, sizeof(void*));
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, mterp_current_ibase, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, mterp_current_ibase, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_alt_ibase, rosalloc_runs, sizeof(void*));
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 4de5388..4672483 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -245,6 +245,9 @@
       heap->SetAllocTrackingEnabled(true);
     }
   } else {
+    // Delete outside of the critical section to avoid possible lock order violations
+    // involving locks such as the runtime shutdown lock.
+    std::unique_ptr<AllocRecordObjectMap> map;
     {
       MutexLock mu(self, *Locks::alloc_tracker_lock_);
       if (!heap->IsAllocTrackingEnabled()) {
@@ -252,7 +255,7 @@
       }
       heap->SetAllocTrackingEnabled(false);
       LOG(INFO) << "Disabling alloc tracker";
-      heap->SetAllocationRecords(nullptr);
+      map = heap->ReleaseAllocationRecords();
     }
     // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
     Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
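
The hunk above is an instance of a general pattern: transfer ownership out while holding the lock, destroy after releasing it, so a heavyweight destructor cannot take part in a lock-order violation. A minimal sketch with stand-in names:

#include <memory>
#include <mutex>

struct Tracker {};  // Stand-in for AllocRecordObjectMap.

std::mutex g_lock;
std::unique_ptr<Tracker> g_tracker;

void DisableTracking() {
  std::unique_ptr<Tracker> released;
  {
    std::lock_guard<std::mutex> mu(g_lock);
    released = std::move(g_tracker);  // g_tracker becomes null under the lock.
  }
  // `released` is destroyed here, outside the critical section, so its
  // destructor may freely take other locks.
}
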
diff --git a/runtime/gc/collector/immune_spaces.cc b/runtime/gc/collector/immune_spaces.cc
index 26da4ca..1e5f283 100644
--- a/runtime/gc/collector/immune_spaces.cc
+++ b/runtime/gc/collector/immune_spaces.cc
@@ -16,6 +16,9 @@
 
 #include "immune_spaces.h"
 
+#include <vector>
+#include <tuple>
+
 #include "gc/space/space-inl.h"
 #include "mirror/object.h"
 #include "oat_file.h"
@@ -32,11 +35,12 @@
 void ImmuneSpaces::CreateLargestImmuneRegion() {
   uintptr_t best_begin = 0u;
   uintptr_t best_end = 0u;
+  uintptr_t best_heap_size = 0u;
   uintptr_t cur_begin = 0u;
   uintptr_t cur_end = 0u;
-  // TODO: If the last space is an image space, we may include its oat file in the immune region.
-  // This could potentially hide heap corruption bugs if there is invalid pointers that point into
-  // the boot oat code
+  uintptr_t cur_heap_size = 0u;
+  using Interval = std::tuple</*start*/uintptr_t, /*end*/uintptr_t, /*is_heap*/bool>;
+  std::vector<Interval> intervals;
   for (space::ContinuousSpace* space : GetSpaces()) {
     uintptr_t space_begin = reinterpret_cast<uintptr_t>(space->Begin());
     uintptr_t space_end = reinterpret_cast<uintptr_t>(space->Limit());
@@ -50,29 +54,47 @@
       // creation, the actual oat file could be somewhere else.
       const OatFile* const image_oat_file = image_space->GetOatFile();
       if (image_oat_file != nullptr) {
-        uintptr_t oat_begin = reinterpret_cast<uintptr_t>(image_oat_file->Begin());
-        uintptr_t oat_end = reinterpret_cast<uintptr_t>(image_oat_file->End());
-        if (space_end == oat_begin) {
-          DCHECK_GE(oat_end, oat_begin);
-          space_end = oat_end;
-        }
+        intervals.push_back(Interval(reinterpret_cast<uintptr_t>(image_oat_file->Begin()),
+                                     reinterpret_cast<uintptr_t>(image_oat_file->End()),
+                                     /*is_heap*/ false));
       }
     }
-    if (cur_begin == 0u) {
-      cur_begin = space_begin;
-      cur_end = space_end;
-    } else if (cur_end == space_begin) {
-      // Extend current region.
-      cur_end = space_end;
-    } else {
-      // Reset.
-      cur_begin = 0;
-      cur_end = 0;
+    intervals.push_back(Interval(space_begin, space_end, /*is_heap*/true));
+  }
+  std::sort(intervals.begin(), intervals.end());
+  // Intervals are already sorted by begin; if a new interval begins at the end of the
+  // current region we extend it, otherwise we restart the region. To avoid starting a
+  // region on an oat file, oat intervals that do not extend an existing region are
+  // ignored. Whenever the total number of heap bytes in the current region exceeds the
+  // best seen so far, the current region becomes the new best.
+  for (const Interval& interval : intervals) {
+    const uintptr_t begin = std::get<0>(interval);
+    const uintptr_t end = std::get<1>(interval);
+    const bool is_heap = std::get<2>(interval);
+    VLOG(collector) << "Interval " << reinterpret_cast<const void*>(begin) << "-"
+                    << reinterpret_cast<const void*>(end) << " is_heap=" << is_heap;
+    DCHECK_GE(end, begin);
+    DCHECK_GE(begin, cur_end);
+    // The new interval is not contiguous with the current one: start a new region if
+    // this is a heap interval, otherwise skip it, since a region never starts with a
+    // non-heap (oat) interval.
+    if (begin != cur_end) {
+      if (!is_heap) {
+        continue;
+      }
+      // Not extending, reset the region.
+      cur_begin = begin;
+      cur_heap_size = 0;
     }
-    if (cur_end - cur_begin > best_end - best_begin) {
-      // Improvement, update the best range.
-      best_begin = cur_begin;
-      best_end = cur_end;
+    cur_end = end;
+    if (is_heap) {
+      // Only update if the total number of image bytes is greater than the current best one.
+      // We don't want to count the oat file bytes since these contain no java objects.
+      cur_heap_size += end - begin;
+      if (cur_heap_size > best_heap_size) {
+        best_begin = cur_begin;
+        best_end = cur_end;
+        best_heap_size = cur_heap_size;
+      }
     }
   }
   largest_immune_region_.SetBegin(reinterpret_cast<mirror::Object*>(best_begin));
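
Stripped of the GC types, the loop above is a sweep over sorted intervals that maximizes heap bytes per contiguous run; a standalone version for reference:

#include <algorithm>
#include <cstdint>
#include <tuple>
#include <utility>
#include <vector>

// (begin, end, is_heap); non-heap entries model oat files.
using Interval = std::tuple<uintptr_t, uintptr_t, bool>;

std::pair<uintptr_t, uintptr_t> LargestRegion(std::vector<Interval> intervals) {
  std::sort(intervals.begin(), intervals.end());
  uintptr_t best_begin = 0, best_end = 0, best_heap = 0;
  uintptr_t cur_begin = 0, cur_end = 0, cur_heap = 0;
  for (const Interval& interval : intervals) {
    const uintptr_t begin = std::get<0>(interval);
    const uintptr_t end = std::get<1>(interval);
    const bool is_heap = std::get<2>(interval);
    if (begin != cur_end) {
      // Not contiguous: only a heap interval may start a new run.
      if (!is_heap) {
        continue;
      }
      cur_begin = begin;
      cur_heap = 0;
    }
    cur_end = end;
    if (is_heap) {
      cur_heap += end - begin;  // Oat bytes never count toward the score.
      if (cur_heap > best_heap) {
        best_begin = cur_begin;
        best_end = cur_end;
        best_heap = cur_heap;
      }
    }
  }
  return std::make_pair(best_begin, best_end);
}
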
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 56838f5..cf93ec6 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -28,7 +28,136 @@
 namespace gc {
 namespace collector {
 
-class ImmuneSpacesTest : public CommonRuntimeTest {};
+class DummyOatFile : public OatFile {
+ public:
+  DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*is_executable*/ false) {
+    begin_ = begin;
+    end_ = end;
+  }
+};
+
+class DummyImageSpace : public space::ImageSpace {
+ public:
+  DummyImageSpace(MemMap* map,
+                  accounting::ContinuousSpaceBitmap* live_bitmap,
+                  std::unique_ptr<DummyOatFile>&& oat_file,
+                  std::unique_ptr<MemMap>&& oat_map)
+      : ImageSpace("DummyImageSpace",
+                   /*image_location*/"",
+                   map,
+                   live_bitmap,
+                   map->End()),
+        oat_map_(std::move(oat_map)) {
+    oat_file_ = std::move(oat_file);
+    oat_file_non_owned_ = oat_file_.get();
+  }
+
+ private:
+  std::unique_ptr<MemMap> oat_map_;
+};
+
+class ImmuneSpacesTest : public CommonRuntimeTest {
+  static constexpr size_t kMaxBitmaps = 10;
+
+ public:
+  ImmuneSpacesTest() {}
+
+  void ReserveBitmaps() {
+    // Create a bunch of dummy bitmaps since these are required to create image spaces. The bitmaps
+    // do not need to cover the image spaces though.
+    for (size_t i = 0; i < kMaxBitmaps; ++i) {
+      std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
+          accounting::ContinuousSpaceBitmap::Create("bitmap",
+                                                    reinterpret_cast<uint8_t*>(kPageSize),
+                                                    kPageSize));
+      CHECK(bitmap != nullptr);
+      live_bitmaps_.push_back(std::move(bitmap));
+    }
+  }
+
+  // Create an image space; the oat file is optional.
+  DummyImageSpace* CreateImageSpace(uint8_t* image_begin,
+                                    size_t image_size,
+                                    uint8_t* oat_begin,
+                                    size_t oat_size) {
+    std::string error_str;
+    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("DummyImageSpace",
+                                                     image_begin,
+                                                     image_size,
+                                                     PROT_READ | PROT_WRITE,
+                                                     /*low_4gb*/true,
+                                                     /*reuse*/false,
+                                                     &error_str));
+    if (map == nullptr) {
+      LOG(ERROR) << error_str;
+      return nullptr;
+    }
+    CHECK(!live_bitmaps_.empty());
+    std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
+    live_bitmaps_.pop_back();
+    std::unique_ptr<MemMap> oat_map(MemMap::MapAnonymous("OatMap",
+                                                         oat_begin,
+                                                         oat_size,
+                                                         PROT_READ | PROT_WRITE,
+                                                         /*low_4gb*/true,
+                                                         /*reuse*/false,
+                                                         &error_str));
+    if (oat_map == nullptr) {
+      LOG(ERROR) << error_str;
+      return nullptr;
+    }
+    std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map->Begin(), oat_map->End()));
+    // Create image header.
+    ImageSection sections[ImageHeader::kSectionCount];
+    new (map->Begin()) ImageHeader(
+        /*image_begin*/PointerToLowMemUInt32(map->Begin()),
+        /*image_size*/map->Size(),
+        sections,
+        /*image_roots*/PointerToLowMemUInt32(map->Begin()) + 1,
+        /*oat_checksum*/0u,
+        // The header's oat fields point at the oat region passed in (not necessarily adjacent).
+        /*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
+        /*oat_data_begin*/PointerToLowMemUInt32(oat_begin),
+        /*oat_data_end*/PointerToLowMemUInt32(oat_begin + oat_size),
+        /*oat_file_end*/PointerToLowMemUInt32(oat_begin + oat_size),
+        /*boot_image_begin*/0u,
+        /*boot_image_size*/0u,
+        /*boot_oat_begin*/0u,
+        /*boot_oat_size*/0u,
+        /*pointer_size*/sizeof(void*),
+        /*compile_pic*/false,
+        /*is_pic*/false,
+        ImageHeader::kStorageModeUncompressed,
+        /*storage_size*/0u);
+    return new DummyImageSpace(map.release(),
+                               live_bitmap.release(),
+                               std::move(oat_file),
+                               std::move(oat_map));
+  }
+
+  // Does not reserve the memory; the caller must ensure that no other thread maps at the
+  // returned address.
+  static uint8_t* GetContinuousMemoryRegion(size_t size) {
+    std::string error_str;
+    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("reserve",
+                                                     nullptr,
+                                                     size,
+                                                     PROT_READ | PROT_WRITE,
+                                                     /*low_4gb*/true,
+                                                     /*reuse*/false,
+                                                     &error_str));
+    if (map == nullptr) {
+      LOG(ERROR) << "Failed to allocate memory region " << error_str;
+      return nullptr;
+    }
+    return map->Begin();
+  }
+
+ private:
+  // Pool of pre-allocated dummy bitmaps. These are pre-allocated so that they do not get
+  // randomly placed at an address where we want to create an image space.
+  std::vector<std::unique_ptr<accounting::ContinuousSpaceBitmap>> live_bitmaps_;
+};
 
 class DummySpace : public space::ContinuousSpace {
  public:
@@ -72,94 +201,41 @@
   EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), b.Limit());
 }
 
-class DummyOatFile : public OatFile {
- public:
-  DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*is_executable*/ false) {
-    begin_ = begin;
-    end_ = end;
-  }
-};
-
-class DummyImageSpace : public space::ImageSpace {
- public:
-  DummyImageSpace(MemMap* map,
-                  accounting::ContinuousSpaceBitmap* live_bitmap,
-                  std::unique_ptr<DummyOatFile>&& oat_file)
-      : ImageSpace("DummyImageSpace",
-                   /*image_location*/"",
-                   map,
-                   live_bitmap,
-                   map->End()) {
-    oat_file_ = std::move(oat_file);
-    oat_file_non_owned_ = oat_file_.get();
-  }
-
-  // Size is the size of the image space, oat offset is where the oat file is located
-  // after the end of image space. oat_size is the size of the oat file.
-  static DummyImageSpace* Create(size_t size, size_t oat_offset, size_t oat_size) {
-    std::string error_str;
-    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("DummyImageSpace",
-                                                     nullptr,
-                                                     size,
-                                                     PROT_READ | PROT_WRITE,
-                                                     /*low_4gb*/true,
-                                                     /*reuse*/false,
-                                                     &error_str));
-    if (map == nullptr) {
-      LOG(ERROR) << error_str;
-      return nullptr;
-    }
-    std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(
-        accounting::ContinuousSpaceBitmap::Create("bitmap", map->Begin(), map->Size()));
-    if (live_bitmap == nullptr) {
-      return nullptr;
-    }
-    // The actual mapped oat file may not be directly after the image for the app image case.
-    std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(map->End() + oat_offset,
-                                                            map->End() + oat_offset + oat_size));
-    // Create image header.
-    ImageSection sections[ImageHeader::kSectionCount];
-    new (map->Begin()) ImageHeader(
-        /*image_begin*/PointerToLowMemUInt32(map->Begin()),
-        /*image_size*/map->Size(),
-        sections,
-        /*image_roots*/PointerToLowMemUInt32(map->Begin()) + 1,
-        /*oat_checksum*/0u,
-        // The oat file data in the header is always right after the image space.
-        /*oat_file_begin*/PointerToLowMemUInt32(map->End()),
-        /*oat_data_begin*/PointerToLowMemUInt32(map->End()),
-        /*oat_data_end*/PointerToLowMemUInt32(map->End() + oat_size),
-        /*oat_file_end*/PointerToLowMemUInt32(map->End() + oat_size),
-        /*boot_image_begin*/0u,
-        /*boot_image_size*/0u,
-        /*boot_oat_begin*/0u,
-        /*boot_oat_size*/0u,
-        /*pointer_size*/sizeof(void*),
-        /*compile_pic*/false,
-        /*is_pic*/false,
-        ImageHeader::kStorageModeUncompressed,
-        /*storage_size*/0u);
-    return new DummyImageSpace(map.release(), live_bitmap.release(), std::move(oat_file));
-  }
-};
-
+// Tests [image][oat][space] producing a single large immune region.
 TEST_F(ImmuneSpacesTest, AppendAfterImage) {
+  ReserveBitmaps();
   ImmuneSpaces spaces;
-  constexpr size_t image_size = 123 * kPageSize;
-  constexpr size_t image_oat_size = 321 * kPageSize;
-  std::unique_ptr<DummyImageSpace> image_space(DummyImageSpace::Create(image_size,
-                                                                       0,
-                                                                       image_oat_size));
+  constexpr size_t kImageSize = 123 * kPageSize;
+  constexpr size_t kImageOatSize = 321 * kPageSize;
+  constexpr size_t kOtherSpaceSize = 100 * kPageSize;
+
+  uint8_t* memory = GetContinuousMemoryRegion(kImageSize + kImageOatSize + kOtherSpaceSize);
+
+  std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(memory,
+                                                                kImageSize,
+                                                                memory + kImageSize,
+                                                                kImageOatSize));
   ASSERT_TRUE(image_space != nullptr);
   const ImageHeader& image_header = image_space->GetImageHeader();
-  EXPECT_EQ(image_header.GetImageSize(), image_size);
+  DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);
+
+  EXPECT_EQ(image_header.GetImageSize(), kImageSize);
   EXPECT_EQ(static_cast<size_t>(image_header.GetOatFileEnd() - image_header.GetOatFileBegin()),
-            image_oat_size);
-  DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + 813 * kPageSize);
-  EXPECT_NE(image_space->Limit(), space.Begin());
+            kImageOatSize);
+  EXPECT_EQ(image_space->GetOatFile()->Size(), kImageOatSize);
+  // Check that we do not include the oat if there is no space after.
   {
     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     spaces.AddSpace(image_space.get());
+  }
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
+            image_space->Begin());
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
+            image_space->Limit());
+  // Add another space and ensure it gets appended.
+  EXPECT_NE(image_space->Limit(), space.Begin());
+  {
+    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     spaces.AddSpace(&space);
   }
   EXPECT_TRUE(spaces.ContainsSpace(image_space.get()));
@@ -170,18 +246,122 @@
   EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
             image_space->Begin());
   EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space.Limit());
-  // Check that appending with a gap between the map does not include the oat file.
-  image_space.reset(DummyImageSpace::Create(image_size, kPageSize, image_oat_size));
-  spaces.Reset();
+}
+
+// Tests [image1][image2][image1 oat][image2 oat][image3] producing a single large immune region.
+TEST_F(ImmuneSpacesTest, MultiImage) {
+  ReserveBitmaps();
+  // Image 2 needs to be smaller, or else it may be chosen for the immune region.
+  constexpr size_t kImage1Size = kPageSize * 17;
+  constexpr size_t kImage2Size = kPageSize * 13;
+  constexpr size_t kImage3Size = kPageSize * 3;
+  constexpr size_t kImage1OatSize = kPageSize * 5;
+  constexpr size_t kImage2OatSize = kPageSize * 8;
+  constexpr size_t kImage3OatSize = kPageSize;
+  constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
+  constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
+  uint8_t* memory = GetContinuousMemoryRegion(kMemorySize);
+  uint8_t* space1_begin = memory;
+  memory += kImage1Size;
+  uint8_t* space2_begin = memory;
+  memory += kImage2Size;
+  uint8_t* space1_oat_begin = memory;
+  memory += kImage1OatSize;
+  uint8_t* space2_oat_begin = memory;
+  memory += kImage2OatSize;
+  uint8_t* space3_begin = memory;
+
+  std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(space1_begin,
+                                                           kImage1Size,
+                                                           space1_oat_begin,
+                                                           kImage1OatSize));
+  ASSERT_TRUE(space1 != nullptr);
+
+  std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(space2_begin,
+                                                           kImage2Size,
+                                                           space2_oat_begin,
+                                                           kImage2OatSize));
+  ASSERT_TRUE(space2 != nullptr);
+
+  // Finally put a 3rd image space.
+  std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(space3_begin,
+                                                           kImage3Size,
+                                                           space3_begin + kImage3Size,
+                                                           kImage3OatSize));
+  ASSERT_TRUE(space3 != nullptr);
+
+  // Check that we do not include the oat if there is no space after.
+  ImmuneSpaces spaces;
   {
     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-    spaces.AddSpace(image_space.get());
+    LOG(INFO) << "Adding space1 " << reinterpret_cast<const void*>(space1->Begin());
+    spaces.AddSpace(space1.get());
+    LOG(INFO) << "Adding space2 " << reinterpret_cast<const void*>(space2->Begin());
+    spaces.AddSpace(space2.get());
+  }
+  // There are no more heap bytes; the immune region should cover only the first two image
+  // spaces and should exclude the image oat files.
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
+            space1->Begin());
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
+            space2->Limit());
+
+  // Add another space after the oat files, now it should contain the entire memory region.
+  {
+    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+    LOG(INFO) << "Adding space3 " << reinterpret_cast<const void*>(space3->Begin());
+    spaces.AddSpace(space3.get());
   }
   EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
-            image_space->Begin());
-  // Size should be equal, we should not add the oat file since it is not adjacent to the image
-  // space.
-  EXPECT_EQ(spaces.GetLargestImmuneRegion().Size(), image_size);
+            space1->Begin());
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
+            space3->Limit());
+
+  // Add a smaller non-adjacent space and ensure it does not become part of the immune region.
+  // Image size is kImageBytes - kPageSize.
+  // Oat size is kPageSize.
+  // Guard pages to ensure it is not adjacent to an existing immune region.
+  // Layout:  [guard page][image][oat][guard page]
+  constexpr size_t kGuardSize = kPageSize;
+  constexpr size_t kImage4Size = kImageBytes - kPageSize;
+  constexpr size_t kImage4OatSize = kPageSize;
+  uint8_t* memory2 = GetContinuousMemoryRegion(kImage4Size + kImage4OatSize + kGuardSize * 2);
+  std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(memory2 + kGuardSize,
+                                                           kImage4Size,
+                                                           memory2 + kGuardSize + kImage4Size,
+                                                           kImage4OatSize));
+  ASSERT_TRUE(space4 != nullptr);
+  {
+    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+    LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
+    spaces.AddSpace(space4.get());
+  }
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
+            space1->Begin());
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
+            space3->Limit());
+
+  // Add a larger non-adjacent space and ensure it becomes the new largest immune region.
+  // Image size is kImageBytes + kPageSize.
+  // Oat size is kPageSize.
+  // Guard pages to ensure it is not adjacent to an existing immune region.
+  // Layout:  [guard page][image][oat][guard page]
+  constexpr size_t kImage5Size = kImageBytes + kPageSize;
+  constexpr size_t kImage5OatSize = kPageSize;
+  uint8_t* memory3 = GetContinuousMemoryRegion(kImage5Size + kImage5OatSize + kGuardSize * 2);
+  std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(memory3 + kGuardSize,
+                                                           kImage5Size,
+                                                           memory3 + kGuardSize + kImage5Size,
+                                                           kImage5OatSize));
+  ASSERT_TRUE(space5 != nullptr);
+  {
+    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+    LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
+    spaces.AddSpace(space5.get());
+  }
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), space5->Begin());
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space5->Limit());
 }
 
 }  // namespace collector
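
For reference, the constants above are picked so that the adjacent space1..3 run
(17 + 13 + 3 = 33 pages of image bytes) beats space4 (32 pages) but loses to space5
(34 pages). A compile-time restatement of that arithmetic (kPageSize assumed to be
4 KiB here):

#include <cstddef>

constexpr size_t kPageSize = 4096;  // Assumption for this sketch.
constexpr size_t kImageBytes = (17 + 13 + 3) * kPageSize;  // spaces 1-3: 33 pages.
constexpr size_t kImage4Size = kImageBytes - kPageSize;    // 32 pages.
constexpr size_t kImage5Size = kImageBytes + kPageSize;    // 34 pages.

// The immune region is chosen by total image bytes, so space4 must lose and space5 must win.
static_assert(kImage4Size < kImageBytes, "space4 may not displace spaces 1-3");
static_assert(kImage5Size > kImageBytes, "space5 must displace spaces 1-3");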
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index f437830..d7023d8 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -109,16 +109,25 @@
     obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                               &usable_size, &bytes_tl_bulk_allocated);
     if (UNLIKELY(obj == nullptr)) {
-      bool is_current_allocator = allocator == GetCurrentAllocator();
-      obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
+      // AllocateInternalWithGc can cause thread suspension; if someone instruments the
+      // entrypoints or changes the allocator at a suspend point here, retry the allocation.
+      obj = AllocateInternalWithGc(self,
+                                   allocator,
+                                   kInstrumented,
+                                   byte_count,
+                                   &bytes_allocated,
+                                   &usable_size,
                                    &bytes_tl_bulk_allocated, &klass);
       if (obj == nullptr) {
-        bool after_is_current_allocator = allocator == GetCurrentAllocator();
-        // If there is a pending exception, fail the allocation right away since the next one
-        // could cause OOM and abort the runtime.
-        if (!self->IsExceptionPending() && is_current_allocator && !after_is_current_allocator) {
-          // If the allocator changed, we need to restart the allocation.
-          return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
+        // If there is no pending exception, the only way we can get a null return is if the
+        // allocator or the instrumentation changed.
+        if (!self->IsExceptionPending()) {
+          // AllocObject will pick up the new allocator type, and kInstrumented=true is the
+          // safe default.
+          return AllocObject</*kInstrumented*/true>(self,
+                                                    klass,
+                                                    byte_count,
+                                                    pre_fence_visitor);
         }
         return nullptr;
       }
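
The retry contract this hunk establishes, in isolation: AllocateInternalWithGc now
returns null without a pending exception only when the allocator or the instrumentation
changed at a suspend point, and the caller restarts with the instrumented path as the
safe default. A sketch of the pattern with invented names:

#include <cstddef>

struct Object;
struct Thread { bool IsExceptionPending() const; };

Object* TryToAllocate(Thread* self, size_t byte_count);
Object* AllocateWithGc(Thread* self, bool instrumented, size_t byte_count);

Object* Alloc(Thread* self, bool instrumented, size_t byte_count) {
  Object* obj = TryToAllocate(self, byte_count);
  if (obj == nullptr) {
    obj = AllocateWithGc(self, instrumented, byte_count);  // May suspend.
    if (obj == nullptr && !self->IsExceptionPending()) {
      // No OOME was thrown, so the allocator or the instrumentation must have
      // changed at a suspend point: restart, with instrumented=true as the
      // conservative default.
      return Alloc(self, /*instrumented=*/true, byte_count);
    }
  }
  return obj;
}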
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a656fb8..bebff0f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1650,8 +1650,15 @@
   return nullptr;
 }
 
+static inline bool EntrypointsInstrumented() SHARED_REQUIRES(Locks::mutator_lock_) {
+  instrumentation::Instrumentation* const instrumentation =
+      Runtime::Current()->GetInstrumentation();
+  return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
+}
+
 mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
                                              AllocatorType allocator,
+                                             bool instrumented,
                                              size_t alloc_size,
                                              size_t* bytes_allocated,
                                              size_t* usable_size,
@@ -1667,12 +1674,13 @@
   // The allocation failed. If the GC is running, block until it completes, and then retry the
   // allocation.
   collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
+  // If we were the default allocator but the allocator changed while we were suspended, or if
+  // the entrypoints were instrumented while on the uninstrumented path, abort the allocation.
+  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+      (!instrumented && EntrypointsInstrumented())) {
+    return nullptr;
+  }
   if (last_gc != collector::kGcTypeNone) {
-    // If we were the default allocator but the allocator changed while we were suspended,
-    // abort the allocation.
-    if (was_default_allocator && allocator != GetCurrentAllocator()) {
-      return nullptr;
-    }
     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
     mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                      usable_size, bytes_tl_bulk_allocated);
@@ -1684,7 +1692,8 @@
   collector::GcType tried_type = next_gc_type_;
   const bool gc_ran =
       CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
-  if (was_default_allocator && allocator != GetCurrentAllocator()) {
+  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+      (!instrumented && EntrypointsInstrumented())) {
     return nullptr;
   }
   if (gc_ran) {
@@ -1703,7 +1712,8 @@
     // Attempt to run the collector, if we succeed, re-try the allocation.
     const bool plan_gc_ran =
         CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
-    if (was_default_allocator && allocator != GetCurrentAllocator()) {
+    if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+        (!instrumented && EntrypointsInstrumented())) {
       return nullptr;
     }
     if (plan_gc_ran) {
@@ -1732,7 +1742,8 @@
   // We don't need a WaitForGcToComplete here either.
   DCHECK(!gc_plan_.empty());
   CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
-  if (was_default_allocator && allocator != GetCurrentAllocator()) {
+  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+      (!instrumented && EntrypointsInstrumented())) {
     return nullptr;
   }
   ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
@@ -1748,6 +1759,11 @@
             min_interval_homogeneous_space_compaction_by_oom_) {
           last_time_homogeneous_space_compaction_by_oom_ = current_time;
           HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
+          // Thread suspension could have occurred.
+          if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+              (!instrumented && EntrypointsInstrumented())) {
+            return nullptr;
+          }
           switch (result) {
             case HomogeneousSpaceCompactResult::kSuccess:
               // If the allocation succeeded, we delayed an oom.
@@ -1788,6 +1804,11 @@
           // If we aren't out of memory then the OOM was probably from the non moving space being
           // full. Attempt to disable compaction and turn the main space into a non moving space.
           DisableMovingGc();
+          // Thread suspension could have occurred.
+          if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+              (!instrumented && EntrypointsInstrumented())) {
+            return nullptr;
+          }
           // If we are still a moving GC then something must have caused the transition to fail.
           if (IsMovingGc(collector_type_)) {
             MutexLock mu(self, *gc_complete_lock_);
@@ -3926,6 +3947,10 @@
   allocation_records_.reset(records);
 }
 
+std::unique_ptr<AllocRecordObjectMap> Heap::ReleaseAllocationRecords() {
+  return std::move(allocation_records_);
+}
+
 void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
   if (IsAllocTrackingEnabled()) {
     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
@@ -4058,6 +4083,15 @@
   return false;
 }
 
+bool Heap::IsInBootImageOatFile(const void* p) const {
+  for (gc::space::ImageSpace* space : boot_image_spaces_) {
+    if (space->GetOatFile()->Contains(p)) {
+      return true;
+    }
+  }
+  return false;
+}
+
 void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
                              uint32_t* boot_image_end,
                              uint32_t* boot_oat_begin,
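
The same bail-out condition now appears after every call in AllocateInternalWithGc that
can suspend the thread. A hypothetical refactoring sketch of that predicate (names
invented; in the real code the checks are inlined at each site):

enum class AllocatorType { kDefault, kOther };  // Stand-in for ART's enum.

AllocatorType GetCurrentAllocator();
bool EntrypointsInstrumented();

// True if a suspend point invalidated the in-flight allocation: either the
// allocator type switched (e.g. a collector transition), or the uninstrumented
// path raced with entrypoint instrumentation. The caller must return null and
// let AllocObject retry.
bool AllocationInvalidatedBySuspension(bool was_default_allocator,
                                       AllocatorType allocator,
                                       bool instrumented) {
  return (was_default_allocator && allocator != GetCurrentAllocator()) ||
         (!instrumented && EntrypointsInstrumented());
}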
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index a181e23..889069d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -605,6 +605,9 @@
   bool ObjectIsInBootImageSpace(mirror::Object* obj) const
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  bool IsInBootImageOatFile(const void* p) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   void GetBootImagesSize(uint32_t* boot_image_begin,
                          uint32_t* boot_image_end,
                          uint32_t* boot_oat_begin,
@@ -763,6 +766,10 @@
     return allocation_records_.get();
   }
 
+  // Release ownership of the allocation records.
+  std::unique_ptr<AllocRecordObjectMap> ReleaseAllocationRecords()
+      REQUIRES(Locks::alloc_tracker_lock_);
+
   void SetAllocationRecords(AllocRecordObjectMap* records)
       REQUIRES(Locks::alloc_tracker_lock_);
 
@@ -862,6 +869,7 @@
   // an initial allocation attempt failed.
   mirror::Object* AllocateInternalWithGc(Thread* self,
                                          AllocatorType allocator,
+                                         bool instrumented,
                                          size_t num_bytes,
                                          size_t* bytes_allocated,
                                          size_t* usable_size,
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 894ce9a..5aaf104 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1252,7 +1252,8 @@
     // Only care about the error message for the last address in addresses. We want to avoid the
     // overhead of printing the process maps if we can relocate.
     std::string* out_error_msg = (address == addresses.back()) ? &temp_error_msg : nullptr;
-    if (image_header->GetStorageMode() == ImageHeader::kStorageModeUncompressed) {
+    const ImageHeader::StorageMode storage_mode = image_header->GetStorageMode();
+    if (storage_mode == ImageHeader::kStorageModeUncompressed) {
       map.reset(MemMap::MapFileAtAddress(address,
                                          image_header->GetImageSize(),
                                          PROT_READ | PROT_WRITE,
@@ -1264,6 +1265,12 @@
                                          image_filename,
                                          /*out*/out_error_msg));
     } else {
+      if (storage_mode != ImageHeader::kStorageModeLZ4 &&
+          storage_mode != ImageHeader::kStorageModeLZ4HC) {
+        *error_msg = StringPrintf("Invalid storage mode in image header %d",
+                                  static_cast<int>(storage_mode));
+        return nullptr;
+      }
       // Reserve output and decompress into it.
       map.reset(MemMap::MapAnonymous(image_location,
                                      address,
@@ -1271,7 +1278,7 @@
                                      PROT_READ | PROT_WRITE,
                                      /*low_4gb*/true,
                                      /*reuse*/false,
-                                     out_error_msg));
+                                     /*out*/out_error_msg));
       if (map != nullptr) {
         const size_t stored_size = image_header->GetDataSize();
         const size_t write_offset = sizeof(ImageHeader);  // Skip the header.
@@ -1289,6 +1296,8 @@
         }
         memcpy(map->Begin(), image_header, sizeof(ImageHeader));
         const uint64_t start = NanoTime();
+        // LZ4HC and LZ4 have same internal format, both use LZ4_decompress.
+        TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger);
         const size_t decompressed_size = LZ4_decompress_safe(
             reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader),
             reinterpret_cast<char*>(map->Begin()) + write_offset,
diff --git a/runtime/image.h b/runtime/image.h
index 146ee00..8e5dbad 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -81,6 +81,7 @@
   enum StorageMode : uint32_t {
     kStorageModeUncompressed,
     kStorageModeLZ4,
+    kStorageModeLZ4HC,
     kStorageModeCount,  // Number of elements in enum.
   };
   static constexpr StorageMode kDefaultStorageMode = kStorageModeUncompressed;
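
The image_space.cc hunk above validates the mode before decompressing, and LZ4HC
(added here) shares the plain-LZ4 block format, so a single decompressor covers both
modes. A simplified sketch of the dispatch (flat buffers instead of MemMaps;
LZ4_decompress_safe is the real lz4 API, the rest is invented):

#include <cstring>
#include <string>

#include "lz4.h"  // LZ4_decompress_safe().

enum StorageMode { kStorageModeUncompressed, kStorageModeLZ4, kStorageModeLZ4HC };

bool LoadImageData(StorageMode mode, const char* src, int src_size,
                   char* dst, int dst_capacity, std::string* error_msg) {
  switch (mode) {
    case kStorageModeUncompressed:
      if (src_size > dst_capacity) { *error_msg = "image too large"; return false; }
      std::memcpy(dst, src, src_size);
      return true;
    case kStorageModeLZ4:
    case kStorageModeLZ4HC:
      // LZ4HC is only a stronger compressor; the block format is plain LZ4,
      // so both modes decode with the same call.
      if (LZ4_decompress_safe(src, dst, src_size, dst_capacity) < 0) {
        *error_msg = "corrupt LZ4 image data";
        return false;
      }
      return true;
    default:
      *error_msg = "invalid storage mode in image header";
      return false;
  }
}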
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 7484635..b107b72 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -104,6 +104,14 @@
   method->SetEntryPointFromQuickCompiledCode(quick_code);
 }
 
+bool Instrumentation::NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  return Dbg::IsDebuggerActive() &&
+         Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
+         !method->IsNative() &&
+         !method->IsProxyMethod();
+}
+
 void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
   if (!method->IsInvokable() || method->IsProxyMethod()) {
     // Do not change stubs for these methods.
@@ -124,6 +132,9 @@
       new_quick_code = GetQuickToInterpreterBridge();
     } else if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
       new_quick_code = class_linker->GetQuickOatCodeFor(method);
+      if (NeedDebugVersionForBootImageCode(method, new_quick_code)) {
+        new_quick_code = GetQuickToInterpreterBridge();
+      }
     } else {
       new_quick_code = GetQuickResolutionStub();
     }
@@ -136,10 +147,13 @@
       // class, all its static methods code will be set to the instrumentation entry point.
       // For more details, see ClassLinker::FixupStaticTrampolines.
       if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
-        if (entry_exit_stubs_installed_) {
+        new_quick_code = class_linker->GetQuickOatCodeFor(method);
+        if (NeedDebugVersionForBootImageCode(method, new_quick_code)) {
+          // Oat code should not be used. Don't install instrumentation stub and
+          // use interpreter for instrumentation.
+          new_quick_code = GetQuickToInterpreterBridge();
+        } else if (entry_exit_stubs_installed_) {
           new_quick_code = GetQuickInstrumentationEntryPoint();
-        } else {
-          new_quick_code = class_linker->GetQuickOatCodeFor(method);
         }
       } else {
         new_quick_code = GetQuickResolutionStub();
@@ -775,6 +789,9 @@
       UpdateEntrypoints(method, GetQuickResolutionStub());
     } else {
       const void* quick_code = class_linker->GetQuickOatCodeFor(method);
+      if (NeedDebugVersionForBootImageCode(method, quick_code)) {
+        quick_code = GetQuickToInterpreterBridge();
+      }
       UpdateEntrypoints(method, quick_code);
     }
 
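All three instrumentation.cc hunks converge on the same selection order: prefer the oat
code, demote boot-image code to the interpreter bridge when a debugger needs a debuggable
version, and only then consider the entry/exit stubs. A sketch of that order as a free
function (hypothetical form; the real logic lives in Instrumentation::InstallStubsForMethod):

struct ArtMethod;

const void* GetQuickOatCodeFor(ArtMethod* method);
bool NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code);
const void* GetQuickToInterpreterBridge();
bool EntryExitStubsInstalled();
const void* GetQuickInstrumentationEntryPoint();

const void* ChooseEntrypoint(ArtMethod* method) {
  const void* code = GetQuickOatCodeFor(method);
  if (NeedDebugVersionForBootImageCode(method, code)) {
    // Boot image oat code is not compiled as debuggable: run the method
    // through the interpreter (or JIT) instead of installing stubs over it.
    return GetQuickToInterpreterBridge();
  }
  if (EntryExitStubsInstalled()) {
    return GetQuickInstrumentationEntryPoint();
  }
  return code;
}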
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index e3cbf53..b3cdb41 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -247,6 +247,11 @@
     return forced_interpret_only_;
   }
 
+  // Returns true if the code is in a boot image oat file, which is not compiled as debuggable.
+  // In that case a debug version (interpreter or jitted) is needed.
+  bool NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   bool AreExitStubsInstalled() const {
     return instrumentation_stubs_installed_;
   }
@@ -414,6 +419,12 @@
                                size_t inlined_frames_before_frame)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  // Does not hold a lock; used to check whether the entrypoints changed from uninstrumented to
+  // instrumented during a GC suspend point.
+  bool AllocEntrypointsInstrumented() const SHARED_REQUIRES(Locks::mutator_lock_) {
+    return quick_alloc_entry_points_instrumentation_counter_ > 0;
+  }
+
  private:
   InstrumentationLevel GetCurrentInstrumentationLevel() const;
 
@@ -567,9 +578,7 @@
   InterpreterHandlerTable interpreter_handler_table_ GUARDED_BY(Locks::mutator_lock_);
 
   // Greater than 0 if quick alloc entry points instrumented.
-  size_t quick_alloc_entry_points_instrumentation_counter_
-      GUARDED_BY(Locks::instrument_entrypoints_lock_);
-
+  size_t quick_alloc_entry_points_instrumentation_counter_;
   friend class InstrumentationTest;  // For GetCurrentInstrumentationLevel and ConfigureStubs.
 
   DISALLOW_COPY_AND_ASSIGN(Instrumentation);
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index a595d33..baf4afe 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -240,20 +240,10 @@
   return os;
 }
 
-#if !defined(__clang__)
-#if (defined(__arm__) || defined(__i386__) || defined(__aarch64__))
-// TODO: remove when all targets implemented.
 static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
-#else
-static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKind;
-#endif
-#else
+
+#if defined(__clang__)
 // Clang 3.4 fails to build the goto interpreter implementation.
-#if (defined(__arm__) || defined(__i386__) || defined(__aarch64__))
-static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
-#else
-static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImplKind;
-#endif
 template<bool do_access_check, bool transaction_active>
 JValue ExecuteGotoImpl(Thread*, const DexFile::CodeItem*, ShadowFrame&, JValue) {
   LOG(FATAL) << "UNREACHABLE";
@@ -295,9 +285,7 @@
     }
 
     jit::Jit* jit = Runtime::Current()->GetJit();
-    if (UNLIKELY(jit != nullptr &&
-                 jit->JitAtFirstUse() &&
-                 jit->GetCodeCache()->ContainsMethod(method))) {
+    if (jit != nullptr && jit->CanInvokeCompiledCode(method)) {
       JValue result;
 
       // Pop the shadow frame before calling into compiled code.
@@ -327,12 +315,8 @@
         while (true) {
           // Mterp does not support all instrumentation/debugging.
           if (MterpShouldSwitchInterpreters()) {
-#if !defined(__clang__)
-            return ExecuteGotoImpl<false, false>(self, code_item, shadow_frame, result_register);
-#else
             return ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame, result_register,
                                                    false);
-#endif
           }
           bool returned = ExecuteMterpImpl(self, code_item, &shadow_frame, &result_register);
           if (returned) {
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index ca8598e..12d6fdc 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -792,6 +792,7 @@
       }
       ADVANCE(offset);
     } else {
+      BRANCH_INSTRUMENTATION(2);
       ADVANCE(2);
     }
   }
@@ -810,6 +811,7 @@
       }
       ADVANCE(offset);
     } else {
+      BRANCH_INSTRUMENTATION(2);
       ADVANCE(2);
     }
   }
@@ -828,6 +830,7 @@
       }
       ADVANCE(offset);
     } else {
+      BRANCH_INSTRUMENTATION(2);
       ADVANCE(2);
     }
   }
@@ -846,6 +849,7 @@
       }
       ADVANCE(offset);
     } else {
+      BRANCH_INSTRUMENTATION(2);
       ADVANCE(2);
     }
   }
@@ -864,6 +868,7 @@
       }
       ADVANCE(offset);
     } else {
+      BRANCH_INSTRUMENTATION(2);
       ADVANCE(2);
     }
   }
@@ -882,6 +887,7 @@
       }
       ADVANCE(offset);
     } else {
+      BRANCH_INSTRUMENTATION(2);
       ADVANCE(2);
     }
   }
@@ -899,6 +905,7 @@
       }
       ADVANCE(offset);
     } else {
+      BRANCH_INSTRUMENTATION(2);
       ADVANCE(2);
     }
   }
@@ -916,6 +923,7 @@
       }
       ADVANCE(offset);
     } else {
+      BRANCH_INSTRUMENTATION(2);
       ADVANCE(2);
     }
   }
@@ -933,6 +941,7 @@
       }
       ADVANCE(offset);
     } else {
+      BRANCH_INSTRUMENTATION(2);
       ADVANCE(2);
     }
   }
@@ -950,6 +959,7 @@
       }
       ADVANCE(offset);
     } else {
+      BRANCH_INSTRUMENTATION(2);
       ADVANCE(2);
     }
   }
@@ -967,6 +977,7 @@
       }
       ADVANCE(offset);
     } else {
+      BRANCH_INSTRUMENTATION(2);
       ADVANCE(2);
     }
   }
@@ -984,6 +995,7 @@
       }
       ADVANCE(offset);
     } else {
+      BRANCH_INSTRUMENTATION(2);
       ADVANCE(2);
     }
   }
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 25dbab2..0488dbf 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -712,6 +712,7 @@
           }
           inst = inst->RelativeAt(offset);
         } else {
+          BRANCH_INSTRUMENTATION(2);
           inst = inst->Next_2xx();
         }
         break;
@@ -727,6 +728,7 @@
           }
           inst = inst->RelativeAt(offset);
         } else {
+          BRANCH_INSTRUMENTATION(2);
           inst = inst->Next_2xx();
         }
         break;
@@ -742,6 +744,7 @@
           }
           inst = inst->RelativeAt(offset);
         } else {
+          BRANCH_INSTRUMENTATION(2);
           inst = inst->Next_2xx();
         }
         break;
@@ -757,6 +760,7 @@
           }
           inst = inst->RelativeAt(offset);
         } else {
+          BRANCH_INSTRUMENTATION(2);
           inst = inst->Next_2xx();
         }
         break;
@@ -772,6 +776,7 @@
           }
           inst = inst->RelativeAt(offset);
         } else {
+          BRANCH_INSTRUMENTATION(2);
           inst = inst->Next_2xx();
         }
         break;
@@ -787,6 +792,7 @@
           }
           inst = inst->RelativeAt(offset);
         } else {
+          BRANCH_INSTRUMENTATION(2);
           inst = inst->Next_2xx();
         }
         break;
@@ -801,6 +807,7 @@
           }
           inst = inst->RelativeAt(offset);
         } else {
+          BRANCH_INSTRUMENTATION(2);
           inst = inst->Next_2xx();
         }
         break;
@@ -815,6 +822,7 @@
           }
           inst = inst->RelativeAt(offset);
         } else {
+          BRANCH_INSTRUMENTATION(2);
           inst = inst->Next_2xx();
         }
         break;
@@ -829,6 +837,7 @@
           }
           inst = inst->RelativeAt(offset);
         } else {
+          BRANCH_INSTRUMENTATION(2);
           inst = inst->Next_2xx();
         }
         break;
@@ -843,6 +852,7 @@
           }
           inst = inst->RelativeAt(offset);
         } else {
+          BRANCH_INSTRUMENTATION(2);
           inst = inst->Next_2xx();
         }
         break;
@@ -857,6 +867,7 @@
           }
           inst = inst->RelativeAt(offset);
         } else {
+          BRANCH_INSTRUMENTATION(2);
           inst = inst->Next_2xx();
         }
         break;
@@ -871,6 +882,7 @@
           }
           inst = inst->RelativeAt(offset);
         } else {
+          BRANCH_INSTRUMENTATION(2);
           inst = inst->Next_2xx();
         }
         break;
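
The interpreter hunks all make the same change: the not-taken arm of each if-cmp now
reports a branch of 2 code units (the width of the if instruction) so the profiler
observes both edges. Conceptually (invented names; the real BRANCH_INSTRUMENTATION
macro forwards to the instrumentation listener):

#include <cstdint>

struct FrameState {
  int32_t vA = 0;
  int32_t vB = 0;
  int32_t dex_pc = 0;
  int16_t branch_offset = 0;  // From the instruction stream.
};

void OnBranch(FrameState& frame, int32_t offset);  // Profiling hook stand-in.

void InterpretIfEq(FrameState& frame) {
  if (frame.vA == frame.vB) {
    OnBranch(frame, frame.branch_offset);  // Taken edge, as before.
    frame.dex_pc += frame.branch_offset;
  } else {
    OnBranch(frame, 2);  // Not-taken edge, newly reported: fall through
    frame.dex_pc += 2;   // over the 2-code-unit if-cmp instruction.
  }
}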
diff --git a/runtime/interpreter/mterp/arm/bincmp.S b/runtime/interpreter/mterp/arm/bincmp.S
index 774e167..cfad714 100644
--- a/runtime/interpreter/mterp/arm/bincmp.S
+++ b/runtime/interpreter/mterp/arm/bincmp.S
@@ -6,14 +6,15 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
     mov     r1, rINST, lsr #12          @ r1<- B
     ubfx    r0, rINST, #8, #4           @ r0<- A
     GET_VREG r3, r1                     @ r3<- vB
     GET_VREG r2, r0                     @ r2<- vA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     cmp     r2, r3                      @ compare (vA, vB)
-    b${revcmp} .L_${opcode}_not_taken
+    mov${revcmp} rINST, #2
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -21,28 +22,10 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_${opcode}_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r2, r0                     @ r2<- vA
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r2, r3                      @ compare (vA, vB)
-    mov${revcmp} rINST, #2              @ rINST<- BYTE branch dist for not-taken
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r2, rINST, rINST            @ convert to bytes, check sign
+    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
+    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
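
The rewritten assembly achieves the same unification branchlessly: it loads the taken
offset into rINST/wINST, conditionally replaces it with 2 (mov${revcmp} on arm, csel on
arm64), and then a single advance sequence handles both outcomes. The idea in C++
(illustrative only):

#include <cstdint>

// One conditional select replaces the separate taken/not-taken labels: the
// not-taken case simply "branches" forward by 2 code units, the size of the
// if-cmp instruction itself.
int16_t SelectBranchOffset(int32_t vA, int32_t vB, int16_t taken_offset) {
  return (vA == vB) ? taken_offset : int16_t{2};
}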
diff --git a/runtime/interpreter/mterp/arm/zcmp.S b/runtime/interpreter/mterp/arm/zcmp.S
index 800804d..3d7dec0 100644
--- a/runtime/interpreter/mterp/arm/zcmp.S
+++ b/runtime/interpreter/mterp/arm/zcmp.S
@@ -6,13 +6,14 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
     mov     r0, rINST, lsr #8           @ r0<- AA
     GET_VREG r2, r0                     @ r2<- vAA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     cmp     r2, #0                      @ compare (vA, 0)
-    b${revcmp} .L_${opcode}_not_taken
+    mov${revcmp} rINST, #2
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -20,25 +21,9 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_${opcode}_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r2, r0                     @ r2<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    cmp     r2, #0                      @ compare (vA, 0)
-    mov${revcmp} rINST, #2              @ rINST<- inst branch dist for not-taken
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r1, rINST, rINST            @ convert to bytes & set flags
+    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/bincmp.S b/runtime/interpreter/mterp/arm64/bincmp.S
index ed850fc..2356ecb 100644
--- a/runtime/interpreter/mterp/arm64/bincmp.S
+++ b/runtime/interpreter/mterp/arm64/bincmp.S
@@ -6,31 +6,6 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.${condition} .L_${opcode}_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_${opcode}_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w1, wINST, #12              // w1<- B
     ubfx    w0, wINST, #8, #4           // w0<- A
     GET_VREG w3, w1                     // w3<- vB
@@ -39,10 +14,18 @@
     mov     w0, #2                      // Offset if branch not taken
     cmp     w2, w3                      // compare (vA, vB)
     csel    wINST, w1, w0, ${condition} // Branch if true, stashing result in callee save reg.
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes, check sign
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi     MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
diff --git a/runtime/interpreter/mterp/arm64/zcmp.S b/runtime/interpreter/mterp/arm64/zcmp.S
index e528d9f..3f1e1b1 100644
--- a/runtime/interpreter/mterp/arm64/zcmp.S
+++ b/runtime/interpreter/mterp/arm64/zcmp.S
@@ -6,39 +6,24 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    cmp     w2, #0                      // compare (vA, 0)
-    b.${condition} .L_${opcode}_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_${opcode}_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi    MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w0, wINST, #8               // w0<- AA
     GET_VREG w2, w0                     // w2<- vAA
     FETCH_S w1, 1                       // w1<- branch offset, in code units
     mov     w0, #2                      // Branch offset if not taken
     cmp     w2, #0                      // compare (vA, 0)
     csel    wINST, w1, w0, ${condition} // Branch if true, stashing result in callee save reg
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes & set flags
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi    MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
diff --git a/runtime/interpreter/mterp/config_mips b/runtime/interpreter/mterp/config_mips
index d1221f7..c6292c3 100644
--- a/runtime/interpreter/mterp/config_mips
+++ b/runtime/interpreter/mterp/config_mips
@@ -1,4 +1,4 @@
-# Copyright (C) 2015 The Android Open Source Project
+# Copyright (C) 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 #
-# Configuration for MIPS_32
+# Configuration for MIPS_32 targets.
 #
 
 handler-style computed-goto
@@ -33,265 +33,265 @@
 
 # opcode list; argument to op-start is default directory
 op-start mips
-    # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
-    # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
+    # (override example:) op op_sub_float_2addr arm-vfp
+    # (fallback example:) op op_sub_float_2addr FALLBACK
 
-    op op_nop FALLBACK
-    op op_move FALLBACK
-    op op_move_from16 FALLBACK
-    op op_move_16 FALLBACK
-    op op_move_wide FALLBACK
-    op op_move_wide_from16 FALLBACK
-    op op_move_wide_16 FALLBACK
-    op op_move_object FALLBACK
-    op op_move_object_from16 FALLBACK
-    op op_move_object_16 FALLBACK
-    op op_move_result FALLBACK
-    op op_move_result_wide FALLBACK
-    op op_move_result_object FALLBACK
-    op op_move_exception FALLBACK
-    op op_return_void FALLBACK
-    op op_return FALLBACK
-    op op_return_wide FALLBACK
-    op op_return_object FALLBACK
-    op op_const_4 FALLBACK
-    op op_const_16 FALLBACK
-    op op_const FALLBACK
-    op op_const_high16 FALLBACK
-    op op_const_wide_16 FALLBACK
-    op op_const_wide_32 FALLBACK
-    op op_const_wide FALLBACK
-    op op_const_wide_high16 FALLBACK
-    op op_const_string FALLBACK
-    op op_const_string_jumbo FALLBACK
-    op op_const_class FALLBACK
-    op op_monitor_enter FALLBACK
-    op op_monitor_exit FALLBACK
-    op op_check_cast FALLBACK
-    op op_instance_of FALLBACK
-    op op_array_length FALLBACK
-    op op_new_instance FALLBACK
-    op op_new_array FALLBACK
-    op op_filled_new_array FALLBACK
-    op op_filled_new_array_range FALLBACK
-    op op_fill_array_data FALLBACK
-    op op_throw FALLBACK
-    op op_goto FALLBACK
-    op op_goto_16 FALLBACK
-    op op_goto_32 FALLBACK
-    op op_packed_switch FALLBACK
-    op op_sparse_switch FALLBACK
-    op op_cmpl_float FALLBACK
-    op op_cmpg_float FALLBACK
-    op op_cmpl_double FALLBACK
-    op op_cmpg_double FALLBACK
-    op op_cmp_long FALLBACK
-    op op_if_eq FALLBACK
-    op op_if_ne FALLBACK
-    op op_if_lt FALLBACK
-    op op_if_ge FALLBACK
-    op op_if_gt FALLBACK
-    op op_if_le FALLBACK
-    op op_if_eqz FALLBACK
-    op op_if_nez FALLBACK
-    op op_if_ltz FALLBACK
-    op op_if_gez FALLBACK
-    op op_if_gtz FALLBACK
-    op op_if_lez FALLBACK
-    op_unused_3e FALLBACK
-    op_unused_3f FALLBACK
-    op_unused_40 FALLBACK
-    op_unused_41 FALLBACK
-    op_unused_42 FALLBACK
-    op_unused_43 FALLBACK
-    op op_aget FALLBACK
-    op op_aget_wide FALLBACK
-    op op_aget_object FALLBACK
-    op op_aget_boolean FALLBACK
-    op op_aget_byte FALLBACK
-    op op_aget_char FALLBACK
-    op op_aget_short FALLBACK
-    op op_aput FALLBACK
-    op op_aput_wide FALLBACK
-    op op_aput_object FALLBACK
-    op op_aput_boolean FALLBACK
-    op op_aput_byte FALLBACK
-    op op_aput_char FALLBACK
-    op op_aput_short FALLBACK
-    op op_iget FALLBACK
-    op op_iget_wide FALLBACK
-    op op_iget_object FALLBACK
-    op op_iget_boolean FALLBACK
-    op op_iget_byte FALLBACK
-    op op_iget_char FALLBACK
-    op op_iget_short FALLBACK
-    op op_iput FALLBACK
-    op op_iput_wide FALLBACK
-    op op_iput_object FALLBACK
-    op op_iput_boolean FALLBACK
-    op op_iput_byte FALLBACK
-    op op_iput_char FALLBACK
-    op op_iput_short FALLBACK
-    op op_sget FALLBACK
-    op op_sget_wide FALLBACK
-    op op_sget_object FALLBACK
-    op op_sget_boolean FALLBACK
-    op op_sget_byte FALLBACK
-    op op_sget_char FALLBACK
-    op op_sget_short FALLBACK
-    op op_sput FALLBACK
-    op op_sput_wide FALLBACK
-    op op_sput_object FALLBACK
-    op op_sput_boolean FALLBACK
-    op op_sput_byte FALLBACK
-    op op_sput_char FALLBACK
-    op op_sput_short FALLBACK
-    op op_invoke_virtual FALLBACK
-    op op_invoke_super FALLBACK
-    op op_invoke_direct FALLBACK
-    op op_invoke_static FALLBACK
-    op op_invoke_interface FALLBACK
-    op op_return_void_no_barrier FALLBACK
-    op op_invoke_virtual_range FALLBACK
-    op op_invoke_super_range FALLBACK
-    op op_invoke_direct_range FALLBACK
-    op op_invoke_static_range FALLBACK
-    op op_invoke_interface_range FALLBACK
-    op_unused_79 FALLBACK
-    op_unused_7a FALLBACK
-    op op_neg_int FALLBACK
-    op op_not_int FALLBACK
-    op op_neg_long FALLBACK
-    op op_not_long FALLBACK
-    op op_neg_float FALLBACK
-    op op_neg_double FALLBACK
-    op op_int_to_long FALLBACK
-    op op_int_to_float FALLBACK
-    op op_int_to_double FALLBACK
-    op op_long_to_int FALLBACK
-    op op_long_to_float FALLBACK
-    op op_long_to_double FALLBACK
-    op op_float_to_int FALLBACK
-    op op_float_to_long FALLBACK
-    op op_float_to_double FALLBACK
-    op op_double_to_int FALLBACK
-    op op_double_to_long FALLBACK
-    op op_double_to_float FALLBACK
-    op op_int_to_byte FALLBACK
-    op op_int_to_char FALLBACK
-    op op_int_to_short FALLBACK
-    op op_add_int FALLBACK
-    op op_sub_int FALLBACK
-    op op_mul_int FALLBACK
-    op op_div_int FALLBACK
-    op op_rem_int FALLBACK
-    op op_and_int FALLBACK
-    op op_or_int FALLBACK
-    op op_xor_int FALLBACK
-    op op_shl_int FALLBACK
-    op op_shr_int FALLBACK
-    op op_ushr_int FALLBACK
-    op op_add_long FALLBACK
-    op op_sub_long FALLBACK
-    op op_mul_long FALLBACK
-    op op_div_long FALLBACK
-    op op_rem_long FALLBACK
-    op op_and_long FALLBACK
-    op op_or_long FALLBACK
-    op op_xor_long FALLBACK
-    op op_shl_long FALLBACK
-    op op_shr_long FALLBACK
-    op op_ushr_long FALLBACK
-    op op_add_float FALLBACK
-    op op_sub_float FALLBACK
-    op op_mul_float FALLBACK
-    op op_div_float FALLBACK
-    op op_rem_float FALLBACK
-    op op_add_double FALLBACK
-    op op_sub_double FALLBACK
-    op op_mul_double FALLBACK
-    op op_div_double FALLBACK
-    op op_rem_double FALLBACK
-    op op_add_int_2addr FALLBACK
-    op op_sub_int_2addr FALLBACK
-    op op_mul_int_2addr FALLBACK
-    op op_div_int_2addr FALLBACK
-    op op_rem_int_2addr FALLBACK
-    op op_and_int_2addr FALLBACK
-    op op_or_int_2addr FALLBACK
-    op op_xor_int_2addr FALLBACK
-    op op_shl_int_2addr FALLBACK
-    op op_shr_int_2addr FALLBACK
-    op op_ushr_int_2addr FALLBACK
-    op op_add_long_2addr FALLBACK
-    op op_sub_long_2addr FALLBACK
-    op op_mul_long_2addr FALLBACK
-    op op_div_long_2addr FALLBACK
-    op op_rem_long_2addr FALLBACK
-    op op_and_long_2addr FALLBACK
-    op op_or_long_2addr FALLBACK
-    op op_xor_long_2addr FALLBACK
-    op op_shl_long_2addr FALLBACK
-    op op_shr_long_2addr FALLBACK
-    op op_ushr_long_2addr FALLBACK
-    op op_add_float_2addr FALLBACK
-    op op_sub_float_2addr FALLBACK
-    op op_mul_float_2addr FALLBACK
-    op op_div_float_2addr FALLBACK
-    op op_rem_float_2addr FALLBACK
-    op op_add_double_2addr FALLBACK
-    op op_sub_double_2addr FALLBACK
-    op op_mul_double_2addr FALLBACK
-    op op_div_double_2addr FALLBACK
-    op op_rem_double_2addr FALLBACK
-    op op_add_int_lit16 FALLBACK
-    op op_rsub_int FALLBACK
-    op op_mul_int_lit16 FALLBACK
-    op op_div_int_lit16 FALLBACK
-    op op_rem_int_lit16 FALLBACK
-    op op_and_int_lit16 FALLBACK
-    op op_or_int_lit16 FALLBACK
-    op op_xor_int_lit16 FALLBACK
-    op op_add_int_lit8 FALLBACK
-    op op_rsub_int_lit8 FALLBACK
-    op op_mul_int_lit8 FALLBACK
-    op op_div_int_lit8 FALLBACK
-    op op_rem_int_lit8 FALLBACK
-    op op_and_int_lit8 FALLBACK
-    op op_or_int_lit8 FALLBACK
-    op op_xor_int_lit8 FALLBACK
-    op op_shl_int_lit8 FALLBACK
-    op op_shr_int_lit8 FALLBACK
-    op op_ushr_int_lit8 FALLBACK
-    op op_iget_quick FALLBACK
-    op op_iget_wide_quick FALLBACK
-    op op_iget_object_quick FALLBACK
-    op op_iput_quick FALLBACK
-    op op_iput_wide_quick FALLBACK
-    op op_iput_object_quick FALLBACK
-    op op_invoke_virtual_quick FALLBACK
-    op op_invoke_virtual_range_quick FALLBACK
-    op op_iput_boolean_quick FALLBACK
-    op op_iput_byte_quick FALLBACK
-    op op_iput_char_quick FALLBACK
-    op op_iput_short_quick FALLBACK
-    op op_iget_boolean_quick FALLBACK
-    op op_iget_byte_quick FALLBACK
-    op op_iget_char_quick FALLBACK
-    op op_iget_short_quick FALLBACK
-    op_unused_f3 FALLBACK
-    op_unused_f4 FALLBACK
-    op_unused_f5 FALLBACK
-    op_unused_f6 FALLBACK
-    op_unused_f7 FALLBACK
-    op_unused_f8 FALLBACK
-    op_unused_f9 FALLBACK
-    op_unused_fa FALLBACK
-    op_unused_fb FALLBACK
-    op_unused_fc FALLBACK
-    op_unused_fd FALLBACK
-    op_unused_fe FALLBACK
-    op_unused_ff FALLBACK
+    # op op_nop FALLBACK
+    # op op_move FALLBACK
+    # op op_move_from16 FALLBACK
+    # op op_move_16 FALLBACK
+    # op op_move_wide FALLBACK
+    # op op_move_wide_from16 FALLBACK
+    # op op_move_wide_16 FALLBACK
+    # op op_move_object FALLBACK
+    # op op_move_object_from16 FALLBACK
+    # op op_move_object_16 FALLBACK
+    # op op_move_result FALLBACK
+    # op op_move_result_wide FALLBACK
+    # op op_move_result_object FALLBACK
+    # op op_move_exception FALLBACK
+    # op op_return_void FALLBACK
+    # op op_return FALLBACK
+    # op op_return_wide FALLBACK
+    # op op_return_object FALLBACK
+    # op op_const_4 FALLBACK
+    # op op_const_16 FALLBACK
+    # op op_const FALLBACK
+    # op op_const_high16 FALLBACK
+    # op op_const_wide_16 FALLBACK
+    # op op_const_wide_32 FALLBACK
+    # op op_const_wide FALLBACK
+    # op op_const_wide_high16 FALLBACK
+    # op op_const_string FALLBACK
+    # op op_const_string_jumbo FALLBACK
+    # op op_const_class FALLBACK
+    # op op_monitor_enter FALLBACK
+    # op op_monitor_exit FALLBACK
+    # op op_check_cast FALLBACK
+    # op op_instance_of FALLBACK
+    # op op_array_length FALLBACK
+    # op op_new_instance FALLBACK
+    # op op_new_array FALLBACK
+    # op op_filled_new_array FALLBACK
+    # op op_filled_new_array_range FALLBACK
+    # op op_fill_array_data FALLBACK
+    # op op_throw FALLBACK
+    # op op_goto FALLBACK
+    # op op_goto_16 FALLBACK
+    # op op_goto_32 FALLBACK
+    # op op_packed_switch FALLBACK
+    # op op_sparse_switch FALLBACK
+    # op op_cmpl_float FALLBACK
+    # op op_cmpg_float FALLBACK
+    # op op_cmpl_double FALLBACK
+    # op op_cmpg_double FALLBACK
+    # op op_cmp_long FALLBACK
+    # op op_if_eq FALLBACK
+    # op op_if_ne FALLBACK
+    # op op_if_lt FALLBACK
+    # op op_if_ge FALLBACK
+    # op op_if_gt FALLBACK
+    # op op_if_le FALLBACK
+    # op op_if_eqz FALLBACK
+    # op op_if_nez FALLBACK
+    # op op_if_ltz FALLBACK
+    # op op_if_gez FALLBACK
+    # op op_if_gtz FALLBACK
+    # op op_if_lez FALLBACK
+    # op op_unused_3e FALLBACK
+    # op op_unused_3f FALLBACK
+    # op op_unused_40 FALLBACK
+    # op op_unused_41 FALLBACK
+    # op op_unused_42 FALLBACK
+    # op op_unused_43 FALLBACK
+    # op op_aget FALLBACK
+    # op op_aget_wide FALLBACK
+    # op op_aget_object FALLBACK
+    # op op_aget_boolean FALLBACK
+    # op op_aget_byte FALLBACK
+    # op op_aget_char FALLBACK
+    # op op_aget_short FALLBACK
+    # op op_aput FALLBACK
+    # op op_aput_wide FALLBACK
+    # op op_aput_object FALLBACK
+    # op op_aput_boolean FALLBACK
+    # op op_aput_byte FALLBACK
+    # op op_aput_char FALLBACK
+    # op op_aput_short FALLBACK
+    # op op_iget FALLBACK
+    # op op_iget_wide FALLBACK
+    # op op_iget_object FALLBACK
+    # op op_iget_boolean FALLBACK
+    # op op_iget_byte FALLBACK
+    # op op_iget_char FALLBACK
+    # op op_iget_short FALLBACK
+    # op op_iput FALLBACK
+    # op op_iput_wide FALLBACK
+    # op op_iput_object FALLBACK
+    # op op_iput_boolean FALLBACK
+    # op op_iput_byte FALLBACK
+    # op op_iput_char FALLBACK
+    # op op_iput_short FALLBACK
+    # op op_sget FALLBACK
+    # op op_sget_wide FALLBACK
+    # op op_sget_object FALLBACK
+    # op op_sget_boolean FALLBACK
+    # op op_sget_byte FALLBACK
+    # op op_sget_char FALLBACK
+    # op op_sget_short FALLBACK
+    # op op_sput FALLBACK
+    # op op_sput_wide FALLBACK
+    # op op_sput_object FALLBACK
+    # op op_sput_boolean FALLBACK
+    # op op_sput_byte FALLBACK
+    # op op_sput_char FALLBACK
+    # op op_sput_short FALLBACK
+    # op op_invoke_virtual FALLBACK
+    # op op_invoke_super FALLBACK
+    # op op_invoke_direct FALLBACK
+    # op op_invoke_static FALLBACK
+    # op op_invoke_interface FALLBACK
+    # op op_return_void_no_barrier FALLBACK
+    # op op_invoke_virtual_range FALLBACK
+    # op op_invoke_super_range FALLBACK
+    # op op_invoke_direct_range FALLBACK
+    # op op_invoke_static_range FALLBACK
+    # op op_invoke_interface_range FALLBACK
+    # op op_unused_79 FALLBACK
+    # op op_unused_7a FALLBACK
+    # op op_neg_int FALLBACK
+    # op op_not_int FALLBACK
+    # op op_neg_long FALLBACK
+    # op op_not_long FALLBACK
+    # op op_neg_float FALLBACK
+    # op op_neg_double FALLBACK
+    # op op_int_to_long FALLBACK
+    # op op_int_to_float FALLBACK
+    # op op_int_to_double FALLBACK
+    # op op_long_to_int FALLBACK
+    # op op_long_to_float FALLBACK
+    # op op_long_to_double FALLBACK
+    # op op_float_to_int FALLBACK
+    # op op_float_to_long FALLBACK
+    # op op_float_to_double FALLBACK
+    # op op_double_to_int FALLBACK
+    # op op_double_to_long FALLBACK
+    # op op_double_to_float FALLBACK
+    # op op_int_to_byte FALLBACK
+    # op op_int_to_char FALLBACK
+    # op op_int_to_short FALLBACK
+    # op op_add_int FALLBACK
+    # op op_sub_int FALLBACK
+    # op op_mul_int FALLBACK
+    # op op_div_int FALLBACK
+    # op op_rem_int FALLBACK
+    # op op_and_int FALLBACK
+    # op op_or_int FALLBACK
+    # op op_xor_int FALLBACK
+    # op op_shl_int FALLBACK
+    # op op_shr_int FALLBACK
+    # op op_ushr_int FALLBACK
+    # op op_add_long FALLBACK
+    # op op_sub_long FALLBACK
+    # op op_mul_long FALLBACK
+    # op op_div_long FALLBACK
+    # op op_rem_long FALLBACK
+    # op op_and_long FALLBACK
+    # op op_or_long FALLBACK
+    # op op_xor_long FALLBACK
+    # op op_shl_long FALLBACK
+    # op op_shr_long FALLBACK
+    # op op_ushr_long FALLBACK
+    # op op_add_float FALLBACK
+    # op op_sub_float FALLBACK
+    # op op_mul_float FALLBACK
+    # op op_div_float FALLBACK
+    # op op_rem_float FALLBACK
+    # op op_add_double FALLBACK
+    # op op_sub_double FALLBACK
+    # op op_mul_double FALLBACK
+    # op op_div_double FALLBACK
+    # op op_rem_double FALLBACK
+    # op op_add_int_2addr FALLBACK
+    # op op_sub_int_2addr FALLBACK
+    # op op_mul_int_2addr FALLBACK
+    # op op_div_int_2addr FALLBACK
+    # op op_rem_int_2addr FALLBACK
+    # op op_and_int_2addr FALLBACK
+    # op op_or_int_2addr FALLBACK
+    # op op_xor_int_2addr FALLBACK
+    # op op_shl_int_2addr FALLBACK
+    # op op_shr_int_2addr FALLBACK
+    # op op_ushr_int_2addr FALLBACK
+    # op op_add_long_2addr FALLBACK
+    # op op_sub_long_2addr FALLBACK
+    # op op_mul_long_2addr FALLBACK
+    # op op_div_long_2addr FALLBACK
+    # op op_rem_long_2addr FALLBACK
+    # op op_and_long_2addr FALLBACK
+    # op op_or_long_2addr FALLBACK
+    # op op_xor_long_2addr FALLBACK
+    # op op_shl_long_2addr FALLBACK
+    # op op_shr_long_2addr FALLBACK
+    # op op_ushr_long_2addr FALLBACK
+    # op op_add_float_2addr FALLBACK
+    # op op_sub_float_2addr FALLBACK
+    # op op_mul_float_2addr FALLBACK
+    # op op_div_float_2addr FALLBACK
+    # op op_rem_float_2addr FALLBACK
+    # op op_add_double_2addr FALLBACK
+    # op op_sub_double_2addr FALLBACK
+    # op op_mul_double_2addr FALLBACK
+    # op op_div_double_2addr FALLBACK
+    # op op_rem_double_2addr FALLBACK
+    # op op_add_int_lit16 FALLBACK
+    # op op_rsub_int FALLBACK
+    # op op_mul_int_lit16 FALLBACK
+    # op op_div_int_lit16 FALLBACK
+    # op op_rem_int_lit16 FALLBACK
+    # op op_and_int_lit16 FALLBACK
+    # op op_or_int_lit16 FALLBACK
+    # op op_xor_int_lit16 FALLBACK
+    # op op_add_int_lit8 FALLBACK
+    # op op_rsub_int_lit8 FALLBACK
+    # op op_mul_int_lit8 FALLBACK
+    # op op_div_int_lit8 FALLBACK
+    # op op_rem_int_lit8 FALLBACK
+    # op op_and_int_lit8 FALLBACK
+    # op op_or_int_lit8 FALLBACK
+    # op op_xor_int_lit8 FALLBACK
+    # op op_shl_int_lit8 FALLBACK
+    # op op_shr_int_lit8 FALLBACK
+    # op op_ushr_int_lit8 FALLBACK
+    # op op_iget_quick FALLBACK
+    # op op_iget_wide_quick FALLBACK
+    # op op_iget_object_quick FALLBACK
+    # op op_iput_quick FALLBACK
+    # op op_iput_wide_quick FALLBACK
+    # op op_iput_object_quick FALLBACK
+    # op op_invoke_virtual_quick FALLBACK
+    # op op_invoke_virtual_range_quick FALLBACK
+    # op op_iput_boolean_quick FALLBACK
+    # op op_iput_byte_quick FALLBACK
+    # op op_iput_char_quick FALLBACK
+    # op op_iput_short_quick FALLBACK
+    # op op_iget_boolean_quick FALLBACK
+    # op op_iget_byte_quick FALLBACK
+    # op op_iget_char_quick FALLBACK
+    # op op_iget_short_quick FALLBACK
+    op op_invoke_lambda FALLBACK
+    # op op_unused_f4 FALLBACK
+    op op_capture_variable FALLBACK
+    op op_create_lambda FALLBACK
+    op op_liberate_variable FALLBACK
+    op op_box_lambda FALLBACK
+    op op_unbox_lambda FALLBACK
+    # op op_unused_fa FALLBACK
+    # op op_unused_fb FALLBACK
+    # op op_unused_fc FALLBACK
+    # op op_unused_fd FALLBACK
+    # op op_unused_fe FALLBACK
+    # op op_unused_ff FALLBACK
 op-end
 
 # common subroutines for asm
diff --git a/runtime/interpreter/mterp/config_mips64 b/runtime/interpreter/mterp/config_mips64
index f804ce5..c40c007 100644
--- a/runtime/interpreter/mterp/config_mips64
+++ b/runtime/interpreter/mterp/config_mips64
@@ -36,262 +36,262 @@
     # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
     # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
 
-    op op_nop FALLBACK
-    op op_move FALLBACK
-    op op_move_from16 FALLBACK
-    op op_move_16 FALLBACK
-    op op_move_wide FALLBACK
-    op op_move_wide_from16 FALLBACK
-    op op_move_wide_16 FALLBACK
-    op op_move_object FALLBACK
-    op op_move_object_from16 FALLBACK
-    op op_move_object_16 FALLBACK
-    op op_move_result FALLBACK
-    op op_move_result_wide FALLBACK
-    op op_move_result_object FALLBACK
-    op op_move_exception FALLBACK
-    op op_return_void FALLBACK
-    op op_return FALLBACK
-    op op_return_wide FALLBACK
-    op op_return_object FALLBACK
-    op op_const_4 FALLBACK
-    op op_const_16 FALLBACK
-    op op_const FALLBACK
-    op op_const_high16 FALLBACK
-    op op_const_wide_16 FALLBACK
-    op op_const_wide_32 FALLBACK
-    op op_const_wide FALLBACK
-    op op_const_wide_high16 FALLBACK
-    op op_const_string FALLBACK
-    op op_const_string_jumbo FALLBACK
-    op op_const_class FALLBACK
-    op op_monitor_enter FALLBACK
-    op op_monitor_exit FALLBACK
-    op op_check_cast FALLBACK
-    op op_instance_of FALLBACK
-    op op_array_length FALLBACK
-    op op_new_instance FALLBACK
-    op op_new_array FALLBACK
-    op op_filled_new_array FALLBACK
-    op op_filled_new_array_range FALLBACK
-    op op_fill_array_data FALLBACK
-    op op_throw FALLBACK
-    op op_goto FALLBACK
-    op op_goto_16 FALLBACK
-    op op_goto_32 FALLBACK
-    op op_packed_switch FALLBACK
-    op op_sparse_switch FALLBACK
-    op op_cmpl_float FALLBACK
-    op op_cmpg_float FALLBACK
-    op op_cmpl_double FALLBACK
-    op op_cmpg_double FALLBACK
-    op op_cmp_long FALLBACK
-    op op_if_eq FALLBACK
-    op op_if_ne FALLBACK
-    op op_if_lt FALLBACK
-    op op_if_ge FALLBACK
-    op op_if_gt FALLBACK
-    op op_if_le FALLBACK
-    op op_if_eqz FALLBACK
-    op op_if_nez FALLBACK
-    op op_if_ltz FALLBACK
-    op op_if_gez FALLBACK
-    op op_if_gtz FALLBACK
-    op op_if_lez FALLBACK
-    op_unused_3e FALLBACK
-    op_unused_3f FALLBACK
-    op_unused_40 FALLBACK
-    op_unused_41 FALLBACK
-    op_unused_42 FALLBACK
-    op_unused_43 FALLBACK
-    op op_aget FALLBACK
-    op op_aget_wide FALLBACK
-    op op_aget_object FALLBACK
-    op op_aget_boolean FALLBACK
-    op op_aget_byte FALLBACK
-    op op_aget_char FALLBACK
-    op op_aget_short FALLBACK
-    op op_aput FALLBACK
-    op op_aput_wide FALLBACK
-    op op_aput_object FALLBACK
-    op op_aput_boolean FALLBACK
-    op op_aput_byte FALLBACK
-    op op_aput_char FALLBACK
-    op op_aput_short FALLBACK
-    op op_iget FALLBACK
-    op op_iget_wide FALLBACK
-    op op_iget_object FALLBACK
-    op op_iget_boolean FALLBACK
-    op op_iget_byte FALLBACK
-    op op_iget_char FALLBACK
-    op op_iget_short FALLBACK
-    op op_iput FALLBACK
-    op op_iput_wide FALLBACK
-    op op_iput_object FALLBACK
-    op op_iput_boolean FALLBACK
-    op op_iput_byte FALLBACK
-    op op_iput_char FALLBACK
-    op op_iput_short FALLBACK
-    op op_sget FALLBACK
-    op op_sget_wide FALLBACK
-    op op_sget_object FALLBACK
-    op op_sget_boolean FALLBACK
-    op op_sget_byte FALLBACK
-    op op_sget_char FALLBACK
-    op op_sget_short FALLBACK
-    op op_sput FALLBACK
-    op op_sput_wide FALLBACK
-    op op_sput_object FALLBACK
-    op op_sput_boolean FALLBACK
-    op op_sput_byte FALLBACK
-    op op_sput_char FALLBACK
-    op op_sput_short FALLBACK
-    op op_invoke_virtual FALLBACK
-    op op_invoke_super FALLBACK
-    op op_invoke_direct FALLBACK
-    op op_invoke_static FALLBACK
-    op op_invoke_interface FALLBACK
-    op op_return_void_no_barrier FALLBACK
-    op op_invoke_virtual_range FALLBACK
-    op op_invoke_super_range FALLBACK
-    op op_invoke_direct_range FALLBACK
-    op op_invoke_static_range FALLBACK
-    op op_invoke_interface_range FALLBACK
-    op_unused_79 FALLBACK
-    op_unused_7a FALLBACK
-    op op_neg_int FALLBACK
-    op op_not_int FALLBACK
-    op op_neg_long FALLBACK
-    op op_not_long FALLBACK
-    op op_neg_float FALLBACK
-    op op_neg_double FALLBACK
-    op op_int_to_long FALLBACK
-    op op_int_to_float FALLBACK
-    op op_int_to_double FALLBACK
-    op op_long_to_int FALLBACK
-    op op_long_to_float FALLBACK
-    op op_long_to_double FALLBACK
-    op op_float_to_int FALLBACK
-    op op_float_to_long FALLBACK
-    op op_float_to_double FALLBACK
-    op op_double_to_int FALLBACK
-    op op_double_to_long FALLBACK
-    op op_double_to_float FALLBACK
-    op op_int_to_byte FALLBACK
-    op op_int_to_char FALLBACK
-    op op_int_to_short FALLBACK
-    op op_add_int FALLBACK
-    op op_sub_int FALLBACK
-    op op_mul_int FALLBACK
-    op op_div_int FALLBACK
-    op op_rem_int FALLBACK
-    op op_and_int FALLBACK
-    op op_or_int FALLBACK
-    op op_xor_int FALLBACK
-    op op_shl_int FALLBACK
-    op op_shr_int FALLBACK
-    op op_ushr_int FALLBACK
-    op op_add_long FALLBACK
-    op op_sub_long FALLBACK
-    op op_mul_long FALLBACK
-    op op_div_long FALLBACK
-    op op_rem_long FALLBACK
-    op op_and_long FALLBACK
-    op op_or_long FALLBACK
-    op op_xor_long FALLBACK
-    op op_shl_long FALLBACK
-    op op_shr_long FALLBACK
-    op op_ushr_long FALLBACK
-    op op_add_float FALLBACK
-    op op_sub_float FALLBACK
-    op op_mul_float FALLBACK
-    op op_div_float FALLBACK
-    op op_rem_float FALLBACK
-    op op_add_double FALLBACK
-    op op_sub_double FALLBACK
-    op op_mul_double FALLBACK
-    op op_div_double FALLBACK
-    op op_rem_double FALLBACK
-    op op_add_int_2addr FALLBACK
-    op op_sub_int_2addr FALLBACK
-    op op_mul_int_2addr FALLBACK
-    op op_div_int_2addr FALLBACK
-    op op_rem_int_2addr FALLBACK
-    op op_and_int_2addr FALLBACK
-    op op_or_int_2addr FALLBACK
-    op op_xor_int_2addr FALLBACK
-    op op_shl_int_2addr FALLBACK
-    op op_shr_int_2addr FALLBACK
-    op op_ushr_int_2addr FALLBACK
-    op op_add_long_2addr FALLBACK
-    op op_sub_long_2addr FALLBACK
-    op op_mul_long_2addr FALLBACK
-    op op_div_long_2addr FALLBACK
-    op op_rem_long_2addr FALLBACK
-    op op_and_long_2addr FALLBACK
-    op op_or_long_2addr FALLBACK
-    op op_xor_long_2addr FALLBACK
-    op op_shl_long_2addr FALLBACK
-    op op_shr_long_2addr FALLBACK
-    op op_ushr_long_2addr FALLBACK
-    op op_add_float_2addr FALLBACK
-    op op_sub_float_2addr FALLBACK
-    op op_mul_float_2addr FALLBACK
-    op op_div_float_2addr FALLBACK
-    op op_rem_float_2addr FALLBACK
-    op op_add_double_2addr FALLBACK
-    op op_sub_double_2addr FALLBACK
-    op op_mul_double_2addr FALLBACK
-    op op_div_double_2addr FALLBACK
-    op op_rem_double_2addr FALLBACK
-    op op_add_int_lit16 FALLBACK
-    op op_rsub_int FALLBACK
-    op op_mul_int_lit16 FALLBACK
-    op op_div_int_lit16 FALLBACK
-    op op_rem_int_lit16 FALLBACK
-    op op_and_int_lit16 FALLBACK
-    op op_or_int_lit16 FALLBACK
-    op op_xor_int_lit16 FALLBACK
-    op op_add_int_lit8 FALLBACK
-    op op_rsub_int_lit8 FALLBACK
-    op op_mul_int_lit8 FALLBACK
-    op op_div_int_lit8 FALLBACK
-    op op_rem_int_lit8 FALLBACK
-    op op_and_int_lit8 FALLBACK
-    op op_or_int_lit8 FALLBACK
-    op op_xor_int_lit8 FALLBACK
-    op op_shl_int_lit8 FALLBACK
-    op op_shr_int_lit8 FALLBACK
-    op op_ushr_int_lit8 FALLBACK
-    op op_iget_quick FALLBACK
-    op op_iget_wide_quick FALLBACK
-    op op_iget_object_quick FALLBACK
-    op op_iput_quick FALLBACK
-    op op_iput_wide_quick FALLBACK
-    op op_iput_object_quick FALLBACK
-    op op_invoke_virtual_quick FALLBACK
-    op op_invoke_virtual_range_quick FALLBACK
-    op op_iput_boolean_quick FALLBACK
-    op op_iput_byte_quick FALLBACK
-    op op_iput_char_quick FALLBACK
-    op op_iput_short_quick FALLBACK
-    op op_iget_boolean_quick FALLBACK
-    op op_iget_byte_quick FALLBACK
-    op op_iget_char_quick FALLBACK
-    op op_iget_short_quick FALLBACK
-    op_unused_f3 FALLBACK
-    op_unused_f4 FALLBACK
-    op_unused_f5 FALLBACK
-    op_unused_f6 FALLBACK
-    op_unused_f7 FALLBACK
-    op_unused_f8 FALLBACK
-    op_unused_f9 FALLBACK
-    op_unused_fa FALLBACK
-    op_unused_fb FALLBACK
-    op_unused_fc FALLBACK
-    op_unused_fd FALLBACK
-    op_unused_fe FALLBACK
-    op_unused_ff FALLBACK
+    # op op_nop FALLBACK
+    # op op_move FALLBACK
+    # op op_move_from16 FALLBACK
+    # op op_move_16 FALLBACK
+    # op op_move_wide FALLBACK
+    # op op_move_wide_from16 FALLBACK
+    # op op_move_wide_16 FALLBACK
+    # op op_move_object FALLBACK
+    # op op_move_object_from16 FALLBACK
+    # op op_move_object_16 FALLBACK
+    # op op_move_result FALLBACK
+    # op op_move_result_wide FALLBACK
+    # op op_move_result_object FALLBACK
+    # op op_move_exception FALLBACK
+    # op op_return_void FALLBACK
+    # op op_return FALLBACK
+    # op op_return_wide FALLBACK
+    # op op_return_object FALLBACK
+    # op op_const_4 FALLBACK
+    # op op_const_16 FALLBACK
+    # op op_const FALLBACK
+    # op op_const_high16 FALLBACK
+    # op op_const_wide_16 FALLBACK
+    # op op_const_wide_32 FALLBACK
+    # op op_const_wide FALLBACK
+    # op op_const_wide_high16 FALLBACK
+    # op op_const_string FALLBACK
+    # op op_const_string_jumbo FALLBACK
+    # op op_const_class FALLBACK
+    # op op_monitor_enter FALLBACK
+    # op op_monitor_exit FALLBACK
+    # op op_check_cast FALLBACK
+    # op op_instance_of FALLBACK
+    # op op_array_length FALLBACK
+    # op op_new_instance FALLBACK
+    # op op_new_array FALLBACK
+    # op op_filled_new_array FALLBACK
+    # op op_filled_new_array_range FALLBACK
+    # op op_fill_array_data FALLBACK
+    # op op_throw FALLBACK
+    # op op_goto FALLBACK
+    # op op_goto_16 FALLBACK
+    # op op_goto_32 FALLBACK
+    # op op_packed_switch FALLBACK
+    # op op_sparse_switch FALLBACK
+    # op op_cmpl_float FALLBACK
+    # op op_cmpg_float FALLBACK
+    # op op_cmpl_double FALLBACK
+    # op op_cmpg_double FALLBACK
+    # op op_cmp_long FALLBACK
+    # op op_if_eq FALLBACK
+    # op op_if_ne FALLBACK
+    # op op_if_lt FALLBACK
+    # op op_if_ge FALLBACK
+    # op op_if_gt FALLBACK
+    # op op_if_le FALLBACK
+    # op op_if_eqz FALLBACK
+    # op op_if_nez FALLBACK
+    # op op_if_ltz FALLBACK
+    # op op_if_gez FALLBACK
+    # op op_if_gtz FALLBACK
+    # op op_if_lez FALLBACK
+    # op op_unused_3e FALLBACK
+    # op op_unused_3f FALLBACK
+    # op op_unused_40 FALLBACK
+    # op op_unused_41 FALLBACK
+    # op op_unused_42 FALLBACK
+    # op op_unused_43 FALLBACK
+    # op op_aget FALLBACK
+    # op op_aget_wide FALLBACK
+    # op op_aget_object FALLBACK
+    # op op_aget_boolean FALLBACK
+    # op op_aget_byte FALLBACK
+    # op op_aget_char FALLBACK
+    # op op_aget_short FALLBACK
+    # op op_aput FALLBACK
+    # op op_aput_wide FALLBACK
+    # op op_aput_object FALLBACK
+    # op op_aput_boolean FALLBACK
+    # op op_aput_byte FALLBACK
+    # op op_aput_char FALLBACK
+    # op op_aput_short FALLBACK
+    # op op_iget FALLBACK
+    # op op_iget_wide FALLBACK
+    # op op_iget_object FALLBACK
+    # op op_iget_boolean FALLBACK
+    # op op_iget_byte FALLBACK
+    # op op_iget_char FALLBACK
+    # op op_iget_short FALLBACK
+    # op op_iput FALLBACK
+    # op op_iput_wide FALLBACK
+    # op op_iput_object FALLBACK
+    # op op_iput_boolean FALLBACK
+    # op op_iput_byte FALLBACK
+    # op op_iput_char FALLBACK
+    # op op_iput_short FALLBACK
+    # op op_sget FALLBACK
+    # op op_sget_wide FALLBACK
+    # op op_sget_object FALLBACK
+    # op op_sget_boolean FALLBACK
+    # op op_sget_byte FALLBACK
+    # op op_sget_char FALLBACK
+    # op op_sget_short FALLBACK
+    # op op_sput FALLBACK
+    # op op_sput_wide FALLBACK
+    # op op_sput_object FALLBACK
+    # op op_sput_boolean FALLBACK
+    # op op_sput_byte FALLBACK
+    # op op_sput_char FALLBACK
+    # op op_sput_short FALLBACK
+    # op op_invoke_virtual FALLBACK
+    # op op_invoke_super FALLBACK
+    # op op_invoke_direct FALLBACK
+    # op op_invoke_static FALLBACK
+    # op op_invoke_interface FALLBACK
+    # op op_return_void_no_barrier FALLBACK
+    # op op_invoke_virtual_range FALLBACK
+    # op op_invoke_super_range FALLBACK
+    # op op_invoke_direct_range FALLBACK
+    # op op_invoke_static_range FALLBACK
+    # op op_invoke_interface_range FALLBACK
+    # op op_unused_79 FALLBACK
+    # op op_unused_7a FALLBACK
+    # op op_neg_int FALLBACK
+    # op op_not_int FALLBACK
+    # op op_neg_long FALLBACK
+    # op op_not_long FALLBACK
+    # op op_neg_float FALLBACK
+    # op op_neg_double FALLBACK
+    # op op_int_to_long FALLBACK
+    # op op_int_to_float FALLBACK
+    # op op_int_to_double FALLBACK
+    # op op_long_to_int FALLBACK
+    # op op_long_to_float FALLBACK
+    # op op_long_to_double FALLBACK
+    # op op_float_to_int FALLBACK
+    # op op_float_to_long FALLBACK
+    # op op_float_to_double FALLBACK
+    # op op_double_to_int FALLBACK
+    # op op_double_to_long FALLBACK
+    # op op_double_to_float FALLBACK
+    # op op_int_to_byte FALLBACK
+    # op op_int_to_char FALLBACK
+    # op op_int_to_short FALLBACK
+    # op op_add_int FALLBACK
+    # op op_sub_int FALLBACK
+    # op op_mul_int FALLBACK
+    # op op_div_int FALLBACK
+    # op op_rem_int FALLBACK
+    # op op_and_int FALLBACK
+    # op op_or_int FALLBACK
+    # op op_xor_int FALLBACK
+    # op op_shl_int FALLBACK
+    # op op_shr_int FALLBACK
+    # op op_ushr_int FALLBACK
+    # op op_add_long FALLBACK
+    # op op_sub_long FALLBACK
+    # op op_mul_long FALLBACK
+    # op op_div_long FALLBACK
+    # op op_rem_long FALLBACK
+    # op op_and_long FALLBACK
+    # op op_or_long FALLBACK
+    # op op_xor_long FALLBACK
+    # op op_shl_long FALLBACK
+    # op op_shr_long FALLBACK
+    # op op_ushr_long FALLBACK
+    # op op_add_float FALLBACK
+    # op op_sub_float FALLBACK
+    # op op_mul_float FALLBACK
+    # op op_div_float FALLBACK
+    # op op_rem_float FALLBACK
+    # op op_add_double FALLBACK
+    # op op_sub_double FALLBACK
+    # op op_mul_double FALLBACK
+    # op op_div_double FALLBACK
+    # op op_rem_double FALLBACK
+    # op op_add_int_2addr FALLBACK
+    # op op_sub_int_2addr FALLBACK
+    # op op_mul_int_2addr FALLBACK
+    # op op_div_int_2addr FALLBACK
+    # op op_rem_int_2addr FALLBACK
+    # op op_and_int_2addr FALLBACK
+    # op op_or_int_2addr FALLBACK
+    # op op_xor_int_2addr FALLBACK
+    # op op_shl_int_2addr FALLBACK
+    # op op_shr_int_2addr FALLBACK
+    # op op_ushr_int_2addr FALLBACK
+    # op op_add_long_2addr FALLBACK
+    # op op_sub_long_2addr FALLBACK
+    # op op_mul_long_2addr FALLBACK
+    # op op_div_long_2addr FALLBACK
+    # op op_rem_long_2addr FALLBACK
+    # op op_and_long_2addr FALLBACK
+    # op op_or_long_2addr FALLBACK
+    # op op_xor_long_2addr FALLBACK
+    # op op_shl_long_2addr FALLBACK
+    # op op_shr_long_2addr FALLBACK
+    # op op_ushr_long_2addr FALLBACK
+    # op op_add_float_2addr FALLBACK
+    # op op_sub_float_2addr FALLBACK
+    # op op_mul_float_2addr FALLBACK
+    # op op_div_float_2addr FALLBACK
+    # op op_rem_float_2addr FALLBACK
+    # op op_add_double_2addr FALLBACK
+    # op op_sub_double_2addr FALLBACK
+    # op op_mul_double_2addr FALLBACK
+    # op op_div_double_2addr FALLBACK
+    # op op_rem_double_2addr FALLBACK
+    # op op_add_int_lit16 FALLBACK
+    # op op_rsub_int FALLBACK
+    # op op_mul_int_lit16 FALLBACK
+    # op op_div_int_lit16 FALLBACK
+    # op op_rem_int_lit16 FALLBACK
+    # op op_and_int_lit16 FALLBACK
+    # op op_or_int_lit16 FALLBACK
+    # op op_xor_int_lit16 FALLBACK
+    # op op_add_int_lit8 FALLBACK
+    # op op_rsub_int_lit8 FALLBACK
+    # op op_mul_int_lit8 FALLBACK
+    # op op_div_int_lit8 FALLBACK
+    # op op_rem_int_lit8 FALLBACK
+    # op op_and_int_lit8 FALLBACK
+    # op op_or_int_lit8 FALLBACK
+    # op op_xor_int_lit8 FALLBACK
+    # op op_shl_int_lit8 FALLBACK
+    # op op_shr_int_lit8 FALLBACK
+    # op op_ushr_int_lit8 FALLBACK
+    # op op_iget_quick FALLBACK
+    # op op_iget_wide_quick FALLBACK
+    # op op_iget_object_quick FALLBACK
+    # op op_iput_quick FALLBACK
+    # op op_iput_wide_quick FALLBACK
+    # op op_iput_object_quick FALLBACK
+    # op op_invoke_virtual_quick FALLBACK
+    # op op_invoke_virtual_range_quick FALLBACK
+    # op op_iput_boolean_quick FALLBACK
+    # op op_iput_byte_quick FALLBACK
+    # op op_iput_char_quick FALLBACK
+    # op op_iput_short_quick FALLBACK
+    # op op_iget_boolean_quick FALLBACK
+    # op op_iget_byte_quick FALLBACK
+    # op op_iget_char_quick FALLBACK
+    # op op_iget_short_quick FALLBACK
+    op op_invoke_lambda FALLBACK
+    # op op_unused_f4 FALLBACK
+    op op_capture_variable FALLBACK
+    op op_create_lambda FALLBACK
+    op op_liberate_variable FALLBACK
+    op op_box_lambda FALLBACK
+    op op_unbox_lambda FALLBACK
+    # op op_unused_fa FALLBACK
+    # op op_unused_fb FALLBACK
+    # op op_unused_fc FALLBACK
+    # op op_unused_fd FALLBACK
+    # op op_unused_fe FALLBACK
+    # op op_unused_ff FALLBACK
 op-end
 
 # common subroutines for asm
diff --git a/runtime/interpreter/mterp/config_x86_64 b/runtime/interpreter/mterp/config_x86_64
index a002dc2..1d7eb03 100644
--- a/runtime/interpreter/mterp/config_x86_64
+++ b/runtime/interpreter/mterp/config_x86_64
@@ -19,6 +19,10 @@
 handler-style computed-goto
 handler-size 128
 
+function-type-format FUNCTION_TYPE(%s)
+function-size-format SIZE(%s,%s)
+global-name-format SYMBOL(%s)
+
 # source for alternate entry stub
 asm-alt-stub x86_64/alt_stub.S
 
@@ -36,262 +40,262 @@
     # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
     # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
 
-    op op_nop FALLBACK
-    op op_move FALLBACK
-    op op_move_from16 FALLBACK
-    op op_move_16 FALLBACK
-    op op_move_wide FALLBACK
-    op op_move_wide_from16 FALLBACK
-    op op_move_wide_16 FALLBACK
-    op op_move_object FALLBACK
-    op op_move_object_from16 FALLBACK
-    op op_move_object_16 FALLBACK
-    op op_move_result FALLBACK
-    op op_move_result_wide FALLBACK
-    op op_move_result_object FALLBACK
-    op op_move_exception FALLBACK
-    op op_return_void FALLBACK
-    op op_return FALLBACK
-    op op_return_wide FALLBACK
-    op op_return_object FALLBACK
-    op op_const_4 FALLBACK
-    op op_const_16 FALLBACK
-    op op_const FALLBACK
-    op op_const_high16 FALLBACK
-    op op_const_wide_16 FALLBACK
-    op op_const_wide_32 FALLBACK
-    op op_const_wide FALLBACK
-    op op_const_wide_high16 FALLBACK
-    op op_const_string FALLBACK
-    op op_const_string_jumbo FALLBACK
-    op op_const_class FALLBACK
-    op op_monitor_enter FALLBACK
-    op op_monitor_exit FALLBACK
-    op op_check_cast FALLBACK
-    op op_instance_of FALLBACK
-    op op_array_length FALLBACK
-    op op_new_instance FALLBACK
-    op op_new_array FALLBACK
-    op op_filled_new_array FALLBACK
-    op op_filled_new_array_range FALLBACK
-    op op_fill_array_data FALLBACK
-    op op_throw FALLBACK
-    op op_goto FALLBACK
-    op op_goto_16 FALLBACK
-    op op_goto_32 FALLBACK
-    op op_packed_switch FALLBACK
-    op op_sparse_switch FALLBACK
-    op op_cmpl_float FALLBACK
-    op op_cmpg_float FALLBACK
-    op op_cmpl_double FALLBACK
-    op op_cmpg_double FALLBACK
-    op op_cmp_long FALLBACK
-    op op_if_eq FALLBACK
-    op op_if_ne FALLBACK
-    op op_if_lt FALLBACK
-    op op_if_ge FALLBACK
-    op op_if_gt FALLBACK
-    op op_if_le FALLBACK
-    op op_if_eqz FALLBACK
-    op op_if_nez FALLBACK
-    op op_if_ltz FALLBACK
-    op op_if_gez FALLBACK
-    op op_if_gtz FALLBACK
-    op op_if_lez FALLBACK
-    op_unused_3e FALLBACK
-    op_unused_3f FALLBACK
-    op_unused_40 FALLBACK
-    op_unused_41 FALLBACK
-    op_unused_42 FALLBACK
-    op_unused_43 FALLBACK
-    op op_aget FALLBACK
-    op op_aget_wide FALLBACK
-    op op_aget_object FALLBACK
-    op op_aget_boolean FALLBACK
-    op op_aget_byte FALLBACK
-    op op_aget_char FALLBACK
-    op op_aget_short FALLBACK
-    op op_aput FALLBACK
-    op op_aput_wide FALLBACK
-    op op_aput_object FALLBACK
-    op op_aput_boolean FALLBACK
-    op op_aput_byte FALLBACK
-    op op_aput_char FALLBACK
-    op op_aput_short FALLBACK
-    op op_iget FALLBACK
-    op op_iget_wide FALLBACK
-    op op_iget_object FALLBACK
-    op op_iget_boolean FALLBACK
-    op op_iget_byte FALLBACK
-    op op_iget_char FALLBACK
-    op op_iget_short FALLBACK
-    op op_iput FALLBACK
-    op op_iput_wide FALLBACK
-    op op_iput_object FALLBACK
-    op op_iput_boolean FALLBACK
-    op op_iput_byte FALLBACK
-    op op_iput_char FALLBACK
-    op op_iput_short FALLBACK
-    op op_sget FALLBACK
-    op op_sget_wide FALLBACK
-    op op_sget_object FALLBACK
-    op op_sget_boolean FALLBACK
-    op op_sget_byte FALLBACK
-    op op_sget_char FALLBACK
-    op op_sget_short FALLBACK
-    op op_sput FALLBACK
-    op op_sput_wide FALLBACK
-    op op_sput_object FALLBACK
-    op op_sput_boolean FALLBACK
-    op op_sput_byte FALLBACK
-    op op_sput_char FALLBACK
-    op op_sput_short FALLBACK
-    op op_invoke_virtual FALLBACK
-    op op_invoke_super FALLBACK
-    op op_invoke_direct FALLBACK
-    op op_invoke_static FALLBACK
-    op op_invoke_interface FALLBACK
-    op op_return_void_no_barrier FALLBACK
-    op op_invoke_virtual_range FALLBACK
-    op op_invoke_super_range FALLBACK
-    op op_invoke_direct_range FALLBACK
-    op op_invoke_static_range FALLBACK
-    op op_invoke_interface_range FALLBACK
-    op_unused_79 FALLBACK
-    op_unused_7a FALLBACK
-    op op_neg_int FALLBACK
-    op op_not_int FALLBACK
-    op op_neg_long FALLBACK
-    op op_not_long FALLBACK
-    op op_neg_float FALLBACK
-    op op_neg_double FALLBACK
-    op op_int_to_long FALLBACK
-    op op_int_to_float FALLBACK
-    op op_int_to_double FALLBACK
-    op op_long_to_int FALLBACK
-    op op_long_to_float FALLBACK
-    op op_long_to_double FALLBACK
-    op op_float_to_int FALLBACK
-    op op_float_to_long FALLBACK
-    op op_float_to_double FALLBACK
-    op op_double_to_int FALLBACK
-    op op_double_to_long FALLBACK
-    op op_double_to_float FALLBACK
-    op op_int_to_byte FALLBACK
-    op op_int_to_char FALLBACK
-    op op_int_to_short FALLBACK
-    op op_add_int FALLBACK
-    op op_sub_int FALLBACK
-    op op_mul_int FALLBACK
-    op op_div_int FALLBACK
-    op op_rem_int FALLBACK
-    op op_and_int FALLBACK
-    op op_or_int FALLBACK
-    op op_xor_int FALLBACK
-    op op_shl_int FALLBACK
-    op op_shr_int FALLBACK
-    op op_ushr_int FALLBACK
-    op op_add_long FALLBACK
-    op op_sub_long FALLBACK
-    op op_mul_long FALLBACK
-    op op_div_long FALLBACK
-    op op_rem_long FALLBACK
-    op op_and_long FALLBACK
-    op op_or_long FALLBACK
-    op op_xor_long FALLBACK
-    op op_shl_long FALLBACK
-    op op_shr_long FALLBACK
-    op op_ushr_long FALLBACK
-    op op_add_float FALLBACK
-    op op_sub_float FALLBACK
-    op op_mul_float FALLBACK
-    op op_div_float FALLBACK
-    op op_rem_float FALLBACK
-    op op_add_double FALLBACK
-    op op_sub_double FALLBACK
-    op op_mul_double FALLBACK
-    op op_div_double FALLBACK
-    op op_rem_double FALLBACK
-    op op_add_int_2addr FALLBACK
-    op op_sub_int_2addr FALLBACK
-    op op_mul_int_2addr FALLBACK
-    op op_div_int_2addr FALLBACK
-    op op_rem_int_2addr FALLBACK
-    op op_and_int_2addr FALLBACK
-    op op_or_int_2addr FALLBACK
-    op op_xor_int_2addr FALLBACK
-    op op_shl_int_2addr FALLBACK
-    op op_shr_int_2addr FALLBACK
-    op op_ushr_int_2addr FALLBACK
-    op op_add_long_2addr FALLBACK
-    op op_sub_long_2addr FALLBACK
-    op op_mul_long_2addr FALLBACK
-    op op_div_long_2addr FALLBACK
-    op op_rem_long_2addr FALLBACK
-    op op_and_long_2addr FALLBACK
-    op op_or_long_2addr FALLBACK
-    op op_xor_long_2addr FALLBACK
-    op op_shl_long_2addr FALLBACK
-    op op_shr_long_2addr FALLBACK
-    op op_ushr_long_2addr FALLBACK
-    op op_add_float_2addr FALLBACK
-    op op_sub_float_2addr FALLBACK
-    op op_mul_float_2addr FALLBACK
-    op op_div_float_2addr FALLBACK
-    op op_rem_float_2addr FALLBACK
-    op op_add_double_2addr FALLBACK
-    op op_sub_double_2addr FALLBACK
-    op op_mul_double_2addr FALLBACK
-    op op_div_double_2addr FALLBACK
-    op op_rem_double_2addr FALLBACK
-    op op_add_int_lit16 FALLBACK
-    op op_rsub_int FALLBACK
-    op op_mul_int_lit16 FALLBACK
-    op op_div_int_lit16 FALLBACK
-    op op_rem_int_lit16 FALLBACK
-    op op_and_int_lit16 FALLBACK
-    op op_or_int_lit16 FALLBACK
-    op op_xor_int_lit16 FALLBACK
-    op op_add_int_lit8 FALLBACK
-    op op_rsub_int_lit8 FALLBACK
-    op op_mul_int_lit8 FALLBACK
-    op op_div_int_lit8 FALLBACK
-    op op_rem_int_lit8 FALLBACK
-    op op_and_int_lit8 FALLBACK
-    op op_or_int_lit8 FALLBACK
-    op op_xor_int_lit8 FALLBACK
-    op op_shl_int_lit8 FALLBACK
-    op op_shr_int_lit8 FALLBACK
-    op op_ushr_int_lit8 FALLBACK
-    op op_iget_quick FALLBACK
-    op op_iget_wide_quick FALLBACK
-    op op_iget_object_quick FALLBACK
-    op op_iput_quick FALLBACK
-    op op_iput_wide_quick FALLBACK
-    op op_iput_object_quick FALLBACK
-    op op_invoke_virtual_quick FALLBACK
-    op op_invoke_virtual_range_quick FALLBACK
-    op op_iput_boolean_quick FALLBACK
-    op op_iput_byte_quick FALLBACK
-    op op_iput_char_quick FALLBACK
-    op op_iput_short_quick FALLBACK
-    op op_iget_boolean_quick FALLBACK
-    op op_iget_byte_quick FALLBACK
-    op op_iget_char_quick FALLBACK
-    op op_iget_short_quick FALLBACK
-    op_unused_f3 FALLBACK
-    op_unused_f4 FALLBACK
-    op_unused_f5 FALLBACK
-    op_unused_f6 FALLBACK
-    op_unused_f7 FALLBACK
-    op_unused_f8 FALLBACK
-    op_unused_f9 FALLBACK
-    op_unused_fa FALLBACK
-    op_unused_fb FALLBACK
-    op_unused_fc FALLBACK
-    op_unused_fd FALLBACK
-    op_unused_fe FALLBACK
-    op_unused_ff FALLBACK
+    # op op_nop FALLBACK
+    # op op_move FALLBACK
+    # op op_move_from16 FALLBACK
+    # op op_move_16 FALLBACK
+    # op op_move_wide FALLBACK
+    # op op_move_wide_from16 FALLBACK
+    # op op_move_wide_16 FALLBACK
+    # op op_move_object FALLBACK
+    # op op_move_object_from16 FALLBACK
+    # op op_move_object_16 FALLBACK
+    # op op_move_result FALLBACK
+    # op op_move_result_wide FALLBACK
+    # op op_move_result_object FALLBACK
+    # op op_move_exception FALLBACK
+    # op op_return_void FALLBACK
+    # op op_return FALLBACK
+    # op op_return_wide FALLBACK
+    # op op_return_object FALLBACK
+    # op op_const_4 FALLBACK
+    # op op_const_16 FALLBACK
+    # op op_const FALLBACK
+    # op op_const_high16 FALLBACK
+    # op op_const_wide_16 FALLBACK
+    # op op_const_wide_32 FALLBACK
+    # op op_const_wide FALLBACK
+    # op op_const_wide_high16 FALLBACK
+    # op op_const_string FALLBACK
+    # op op_const_string_jumbo FALLBACK
+    # op op_const_class FALLBACK
+    # op op_monitor_enter FALLBACK
+    # op op_monitor_exit FALLBACK
+    # op op_check_cast FALLBACK
+    # op op_instance_of FALLBACK
+    # op op_array_length FALLBACK
+    # op op_new_instance FALLBACK
+    # op op_new_array FALLBACK
+    # op op_filled_new_array FALLBACK
+    # op op_filled_new_array_range FALLBACK
+    # op op_fill_array_data FALLBACK
+    # op op_throw FALLBACK
+    # op op_goto FALLBACK
+    # op op_goto_16 FALLBACK
+    # op op_goto_32 FALLBACK
+    # op op_packed_switch FALLBACK
+    # op op_sparse_switch FALLBACK
+    # op op_cmpl_float FALLBACK
+    # op op_cmpg_float FALLBACK
+    # op op_cmpl_double FALLBACK
+    # op op_cmpg_double FALLBACK
+    # op op_cmp_long FALLBACK
+    # op op_if_eq FALLBACK
+    # op op_if_ne FALLBACK
+    # op op_if_lt FALLBACK
+    # op op_if_ge FALLBACK
+    # op op_if_gt FALLBACK
+    # op op_if_le FALLBACK
+    # op op_if_eqz FALLBACK
+    # op op_if_nez FALLBACK
+    # op op_if_ltz FALLBACK
+    # op op_if_gez FALLBACK
+    # op op_if_gtz FALLBACK
+    # op op_if_lez FALLBACK
+    # op op_unused_3e FALLBACK
+    # op op_unused_3f FALLBACK
+    # op op_unused_40 FALLBACK
+    # op op_unused_41 FALLBACK
+    # op op_unused_42 FALLBACK
+    # op op_unused_43 FALLBACK
+    # op op_aget FALLBACK
+    # op op_aget_wide FALLBACK
+    # op op_aget_object FALLBACK
+    # op op_aget_boolean FALLBACK
+    # op op_aget_byte FALLBACK
+    # op op_aget_char FALLBACK
+    # op op_aget_short FALLBACK
+    # op op_aput FALLBACK
+    # op op_aput_wide FALLBACK
+    # op op_aput_object FALLBACK
+    # op op_aput_boolean FALLBACK
+    # op op_aput_byte FALLBACK
+    # op op_aput_char FALLBACK
+    # op op_aput_short FALLBACK
+    # op op_iget FALLBACK
+    # op op_iget_wide FALLBACK
+    # op op_iget_object FALLBACK
+    # op op_iget_boolean FALLBACK
+    # op op_iget_byte FALLBACK
+    # op op_iget_char FALLBACK
+    # op op_iget_short FALLBACK
+    # op op_iput FALLBACK
+    # op op_iput_wide FALLBACK
+    # op op_iput_object FALLBACK
+    # op op_iput_boolean FALLBACK
+    # op op_iput_byte FALLBACK
+    # op op_iput_char FALLBACK
+    # op op_iput_short FALLBACK
+    # op op_sget FALLBACK
+    # op op_sget_wide FALLBACK
+    # op op_sget_object FALLBACK
+    # op op_sget_boolean FALLBACK
+    # op op_sget_byte FALLBACK
+    # op op_sget_char FALLBACK
+    # op op_sget_short FALLBACK
+    # op op_sput FALLBACK
+    # op op_sput_wide FALLBACK
+    # op op_sput_object FALLBACK
+    # op op_sput_boolean FALLBACK
+    # op op_sput_byte FALLBACK
+    # op op_sput_char FALLBACK
+    # op op_sput_short FALLBACK
+    # op op_invoke_virtual FALLBACK
+    # op op_invoke_super FALLBACK
+    # op op_invoke_direct FALLBACK
+    # op op_invoke_static FALLBACK
+    # op op_invoke_interface FALLBACK
+    # op op_return_void_no_barrier FALLBACK
+    # op op_invoke_virtual_range FALLBACK
+    # op op_invoke_super_range FALLBACK
+    # op op_invoke_direct_range FALLBACK
+    # op op_invoke_static_range FALLBACK
+    # op op_invoke_interface_range FALLBACK
+    # op op_unused_79 FALLBACK
+    # op op_unused_7a FALLBACK
+    # op op_neg_int FALLBACK
+    # op op_not_int FALLBACK
+    # op op_neg_long FALLBACK
+    # op op_not_long FALLBACK
+    # op op_neg_float FALLBACK
+    # op op_neg_double FALLBACK
+    # op op_int_to_long FALLBACK
+    # op op_int_to_float FALLBACK
+    # op op_int_to_double FALLBACK
+    # op op_long_to_int FALLBACK
+    # op op_long_to_float FALLBACK
+    # op op_long_to_double FALLBACK
+    # op op_float_to_int FALLBACK
+    # op op_float_to_long FALLBACK
+    # op op_float_to_double FALLBACK
+    # op op_double_to_int FALLBACK
+    # op op_double_to_long FALLBACK
+    # op op_double_to_float FALLBACK
+    # op op_int_to_byte FALLBACK
+    # op op_int_to_char FALLBACK
+    # op op_int_to_short FALLBACK
+    # op op_add_int FALLBACK
+    # op op_sub_int FALLBACK
+    # op op_mul_int FALLBACK
+    # op op_div_int FALLBACK
+    # op op_rem_int FALLBACK
+    # op op_and_int FALLBACK
+    # op op_or_int FALLBACK
+    # op op_xor_int FALLBACK
+    # op op_shl_int FALLBACK
+    # op op_shr_int FALLBACK
+    # op op_ushr_int FALLBACK
+    # op op_add_long FALLBACK
+    # op op_sub_long FALLBACK
+    # op op_mul_long FALLBACK
+    # op op_div_long FALLBACK
+    # op op_rem_long FALLBACK
+    # op op_and_long FALLBACK
+    # op op_or_long FALLBACK
+    # op op_xor_long FALLBACK
+    # op op_shl_long FALLBACK
+    # op op_shr_long FALLBACK
+    # op op_ushr_long FALLBACK
+    # op op_add_float FALLBACK
+    # op op_sub_float FALLBACK
+    # op op_mul_float FALLBACK
+    # op op_div_float FALLBACK
+    # op op_rem_float FALLBACK
+    # op op_add_double FALLBACK
+    # op op_sub_double FALLBACK
+    # op op_mul_double FALLBACK
+    # op op_div_double FALLBACK
+    # op op_rem_double FALLBACK
+    # op op_add_int_2addr FALLBACK
+    # op op_sub_int_2addr FALLBACK
+    # op op_mul_int_2addr FALLBACK
+    # op op_div_int_2addr FALLBACK
+    # op op_rem_int_2addr FALLBACK
+    # op op_and_int_2addr FALLBACK
+    # op op_or_int_2addr FALLBACK
+    # op op_xor_int_2addr FALLBACK
+    # op op_shl_int_2addr FALLBACK
+    # op op_shr_int_2addr FALLBACK
+    # op op_ushr_int_2addr FALLBACK
+    # op op_add_long_2addr FALLBACK
+    # op op_sub_long_2addr FALLBACK
+    # op op_mul_long_2addr FALLBACK
+    # op op_div_long_2addr FALLBACK
+    # op op_rem_long_2addr FALLBACK
+    # op op_and_long_2addr FALLBACK
+    # op op_or_long_2addr FALLBACK
+    # op op_xor_long_2addr FALLBACK
+    # op op_shl_long_2addr FALLBACK
+    # op op_shr_long_2addr FALLBACK
+    # op op_ushr_long_2addr FALLBACK
+    # op op_add_float_2addr FALLBACK
+    # op op_sub_float_2addr FALLBACK
+    # op op_mul_float_2addr FALLBACK
+    # op op_div_float_2addr FALLBACK
+    # op op_rem_float_2addr FALLBACK
+    # op op_add_double_2addr FALLBACK
+    # op op_sub_double_2addr FALLBACK
+    # op op_mul_double_2addr FALLBACK
+    # op op_div_double_2addr FALLBACK
+    # op op_rem_double_2addr FALLBACK
+    # op op_add_int_lit16 FALLBACK
+    # op op_rsub_int FALLBACK
+    # op op_mul_int_lit16 FALLBACK
+    # op op_div_int_lit16 FALLBACK
+    # op op_rem_int_lit16 FALLBACK
+    # op op_and_int_lit16 FALLBACK
+    # op op_or_int_lit16 FALLBACK
+    # op op_xor_int_lit16 FALLBACK
+    # op op_add_int_lit8 FALLBACK
+    # op op_rsub_int_lit8 FALLBACK
+    # op op_mul_int_lit8 FALLBACK
+    # op op_div_int_lit8 FALLBACK
+    # op op_rem_int_lit8 FALLBACK
+    # op op_and_int_lit8 FALLBACK
+    # op op_or_int_lit8 FALLBACK
+    # op op_xor_int_lit8 FALLBACK
+    # op op_shl_int_lit8 FALLBACK
+    # op op_shr_int_lit8 FALLBACK
+    # op op_ushr_int_lit8 FALLBACK
+    # op op_iget_quick FALLBACK
+    # op op_iget_wide_quick FALLBACK
+    # op op_iget_object_quick FALLBACK
+    # op op_iput_quick FALLBACK
+    # op op_iput_wide_quick FALLBACK
+    # op op_iput_object_quick FALLBACK
+    # op op_invoke_virtual_quick FALLBACK
+    # op op_invoke_virtual_range_quick FALLBACK
+    # op op_iput_boolean_quick FALLBACK
+    # op op_iput_byte_quick FALLBACK
+    # op op_iput_char_quick FALLBACK
+    # op op_iput_short_quick FALLBACK
+    # op op_iget_boolean_quick FALLBACK
+    # op op_iget_byte_quick FALLBACK
+    # op op_iget_char_quick FALLBACK
+    # op op_iget_short_quick FALLBACK
+    op op_invoke_lambda FALLBACK
+    # op op_unused_f4 FALLBACK
+    op op_capture_variable FALLBACK
+    op op_create_lambda FALLBACK
+    op op_liberate_variable FALLBACK
+    op op_box_lambda FALLBACK
+    op op_unbox_lambda FALLBACK
+    # op op_unused_fa FALLBACK
+    # op op_unused_fb FALLBACK
+    # op op_unused_fc FALLBACK
+    # op op_unused_fd FALLBACK
+    # op op_unused_fe FALLBACK
+    # op op_unused_ff FALLBACK
 op-end
 
 # common subroutines for asm
diff --git a/runtime/interpreter/mterp/mips/alt_stub.S b/runtime/interpreter/mterp/mips/alt_stub.S
new file mode 100644
index 0000000..4598061
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/alt_stub.S
@@ -0,0 +1,13 @@
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (${opnum} * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
diff --git a/runtime/interpreter/mterp/mips/bincmp.S b/runtime/interpreter/mterp/mips/bincmp.S
new file mode 100644
index 0000000..70057f6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/bincmp.S
@@ -0,0 +1,37 @@
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    b${revcmp} a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- BYTE branch dist for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a2, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+    bgez      a2, .L_${opcode}_finish
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+
+%break
+
+.L_${opcode}_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/binop.S b/runtime/interpreter/mterp/mips/binop.S
new file mode 100644
index 0000000..ce09da45
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/binop.S
@@ -0,0 +1,33 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if $chkzero
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  $result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
+    /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/mips/binop2addr.S b/runtime/interpreter/mterp/mips/binop2addr.S
new file mode 100644
index 0000000..548cbcb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/binop2addr.S
@@ -0,0 +1,29 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if $chkzero
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  $result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vA <- $result
+    /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/mips/binopLit16.S b/runtime/interpreter/mterp/mips/binopLit16.S
new file mode 100644
index 0000000..fc0c9ff
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/binopLit16.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the literal CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if $chkzero
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  $result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vA <- $result
+    /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/mips/binopLit8.S b/runtime/interpreter/mterp/mips/binopLit8.S
new file mode 100644
index 0000000..a591408
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/binopLit8.S
@@ -0,0 +1,31 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the literal CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if $chkzero
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  $result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
+    /* 10-12 instructions */
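
The lit8 variant packs both a register index and a signed literal into the
second code unit; a small C sketch of that unpacking (names are illustrative):

    #include <stdint.h>

    /* Mirrors FETCH_S plus the mask/shift in binopLit8.S: the sign-extended
     * unit is ssssCCBB; the AND keeps BB, the arithmetic shift keeps a
     * sign-extended CC (relies on arithmetic >> of negatives, as sra does). */
    static void decode_lit8(int16_t unit, uint32_t *bb, int32_t *cc) {
        *bb = (uint16_t)unit & 0xffu;  /* and a2, a3, 255 */
        *cc = (int32_t)unit >> 8;      /* sra a1, a3, 8 */
    }
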
diff --git a/runtime/interpreter/mterp/mips/binopWide.S b/runtime/interpreter/mterp/mips/binopWide.S
new file mode 100644
index 0000000..608525b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/binopWide.S
@@ -0,0 +1,35 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64($arg0, $arg1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64($arg2, $arg3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if $chkzero
+    or        t0, $arg2, $arg3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vAA/vAA+1 <- $result0/$result1
+    /* 14-17 instructions */
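
The wide template's divide-by-zero test never materializes a 64-bit value: a
64-bit operand split across two 32-bit registers is zero exactly when the OR
of its halves is zero. Equivalent C, as a sanity check:

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors "or t0, a2, a3; beqz t0, common_errDivideByZero". */
    static bool is_zero64(uint32_t lo, uint32_t hi) {
        return (lo | hi) == 0;
    }
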
diff --git a/runtime/interpreter/mterp/mips/binopWide2addr.S b/runtime/interpreter/mterp/mips/binopWide2addr.S
new file mode 100644
index 0000000..cc92149
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/binopWide2addr.S
@@ -0,0 +1,33 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64($arg2, $arg3, a1)               #  a2/a3 <- vB/vB+1
+    LOAD64($arg0, $arg1, t0)               #  a0/a1 <- vA/vA+1
+    .if $chkzero
+    or        t0, $arg2, $arg3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64($result0, $result1, rOBJ)   #  vA/vA+1 <- $result0/$result1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
diff --git a/runtime/interpreter/mterp/mips/entry.S b/runtime/interpreter/mterp/mips/entry.S
new file mode 100644
index 0000000..cef08fe
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/entry.S
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+    .text
+    .align 2
+    .global ExecuteMterpImpl
+    .ent    ExecuteMterpImpl
+    .frame sp, STACK_SIZE, ra
+/*
+ * On entry:
+ *  a0  Thread* self
+ *  a1  code_item
+ *  a2  ShadowFrame
+ *  a3  JValue* result_register
+ *
+ */
+
+ExecuteMterpImpl:
+    .set noreorder
+    .cpload t9
+    .set reorder
+/* Save to the stack. Frame size = STACK_SIZE */
+    STACK_STORE_FULL()
+/* This directive will make sure all subsequent jal restore gp at a known offset */
+    .cprestore STACK_OFFSET_GP
+
+    /* Remember the return register */
+    sw      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
+
+    /* Remember the code_item */
+    sw      a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
+
+    /* set up "named" registers */
+    move    rSELF, a0
+    lw      a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
+    addu    rFP, a2, SHADOWFRAME_VREGS_OFFSET    # point to vregs[] (the Dalvik registers).
+    EAS2(rREFS, rFP, a0)                          # point to reference array in shadow frame
+    lw      a0, SHADOWFRAME_DEX_PC_OFFSET(a2)     # Get starting dex_pc
+    addu    rPC, a1, CODEITEM_INSNS_OFFSET        # Point to base of insns[]
+    EAS1(rPC, rPC, a0)                            # Create direct pointer to 1st dex opcode
+
+    EXPORT_PC()
+
+    /* Starting ibase */
+    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+
+    /* start executing the instruction at rPC */
+    FETCH_INST()                           # load rINST from rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+    /* NOTE: no fallthrough */
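
A C sketch of what this prologue computes, assuming a simplified ShadowFrame
whose fields correspond to the SHADOWFRAME_* offsets from asm_support.h; the
field order here is illustrative, not the runtime's real layout.

    #include <stdint.h>

    /* Illustrative stand-in for the runtime's ShadowFrame. */
    typedef struct {
        uint32_t number_of_vregs;
        uint32_t dex_pc;
        uint32_t vregs[];  /* n vreg slots, then n reference slots */
    } ShadowFrameSketch;

    static void setup_named_regs(ShadowFrameSketch *sf, const uint16_t *insns,
                                 uint32_t **rfp, uint32_t **rrefs,
                                 const uint16_t **rpc) {
        *rfp = sf->vregs;                          /* addu rFP, a2, VREGS_OFFSET */
        *rrefs = sf->vregs + sf->number_of_vregs;  /* EAS2(rREFS, rFP, a0) */
        *rpc = insns + sf->dex_pc;                 /* EAS1(rPC, rPC, a0) */
    }
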
diff --git a/runtime/interpreter/mterp/mips/fallback.S b/runtime/interpreter/mterp/mips/fallback.S
new file mode 100644
index 0000000..82cbc63
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/fallback.S
@@ -0,0 +1,2 @@
+/* Transfer stub to alternate interpreter */
+    b    MterpFallback
diff --git a/runtime/interpreter/mterp/mips/fbinop.S b/runtime/interpreter/mterp/mips/fbinop.S
new file mode 100644
index 0000000..d0d39ae
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/fbinop.S
@@ -0,0 +1,19 @@
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+     */
+
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG_F(fa1, a3)                    #  fa1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  fa0 <- vBB
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    $instr                                 #  f0 = result
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/fbinop2addr.S b/runtime/interpreter/mterp/mips/fbinop2addr.S
new file mode 100644
index 0000000..ccb67b1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/fbinop2addr.S
@@ -0,0 +1,19 @@
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+     * div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, rOBJ)
+    GET_VREG_F(fa1, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    $instr
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/fbinopWide.S b/runtime/interpreter/mterp/mips/fbinopWide.S
new file mode 100644
index 0000000..3be9325
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/fbinopWide.S
@@ -0,0 +1,28 @@
+    /*
+     * Generic 64-bit floating-point binary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64_F(fa0, fa0f, a2)
+    LOAD64_F(fa1, fa1f, t1)
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    $instr
+    SET_VREG64_F(fv0, fv0f, rOBJ)
+    b         .L${opcode}_finish
+%break
+
+.L${opcode}_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/fbinopWide2addr.S b/runtime/interpreter/mterp/mips/fbinopWide2addr.S
new file mode 100644
index 0000000..8541f11
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/fbinopWide2addr.S
@@ -0,0 +1,21 @@
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *  div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64_F(fa0, fa0f, t0)
+    LOAD64_F(fa1, fa1f, a1)
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $instr
+    SET_VREG64_F(fv0, fv0f, rOBJ)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/footer.S b/runtime/interpreter/mterp/mips/footer.S
new file mode 100644
index 0000000..083dc15
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/footer.S
@@ -0,0 +1,179 @@
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogDivideByZeroException)
+#endif
+    b MterpCommonFallback
+
+common_errArrayIndex:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogArrayIndexException)
+#endif
+    b MterpCommonFallback
+
+common_errNegativeArraySize:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogNegativeArraySizeException)
+#endif
+    b MterpCommonFallback
+
+common_errNoSuchMethod:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogNoSuchMethodException)
+#endif
+    b MterpCommonFallback
+
+common_errNullObject:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogNullObjectException)
+#endif
+    b MterpCommonFallback
+
+common_exceptionThrown:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogExceptionThrownException)
+#endif
+    b MterpCommonFallback
+
+MterpSuspendFallback:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    lw    a2, THREAD_FLAGS_OFFSET(rSELF)
+    JAL(MterpLogSuspendFallback)
+#endif
+    b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary.  If there is a pending
+ * exception, handle it.  Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+    lw      a0, THREAD_EXCEPTION_OFFSET(rSELF)
+    beqz    a0, MterpFallback          # If no pending exception, fall back to reference interpreter.
+    /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpHandleException)                    # (self, shadow_frame)
+    beqz    v0, MterpExceptionReturn             # no local catch, back to caller.
+    lw      a0, OFF_FP_CODE_ITEM(rFP)
+    lw      a1, OFF_FP_DEX_PC(rFP)
+    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+    addu    rPC, a0, CODEITEM_INSNS_OFFSET
+    sll     a1, a1, 1
+    addu    rPC, rPC, a1                         # generate new dex_pc_ptr
+    /* Do we need to switch interpreters? */
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    /* resume execution at catch block */
+    EXPORT_PC()
+    FETCH_INST()
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+    /* NOTE: no fallthrough */
+
+/*
+ * Check for suspend check request.  Assumes rINST already loaded, rPC advanced and
+ * still needs to get the opcode and branch to it, and flags are in ra.
+ */
+MterpCheckSuspendAndContinue:
+    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)  # refresh rIBASE
+    and     ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    bnez    ra, 1f
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+1:
+    EXPORT_PC()
+    move    a0, rSELF
+    JAL(MterpSuspendCheck)              # (self)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rINST
+    JAL(MterpLogOSR)
+#endif
+    li      v0, 1                       # Signal normal return
+    b       MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+    move    v0, zero                    # signal retry with reference interpreter.
+    b       MterpDone
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and RA.  Here we restore the saved registers from the stack and
+ * return to the caller through RA.
+ *
+ * On entry:
+ *  uint32_t* rFP  (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+    li      v0, 1                       # signal return to caller.
+    b       MterpDone
+MterpReturn:
+    lw      a2, OFF_FP_RESULT_REGISTER(rFP)
+    sw      v0, 0(a2)
+    sw      v1, 4(a2)
+    li      v0, 1                       # signal return to caller.
+MterpDone:
+/* Restore from the stack and return. Frame size = STACK_SIZE */
+    STACK_LOAD_FULL()
+    jalr    zero, ra
+
+    .end ExecuteMterpImpl
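
When MterpHandleException reports a local catch, the MterpException block
above rebuilds rPC from the code item and the shadow frame's dex pc. In C
terms (insns_offset standing in for CODEITEM_INSNS_OFFSET):

    #include <stdint.h>

    /* Dex pcs count 16-bit code units, hence the doubling (sll a1, a1, 1). */
    static const uint16_t *pc_for_catch(const uint8_t *code_item,
                                        uint32_t insns_offset,
                                        uint32_t dex_pc) {
        return (const uint16_t *)(code_item + insns_offset) + dex_pc;
    }
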
diff --git a/runtime/interpreter/mterp/mips/funop.S b/runtime/interpreter/mterp/mips/funop.S
new file mode 100644
index 0000000..bfb9346
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/funop.S
@@ -0,0 +1,18 @@
+    /*
+     * Generic 32-bit floating-point unary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = op fa0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: int-to-float, float-to-int
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_VREG_F(fa0, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $instr
+
+.L${opcode}_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    GOTO_OPCODE(t1)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/funopWide.S b/runtime/interpreter/mterp/mips/funopWide.S
new file mode 100644
index 0000000..3d4cf22
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/funopWide.S
@@ -0,0 +1,22 @@
+%default {"preinstr":"", "ld_arg":"LOAD64_F(fa0, fa0f, a3)", "st_result":"SET_VREG64_F(fv0, fv0f, rOBJ)"}
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    $ld_arg
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  a0/a1 <- op, a2-a3 changed
+
+.L${opcode}_set_vreg:
+    $st_result                             #  vA/vA+1 <- result
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
diff --git a/runtime/interpreter/mterp/mips/funopWider.S b/runtime/interpreter/mterp/mips/funopWider.S
new file mode 100644
index 0000000..efb85f3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/funopWider.S
@@ -0,0 +1,19 @@
+%default {"st_result":"SET_VREG64_F(fv0, fv0f, rOBJ)"}
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op fa0", where
+     * "result" is a 64-bit quantity in fv0/fv0f by default ("st_result").
+     *
+     * For: int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $instr
+
+.L${opcode}_set_vreg:
+    $st_result                             #  vA/vA+1 <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/header.S b/runtime/interpreter/mterp/mips/header.S
new file mode 100644
index 0000000..37ab21d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/header.S
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+  Art assembly interpreter notes:
+
+  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+  handle invoke, allows higher-level code to create frame & shadow frame).
+
+  Once that's working, support direct entry code & eliminate shadow frame (and
+  excess locals allocation).
+
+  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
+  base of the vreg array within the shadow frame.  Access the other fields,
+  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
+  the shadow frame mechanism of double-storing object references - via rFP &
+  number_of_vregs_.
+
+ */
+
+#include "asm_support.h"
+
+#if (__mips==32) && (__mips_isa_rev>=2)
+#define MIPS32REVGE2    /* mips32r2 and greater */
+#if (__mips==32) && (__mips_isa_rev>=5)
+#define FPU64           /* 64 bit FPU */
+#if (__mips==32) && (__mips_isa_rev>=6)
+#define MIPS32REVGE6    /* mips32r6 and greater */
+#endif
+#endif
+#endif
+
+/* MIPS definitions and declarations
+
+   reg  nick      purpose
+   s0   rPC       interpreted program counter, used for fetching instructions
+   s1   rFP       interpreted frame pointer, used for accessing locals and args
+   s2   rSELF     self (Thread) pointer
+   s3   rIBASE    interpreted instruction base pointer, used for computed goto
+   s4   rINST     first 16-bit code unit of current instruction
+   s6   rREFS     base of object references in shadow frame (ideally, we'll get rid of this later).
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC s0
+#define rFP s1
+#define rSELF s2
+#define rIBASE s3
+#define rINST s4
+#define rOBJ s5
+#define rREFS s6
+#define rTEMP s7
+
+#define rARG0 a0
+#define rARG1 a1
+#define rARG2 a2
+#define rARG3 a3
+#define rRESULT0 v0
+#define rRESULT1 v1
+
+/* GP register definitions */
+#define zero    $$0      /* always zero */
+#define AT      $$at     /* assembler temp */
+#define v0      $$2      /* return value */
+#define v1      $$3
+#define a0      $$4      /* argument registers */
+#define a1      $$5
+#define a2      $$6
+#define a3      $$7
+#define t0      $$8      /* temp registers (not saved across subroutine calls) */
+#define t1      $$9
+#define t2      $$10
+#define t3      $$11
+#define t4      $$12
+#define t5      $$13
+#define t6      $$14
+#define t7      $$15
+#define ta0     $$12     /* alias */
+#define ta1     $$13
+#define ta2     $$14
+#define ta3     $$15
+#define s0      $$16     /* saved across subroutine calls (callee saved) */
+#define s1      $$17
+#define s2      $$18
+#define s3      $$19
+#define s4      $$20
+#define s5      $$21
+#define s6      $$22
+#define s7      $$23
+#define t8      $$24     /* two more temp registers */
+#define t9      $$25
+#define k0      $$26     /* kernel temporary */
+#define k1      $$27
+#define gp      $$28     /* global pointer */
+#define sp      $$29     /* stack pointer */
+#define s8      $$30     /* one more callee saved */
+#define ra      $$31     /* return address */
+
+/* FP register definitions */
+#define fv0    $$f0
+#define fv0f   $$f1
+#define fv1    $$f2
+#define fv1f   $$f3
+#define fa0    $$f12
+#define fa0f   $$f13
+#define fa1    $$f14
+#define fa1f   $$f15
+#define ft0    $$f4
+#define ft0f   $$f5
+#define ft1    $$f6
+#define ft1f   $$f7
+#define ft2    $$f8
+#define ft2f   $$f9
+#define ft3    $$f10
+#define ft3f   $$f11
+#define ft4    $$f16
+#define ft4f   $$f17
+#define ft5    $$f18
+#define ft5f   $$f19
+#define fs0    $$f20
+#define fs0f   $$f21
+#define fs1    $$f22
+#define fs1f   $$f23
+#define fs2    $$f24
+#define fs2f   $$f25
+#define fs3    $$f26
+#define fs3f   $$f27
+#define fs4    $$f28
+#define fs4f   $$f29
+#define fs5    $$f30
+#define fs5f   $$f31
+
+#ifndef MIPS32REVGE6
+#define fcc0   $$fcc0
+#define fcc1   $$fcc1
+#endif
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
+ * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+#define EXPORT_PC() \
+    sw        rPC, OFF_FP_DEX_PC_PTR(rFP)
+
+#define EXPORT_DEX_PC(tmp) \
+    lw   tmp, OFF_FP_CODE_ITEM(rFP); \
+    sw   rPC, OFF_FP_DEX_PC_PTR(rFP); \
+    addu tmp, CODEITEM_INSNS_OFFSET; \
+    subu tmp, rPC, tmp; \
+    sra  tmp, tmp, 1; \
+    sw   tmp, OFF_FP_DEX_PC(rFP)
+
+/*
+ * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
+ */
+#define FETCH_INST() lhu rINST, (rPC)
+
+/*
+ * Fetch the next instruction from the specified offset.  Advances rPC
+ * to point to the next instruction.  "_count" is in 16-bit code units.
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss.  (This also implies that it must come after
+ * EXPORT_PC().)
+ */
+#define FETCH_ADVANCE_INST(_count) lhu rINST, ((_count)*2)(rPC); \
+    addu      rPC, rPC, ((_count) * 2)
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to rPC and rINST).
+ */
+#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
+    lhu       _dreg, ((_count)*2)(_sreg) ;            \
+    addu      _sreg, _sreg, (_count)*2
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
+ * rINST ahead of possible exception point.  Be sure to manually advance rPC
+ * later.
+ */
+#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
+
+/* Advance rPC by some number of code units. */
+#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
+
+/*
+ * Fetch the next instruction from an offset specified by rd.  Updates
+ * rPC to point to the next instruction.  "rd" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ */
+#define FETCH_ADVANCE_INST_RB(rd) addu rPC, rPC, rd; \
+    lhu       rINST, (rPC)
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC.  The
+ * "_count" value is in 16-bit code units.  Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
+#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
+
+/*
+ * Fetch one byte from an offset past the current PC.  Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+#define GET_PREFETCHED_OPCODE(dreg, sreg)   andi     dreg, sreg, 255
+
+/*
+ * Begin executing the opcode in rd.
+ */
+#define GOTO_OPCODE(rd) sll rd, rd, ${handler_size_bits}; \
+    addu      rd, rIBASE, rd; \
+    jalr      zero, rd
+
+#define GOTO_OPCODE_BASE(_base, rd)  sll rd, rd, ${handler_size_bits}; \
+    addu      rd, _base, rd; \
+    jalr      zero, rd
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
+
+#define GET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
+    .set noat; l.s rd, (AT); .set at
+
+#define SET_VREG(rd, rix) .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8)
+
+#define SET_VREG64(rlo, rhi, rix) .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rlo, 0(t8); \
+    sw        rhi, 4(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8)
+
+#ifdef FPU64
+#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rREFS, AT; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8); \
+    addu      t8, rFP, AT; \
+    mfhc1     AT, rlo; \
+    sw        AT, 4(t8); \
+    .set at; \
+    s.s       rlo, 0(t8)
+#else
+#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    s.s       rlo, 0(t8); \
+    s.s       rhi, 4(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8)
+#endif
+
+#define SET_VREG_OBJECT(rd, rix) .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        rd, 0(t8)
+
+/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
+#define SET_VREG_GOTO(rd, rix, dst) .set noreorder; \
+    sll       dst, dst, ${handler_size_bits}; \
+    addu      dst, rIBASE, dst; \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    jalr      zero, dst; \
+    sw        zero, 0(t8); \
+    .set reorder
+
+/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) .set noreorder; \
+    sll       dst, dst, ${handler_size_bits}; \
+    addu      dst, rIBASE, dst; \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rlo, 0(t8); \
+    sw        rhi, 4(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8); \
+    jalr      zero, dst; \
+    sw        zero, 4(t8); \
+    .set reorder
+
+#define SET_VREG_F(rd, rix) .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    s.s       rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8)
+
+#define GET_OPA(rd) srl rd, rINST, 8
+#ifdef MIPS32REVGE2
+#define GET_OPA4(rd) ext rd, rINST, 8, 4
+#else
+#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
+#endif
+#define GET_OPB(rd) srl rd, rINST, 12
+
+/*
+ * Form an Effective Address rd = rbase + roff<<n;
+ * Uses reg AT
+ */
+#define EASN(rd, rbase, roff, rshift) .set noat; \
+    sll       AT, roff, rshift; \
+    addu      rd, rbase, AT; \
+    .set at
+
+#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
+#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
+#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
+#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
+
+/*
+ * Form an Effective Shift Right rd = rbase + roff>>n;
+ * Uses reg AT
+ */
+#define ESRN(rd, rbase, roff, rshift) .set noat; \
+    srl       AT, roff, rshift; \
+    addu      rd, rbase, AT; \
+    .set at
+
+#define LOAD_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+    .set noat; lw rd, 0(AT); .set at
+
+#define STORE_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+    .set noat; sw rd, 0(AT); .set at
+
+#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
+#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
+
+#define STORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+    sw        rhi, (off+4)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+    lw        rhi, (off+4)(rbase)
+
+#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
+#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
+
+#ifdef FPU64
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+    .set noat; \
+    mfhc1     AT, rlo; \
+    sw        AT, (off+4)(rbase); \
+    .set at
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+    .set noat; \
+    lw        AT, (off+4)(rbase); \
+    mthc1     AT, rlo; \
+    .set at
+#else
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+    s.s       rhi, (off+4)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+    l.s       rhi, (off+4)(rbase)
+#endif
+
+#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
+#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
+
+
+#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
+
+#define STACK_STORE(rd, off) sw rd, off(sp)
+#define STACK_LOAD(rd, off) lw rd, off(sp)
+#define CREATE_STACK(n) subu sp, sp, n
+#define DELETE_STACK(n) addu sp, sp, n
+
+#define LOAD_ADDR(dest, addr) la dest, addr
+#define LOAD_IMM(dest, imm) li dest, imm
+#define MOVE_REG(dest, src) move dest, src
+#define STACK_SIZE 128
+
+#define STACK_OFFSET_ARG04 16
+#define STACK_OFFSET_ARG05 20
+#define STACK_OFFSET_ARG06 24
+#define STACK_OFFSET_ARG07 28
+#define STACK_OFFSET_GP    84
+
+#define JAL(n) jal n
+#define BAL(n) bal n
+
+/*
+ * FP register usage restrictions:
+ * 1) We don't use the callee save FP registers so we don't have to save them.
+ * 2) We don't use the odd FP registers so we can share code with mips32r6.
+ */
+#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
+    STACK_STORE(ra, 124); \
+    STACK_STORE(s8, 120); \
+    STACK_STORE(s0, 116); \
+    STACK_STORE(s1, 112); \
+    STACK_STORE(s2, 108); \
+    STACK_STORE(s3, 104); \
+    STACK_STORE(s4, 100); \
+    STACK_STORE(s5, 96); \
+    STACK_STORE(s6, 92); \
+    STACK_STORE(s7, 88);
+
+#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
+    STACK_LOAD(s7, 88); \
+    STACK_LOAD(s6, 92); \
+    STACK_LOAD(s5, 96); \
+    STACK_LOAD(s4, 100); \
+    STACK_LOAD(s3, 104); \
+    STACK_LOAD(s2, 108); \
+    STACK_LOAD(s1, 112); \
+    STACK_LOAD(s0, 116); \
+    STACK_LOAD(s8, 120); \
+    STACK_LOAD(ra, 124); \
+    DELETE_STACK(STACK_SIZE)
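
A sketch of the double-store discipline behind SET_VREG and SET_VREG_OBJECT:
the vreg and reference arrays are parallel (rREFS sits number_of_vregs slots
past rFP), primitives clear the reference slot so the GC ignores it, and
objects are written to both so the GC can visit and update them.

    #include <stdint.h>

    static void set_vreg(uint32_t *vregs, uint32_t *refs,
                         uint32_t ix, uint32_t val) {
        vregs[ix] = val;  /* sw rd, 0(t8) relative to rFP */
        refs[ix] = 0;     /* sw zero, 0(t8) relative to rREFS */
    }

    static void set_vreg_object(uint32_t *vregs, uint32_t *refs,
                                uint32_t ix, uint32_t ref) {
        vregs[ix] = ref;  /* the same value goes... */
        refs[ix] = ref;   /* ...to both arrays */
    }
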
diff --git a/runtime/interpreter/mterp/mips/invoke.S b/runtime/interpreter/mterp/mips/invoke.S
new file mode 100644
index 0000000..bcd3a57
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/invoke.S
@@ -0,0 +1,19 @@
+%default { "helper":"UndefinedInvokeHandler" }
+    /*
+     * Generic invoke handler wrapper.
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    .extern $helper
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL($helper)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(3)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
diff --git a/runtime/interpreter/mterp/mips/op_add_double.S b/runtime/interpreter/mterp/mips/op_add_double.S
new file mode 100644
index 0000000..12ef0cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_double.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide.S" {"instr":"add.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_double_2addr.S b/runtime/interpreter/mterp/mips/op_add_double_2addr.S
new file mode 100644
index 0000000..c57add5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_double_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide2addr.S" {"instr":"add.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_float.S b/runtime/interpreter/mterp/mips/op_add_float.S
new file mode 100644
index 0000000..6a46cf0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_float.S
@@ -0,0 +1 @@
+%include "mips/fbinop.S" {"instr":"add.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_float_2addr.S b/runtime/interpreter/mterp/mips/op_add_float_2addr.S
new file mode 100644
index 0000000..6ab5cc1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_float_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinop2addr.S" {"instr":"add.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int.S b/runtime/interpreter/mterp/mips/op_add_int.S
new file mode 100644
index 0000000..53a0cb1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int_2addr.S b/runtime/interpreter/mterp/mips/op_add_int_2addr.S
new file mode 100644
index 0000000..ddd9214
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int_lit16.S b/runtime/interpreter/mterp/mips/op_add_int_lit16.S
new file mode 100644
index 0000000..05535c1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_int_lit16.S
@@ -0,0 +1 @@
+%include "mips/binopLit16.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int_lit8.S b/runtime/interpreter/mterp/mips/op_add_int_lit8.S
new file mode 100644
index 0000000..fd021b3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_long.S b/runtime/interpreter/mterp/mips/op_add_long.S
new file mode 100644
index 0000000..faacc6a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_long.S
@@ -0,0 +1,9 @@
+/*
+ *  The compiler generates the following sequence for
+ *  [v1 v0] =  [a1 a0] + [a3 a2];
+ *    addu v0,a2,a0
+ *    addu a1,a3,a1
+ *    sltu v1,v0,a2
+ *    addu v1,v1,a1
+ */
+%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "preinstr":"addu v0, a2, a0", "instr":"addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1" }
diff --git a/runtime/interpreter/mterp/mips/op_add_long_2addr.S b/runtime/interpreter/mterp/mips/op_add_long_2addr.S
new file mode 100644
index 0000000..bf827c1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_long_2addr.S
@@ -0,0 +1,4 @@
+/*
+ * See op_add_long.S for details
+ */
+%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "preinstr":"addu v0, a2, a0", "instr":"addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1" }
diff --git a/runtime/interpreter/mterp/mips/op_aget.S b/runtime/interpreter/mterp/mips/op_aget.S
new file mode 100644
index 0000000..8aa8992
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget.S
@@ -0,0 +1,32 @@
+%default { "load":"lw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    .if $shift
+    EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    $load a2, $data_offset(a0)             #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
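
The template above is the usual null-check / bounds-check / load sequence. In
C terms (array layout reduced to a length field followed by data; the error
returns stand in for the common_err* stubs):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        uint32_t length;
        int32_t data[];
    } IntArraySketch;

    static int aget(const IntArraySketch *arr, uint32_t index, int32_t *out) {
        if (arr == NULL) return -1;           /* beqz a0, common_errNullObject */
        if (index >= arr->length) return -2;  /* bgeu a1, a3, common_errArrayIndex */
        *out = arr->data[index];              /* $load a2, $data_offset(a0) */
        return 0;
    }
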
diff --git a/runtime/interpreter/mterp/mips/op_aget_boolean.S b/runtime/interpreter/mterp/mips/op_aget_boolean.S
new file mode 100644
index 0000000..59f7f82
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget_boolean.S
@@ -0,0 +1 @@
+%include "mips/op_aget.S" { "load":"lbu", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_byte.S b/runtime/interpreter/mterp/mips/op_aget_byte.S
new file mode 100644
index 0000000..11038fa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget_byte.S
@@ -0,0 +1 @@
+%include "mips/op_aget.S" { "load":"lb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_char.S b/runtime/interpreter/mterp/mips/op_aget_char.S
new file mode 100644
index 0000000..96f2ab6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget_char.S
@@ -0,0 +1 @@
+%include "mips/op_aget.S" { "load":"lhu", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_object.S b/runtime/interpreter/mterp/mips/op_aget_object.S
new file mode 100644
index 0000000..e3ab9d8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget_object.S
@@ -0,0 +1,20 @@
+    /*
+     * Array object get.  vAA <- vBB[vCC].
+     *
+     * for: aget-object
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    EXPORT_PC()
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    JAL(artAGetObjectFromMterp)            #  v0 <- GetObj(array, index)
+    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
+    PREFETCH_INST(2)                       #  load rINST
+    bnez a1, MterpException
+    SET_VREG_OBJECT(v0, rOBJ)              #  vAA <- v0
+    ADVANCE(2)                             #  advance rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_aget_short.S b/runtime/interpreter/mterp/mips/op_aget_short.S
new file mode 100644
index 0000000..cd7f7bf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget_short.S
@@ -0,0 +1 @@
+%include "mips/op_aget.S" { "load":"lh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_wide.S b/runtime/interpreter/mterp/mips/op_aget_wide.S
new file mode 100644
index 0000000..08822f5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget_wide.S
@@ -0,0 +1,22 @@
+    /*
+     * Array get, 64 bits.  vAA <- vBB[vCC].
+     *
+     * Arrays of long/double are 64-bit aligned.
+     */
+    /* aget-wide vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a2, a3, rOBJ, t0)      #  vAA/vAA+1 <- a2/a3
diff --git a/runtime/interpreter/mterp/mips/op_and_int.S b/runtime/interpreter/mterp/mips/op_and_int.S
new file mode 100644
index 0000000..98fe4af
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_and_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_int_2addr.S b/runtime/interpreter/mterp/mips/op_and_int_2addr.S
new file mode 100644
index 0000000..7f90ed4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_and_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_int_lit16.S b/runtime/interpreter/mterp/mips/op_and_int_lit16.S
new file mode 100644
index 0000000..e46f23b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_and_int_lit16.S
@@ -0,0 +1 @@
+%include "mips/binopLit16.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_int_lit8.S b/runtime/interpreter/mterp/mips/op_and_int_lit8.S
new file mode 100644
index 0000000..3332883
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_and_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_long.S b/runtime/interpreter/mterp/mips/op_and_long.S
new file mode 100644
index 0000000..a98a6df
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_and_long.S
@@ -0,0 +1 @@
+%include "mips/binopWide.S" {"preinstr":"and a0, a0, a2", "instr":"and a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_and_long_2addr.S b/runtime/interpreter/mterp/mips/op_and_long_2addr.S
new file mode 100644
index 0000000..350c044
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_and_long_2addr.S
@@ -0,0 +1 @@
+%include "mips/binopWide2addr.S" {"preinstr":"and a0, a0, a2", "instr":"and a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_aput.S b/runtime/interpreter/mterp/mips/op_aput.S
new file mode 100644
index 0000000..53d6ae0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput.S
@@ -0,0 +1,30 @@
+%default { "store":"sw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    .if $shift
+    EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    $store a2, $data_offset(a0)            #  vBB[vCC] <- a2
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_aput_boolean.S b/runtime/interpreter/mterp/mips/op_aput_boolean.S
new file mode 100644
index 0000000..9cae5ef
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput_boolean.S
@@ -0,0 +1 @@
+%include "mips/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_byte.S b/runtime/interpreter/mterp/mips/op_aput_byte.S
new file mode 100644
index 0000000..3bbd12c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput_byte.S
@@ -0,0 +1 @@
+%include "mips/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_char.S b/runtime/interpreter/mterp/mips/op_aput_char.S
new file mode 100644
index 0000000..ae69717
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput_char.S
@@ -0,0 +1 @@
+%include "mips/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_object.S b/runtime/interpreter/mterp/mips/op_aput_object.S
new file mode 100644
index 0000000..55b13b1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput_object.S
@@ -0,0 +1,14 @@
+    /*
+     * Store an object into an array.  vBB[vCC] <- vAA.
+     *
+     */
+    /* op vAA, vBB, vCC */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rPC
+    move   a2, rINST
+    JAL(MterpAputObject)
+    beqz   v0, MterpPossibleException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_aput_short.S b/runtime/interpreter/mterp/mips/op_aput_short.S
new file mode 100644
index 0000000..9586259
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput_short.S
@@ -0,0 +1 @@
+%include "mips/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_wide.S b/runtime/interpreter/mterp/mips/op_aput_wide.S
new file mode 100644
index 0000000..ef99261
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput_wide.S
@@ -0,0 +1,25 @@
+    /*
+     * Array put, 64 bits.  vBB[vCC] <- vAA.
+     *
+     * Arrays of long/double are 64-bit aligned, so it's okay to store the
+     * value as an aligned pair of 32-bit words.
+     */
+    /* aput-wide vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t0)                            #  t0 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
+    EAS2(rOBJ, rFP, t0)                    #  rOBJ <- &fp[AA]
+    # compare unsigned index, length
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64(a2, a3, rOBJ)                   #  a2/a3 <- vAA/vAA+1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) #  a2/a3 <- vBB[vCC]
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_array_length.S b/runtime/interpreter/mterp/mips/op_array_length.S
new file mode 100644
index 0000000..2b4a86f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_array_length.S
@@ -0,0 +1,12 @@
+    /*
+     * Return the length of an array.
+     */
+    GET_OPB(a1)                            #  a1 <- B
+    GET_OPA4(a2)                           #  a2 <- A+
+    GET_VREG(a0, a1)                       #  a0 <- vB (object ref)
+    # is object null?
+    beqz      a0, common_errNullObject     #  yup, fail
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- array length
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a3, a2, t0)              #  vA <- length
diff --git a/runtime/interpreter/mterp/mips/op_check_cast.S b/runtime/interpreter/mterp/mips/op_check_cast.S
new file mode 100644
index 0000000..9a6cefa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_check_cast.S
@@ -0,0 +1,16 @@
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    # check-cast vAA, class                /* BBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                           #  a0 <- BBBB
+    GET_OPA(a1)                            #  a1 <- AA
+    EAS2(a1, rFP, a1)                      #  a1 <- &object
+    lw     a2, OFF_FP_METHOD(rFP)          #  a2 <- method
+    move   a3, rSELF                       #  a3 <- self
+    JAL(MterpCheckCast)                    #  v0 <- CheckCast(index, &obj, method, self)
+    PREFETCH_INST(2)
+    bnez   v0, MterpPossibleException
+    ADVANCE(2)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_cmp_long.S b/runtime/interpreter/mterp/mips/op_cmp_long.S
new file mode 100644
index 0000000..44806c3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_cmp_long.S
@@ -0,0 +1,34 @@
+    /*
+     * Compare two 64-bit values
+     *    x = y     return  0
+     *    x < y     return -1
+     *    x > y     return  1
+     *
+     * I think I can improve on the ARM code by the following observation
+     *    slt   t0,  x.hi, y.hi;        # (x.hi < y.hi) ? 1:0
+     *    sgt   t1,  x.hi, y.hi;        # (x.hi > y.hi) ? 1:0
+     *    subu  v0, t1, t0              # v0 = -1:1:0 for [ < > = ]
+     */
+    /* cmp-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[CC]
+    LOAD64(a0, a1, a2)                     #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, a3)                     #  a2/a3 <- vCC/vCC+1
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    slt       t0, a1, a3                   #  compare hi
+    sgt       t1, a1, a3
+    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0)
+    bnez      v0, .L${opcode}_finish
+    # at this point x.hi==y.hi
+    sltu      t0, a0, a2                   #  compare lo
+    sgtu      t1, a0, a2
+    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0) for [< > =]
+
+.L${opcode}_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t0)            #  vAA <- v0
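
The comparison logic reduces to: compare the high words signed, and only on a
tie compare the low words unsigned. A compact C rendering of the slt/sgt/subu
pairs:

    #include <stdint.h>

    static int cmp_long(uint32_t xlo, int32_t xhi, uint32_t ylo, int32_t yhi) {
        int v = (xhi > yhi) - (xhi < yhi);  /* sgt/slt/subu on the high words */
        if (v != 0) return v;
        return (xlo > ylo) - (xlo < ylo);   /* sgtu/sltu/subu on the low words */
    }
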
diff --git a/runtime/interpreter/mterp/mips/op_cmpg_double.S b/runtime/interpreter/mterp/mips/op_cmpg_double.S
new file mode 100644
index 0000000..e7965a7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_cmpg_double.S
@@ -0,0 +1 @@
+%include "mips/op_cmpl_double.S" { "naninst":"li rTEMP, 1" }
diff --git a/runtime/interpreter/mterp/mips/op_cmpg_float.S b/runtime/interpreter/mterp/mips/op_cmpg_float.S
new file mode 100644
index 0000000..53519a6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_cmpg_float.S
@@ -0,0 +1 @@
+%include "mips/op_cmpl_float.S" { "naninst":"li rTEMP, 1" }
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_double.S b/runtime/interpreter/mterp/mips/op_cmpl_double.S
new file mode 100644
index 0000000..5a47fd7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_cmpl_double.S
@@ -0,0 +1,54 @@
+%default { "naninst":"li rTEMP, -1" }
+    /*
+     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+     * into the destination register (rTEMP) based on the comparison results.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * See op_cmpl_float for more details.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       rOBJ, a0, 255                #  s5 <- BB
+    srl       t0, a0, 8                    #  t0 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  s5 <- &fp[BB]
+    EAS2(t0, rFP, t0)                      #  t0 <- &fp[CC]
+    LOAD64_F(ft0, ft0f, rOBJ)
+    LOAD64_F(ft1, ft1f, t0)
+#ifdef MIPS32REVGE6
+    cmp.lt.d  ft2, ft0, ft1
+    li        rTEMP, -1
+    bc1nez    ft2, .L${opcode}_finish
+    cmp.lt.d  ft2, ft1, ft0
+    li        rTEMP, 1
+    bc1nez    ft2, .L${opcode}_finish
+    cmp.eq.d  ft2, ft0, ft1
+    li        rTEMP, 0
+    bc1nez    ft2, .L${opcode}_finish
+    b         .L${opcode}_nan
+#else
+    c.olt.d   fcc0, ft0, ft1
+    li        rTEMP, -1
+    bc1t      fcc0, .L${opcode}_finish
+    c.olt.d   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, .L${opcode}_finish
+    c.eq.d    fcc0, ft0, ft1
+    li        rTEMP, 0
+    bc1t      fcc0, .L${opcode}_finish
+    b         .L${opcode}_nan
+#endif
+%break
+
+.L${opcode}_nan:
+    $naninst
+
+.L${opcode}_finish:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
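The naninst parameter is the only difference between cmpl-double and cmpg-double; in Java terms the handler above computes (nanValue standing in for naninst):

    // Three ordered tests, then the NaN fallback: nanValue is -1 for cmpl-*,
    // +1 for cmpg-*. Ordered comparisons are false when either operand is NaN.
    static int cmpDouble(double x, double y, int nanValue) {
        if (x < y)  return -1;
        if (x > y)  return  1;
        if (x == y) return  0;
        return nanValue;        // at least one operand is NaN
    }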
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_float.S b/runtime/interpreter/mterp/mips/op_cmpl_float.S
new file mode 100644
index 0000000..cfd87ee
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_cmpl_float.S
@@ -0,0 +1,61 @@
+%default { "naninst":"li rTEMP, -1" }
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register rTEMP based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1 or 1};  // at least one operand is NaN
+     *
+     * for: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+
+    /* "clasic" form */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG_F(ft0, a2)
+    GET_VREG_F(ft1, a3)
+#ifdef MIPS32REVGE6
+    cmp.lt.s  ft2, ft0, ft1               # Is ft0 < ft1
+    li        rTEMP, -1
+    bc1nez    ft2, .L${opcode}_finish
+    cmp.lt.s  ft2, ft1, ft0
+    li        rTEMP, 1
+    bc1nez    ft2, .L${opcode}_finish
+    cmp.eq.s  ft2, ft0, ft1
+    li        rTEMP, 0
+    bc1nez    ft2, .L${opcode}_finish
+    b         .L${opcode}_nan
+#else
+    c.olt.s   fcc0, ft0, ft1               # Is ft0 < ft1
+    li        rTEMP, -1
+    bc1t      fcc0, .L${opcode}_finish
+    c.olt.s   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, .L${opcode}_finish
+    c.eq.s    fcc0, ft0, ft1
+    li        rTEMP, 0
+    bc1t      fcc0, .L${opcode}_finish
+    b         .L${opcode}_nan
+#endif
+%break
+
+.L${opcode}_nan:
+    $naninst
+
+.L${opcode}_finish:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
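Why both NaN biases exist: the compiler picks cmpl or cmpg so that a NaN operand makes the subsequent branch fall through. A runnable demonstration of the observable difference (which opcode a given comparison compiles to is the compiler's choice):

    public class NanBias {
        public static void main(String[] args) {
            float x = Float.NaN, y = 0.0f;
            // Every ordered comparison involving NaN is false; a "<" is typically
            // compiled with cmpg (+1 for NaN, not < 0) and ">" with cmpl (-1, not > 0).
            System.out.println(x < y);   // false
            System.out.println(x > y);   // false
        }
    }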
diff --git a/runtime/interpreter/mterp/mips/op_const.S b/runtime/interpreter/mterp/mips/op_const.S
new file mode 100644
index 0000000..c505761
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const.S
@@ -0,0 +1,9 @@
+    # const vAA,                           /* +BBBBbbbb */
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH(a0, 1)                           #  a0 <- bbbb (low)
+    FETCH(a1, 2)                           #  a1 <- BBBB (high)
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    sll       a1, a1, 16
+    or        a0, a1, a0                   #  a0 <- BBBBbbbb
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
diff --git a/runtime/interpreter/mterp/mips/op_const_16.S b/runtime/interpreter/mterp/mips/op_const_16.S
new file mode 100644
index 0000000..5e47633
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_16.S
@@ -0,0 +1,6 @@
+    # const/16 vAA,                        /* +BBBB */
+    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
diff --git a/runtime/interpreter/mterp/mips/op_const_4.S b/runtime/interpreter/mterp/mips/op_const_4.S
new file mode 100644
index 0000000..8b662f9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_4.S
@@ -0,0 +1,8 @@
+    # const/4 vA,                          /* +B */
+    sll       a1, rINST, 16                #  a1 <- Bxxx0000
+    GET_OPA(a0)                            #  a0 <- A+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    sra       a1, a1, 28                   #  a1 <- sssssssB (sign-extended)
+    and       a0, a0, 15
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a0, t0)              #  fp[A] <- a1
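The sll/sra pair above is the standard trick for extracting a sign-extended bit field; in Java terms (inst holds the 16-bit code unit in the low half of an int):

    // const/4: the signed literal B sits in bits 15..12, register A in bits 11..8.
    static int const4Literal(int inst) {
        return (inst << 16) >> 28;     // sll 16 then sra 28: sssssssB
    }
    static int const4RegA(int inst) {
        return (inst >> 8) & 0xf;      // GET_OPA followed by "and a0, a0, 15"
    }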
diff --git a/runtime/interpreter/mterp/mips/op_const_class.S b/runtime/interpreter/mterp/mips/op_const_class.S
new file mode 100644
index 0000000..7202b11
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_class.S
@@ -0,0 +1,12 @@
+    # const/class vAA, Class               /* BBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                        # a0 <- BBBB
+    GET_OPA(a1)                         # a1 <- AA
+    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
+    move   a3, rSELF
+    JAL(MterpConstClass)
+    PREFETCH_INST(2)                    # load rINST
+    bnez   v0, MterpPossibleException
+    ADVANCE(2)                          # advance rPC
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_const_high16.S b/runtime/interpreter/mterp/mips/op_const_high16.S
new file mode 100644
index 0000000..36c1c35
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_high16.S
@@ -0,0 +1,7 @@
+    # const/high16 vAA,                    /* +BBBB0000 */
+    FETCH(a0, 1)                           #  a0 <- 0000BBBB (zero-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       a0, a0, 16                   #  a0 <- BBBB0000
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
diff --git a/runtime/interpreter/mterp/mips/op_const_string.S b/runtime/interpreter/mterp/mips/op_const_string.S
new file mode 100644
index 0000000..d8eeb46
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_string.S
@@ -0,0 +1,12 @@
+    # const/string vAA, String             /* BBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                        # a0 <- BBBB
+    GET_OPA(a1)                         # a1 <- AA
+    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
+    move   a3, rSELF
+    JAL(MterpConstString)               # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST(2)                    # load rINST
+    bnez   v0, MterpPossibleException
+    ADVANCE(2)                          # advance rPC
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_const_string_jumbo.S b/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
new file mode 100644
index 0000000..d732ca1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
@@ -0,0 +1,15 @@
+    # const/string vAA, String          /* BBBBBBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                        # a0 <- bbbb (low)
+    FETCH(a2, 2)                        # a2 <- BBBB (high)
+    GET_OPA(a1)                         # a1 <- AA
+    sll    a2, a2, 16
+    or     a0, a0, a2                   # a0 <- BBBBbbbb
+    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
+    move   a3, rSELF
+    JAL(MterpConstString)               # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST(3)                    # load rINST
+    bnez   v0, MterpPossibleException
+    ADVANCE(3)                          # advance rPC
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_const_wide.S b/runtime/interpreter/mterp/mips/op_const_wide.S
new file mode 100644
index 0000000..01d0f87
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_wide.S
@@ -0,0 +1,14 @@
+    # const-wide vAA,                      /* +HHHHhhhhBBBBbbbb */
+    FETCH(a0, 1)                           #  a0 <- bbbb (low)
+    FETCH(a1, 2)                           #  a1 <- BBBB (low middle)
+    FETCH(a2, 3)                           #  a2 <- hhhh (high middle)
+    sll       a1, 16                       #  a1 <- BBBB0000
+    or        a0, a1                       #  a0 <- BBBBbbbb (low word)
+    FETCH(a3, 4)                           #  a3 <- HHHH (high)
+    GET_OPA(t1)                            #  t1 <- AA
+    sll       a3, 16                       #  a3 <- HHHH0000
+    or        a1, a3, a2                   #  a1 <- HHHHhhhh (high word)
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, t1)                 #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
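A Java sketch of the 64-bit literal assembly performed above from the four 16-bit code units, fetched low-to-high (parameter names follow the comments in the hunk):

    // bbbb/BBBB form the low word, hhhh/HHHH the high word.
    static long constWide(int bbbb, int BBBB, int hhhh, int HHHH) {
        int lo = (BBBB << 16) | (bbbb & 0xffff);   // "sll a1, 16; or a0, a1"
        int hi = (HHHH << 16) | (hhhh & 0xffff);   // "sll a3, 16; or a1, a3, a2"
        return ((long) hi << 32) | (lo & 0xffffffffL);
    }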
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_16.S b/runtime/interpreter/mterp/mips/op_const_wide_16.S
new file mode 100644
index 0000000..583d9ef
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_wide_16.S
@@ -0,0 +1,8 @@
+    # const-wide/16 vAA,                   /* +BBBB */
+    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    sra       a1, a0, 31                   #  a1 <- ssssssss
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, a3)                 #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_32.S b/runtime/interpreter/mterp/mips/op_const_wide_32.S
new file mode 100644
index 0000000..3eb4574
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_wide_32.S
@@ -0,0 +1,11 @@
+    # const-wide/32 vAA,                   /* +BBBBbbbb */
+    FETCH(a0, 1)                           #  a0 <- 0000bbbb (low)
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH_S(a2, 2)                         #  a2 <- ssssBBBB (high)
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    sll       a2, a2, 16
+    or        a0, a0, a2                   #  a0 <- BBBBbbbb
+    sra       a1, a0, 31                   #  a1 <- ssssssss
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, a3)                 #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_high16.S b/runtime/interpreter/mterp/mips/op_const_wide_high16.S
new file mode 100644
index 0000000..88382c6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_wide_high16.S
@@ -0,0 +1,9 @@
+    # const-wide/high16 vAA,               /* +BBBB000000000000 */
+    FETCH(a1, 1)                           #  a1 <- 0000BBBB (zero-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    li        a0, 0                        #  a0 <- 00000000
+    sll       a1, 16                       #  a1 <- BBBB0000
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, a3)                 #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_div_double.S b/runtime/interpreter/mterp/mips/op_div_double.S
new file mode 100644
index 0000000..84e4c4e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_double.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide.S" {"instr":"div.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_double_2addr.S b/runtime/interpreter/mterp/mips/op_div_double_2addr.S
new file mode 100644
index 0000000..65b92e3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_double_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide2addr.S" {"instr":"div.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_float.S b/runtime/interpreter/mterp/mips/op_div_float.S
new file mode 100644
index 0000000..44b8d47
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_float.S
@@ -0,0 +1 @@
+%include "mips/fbinop.S" {"instr":"div.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_float_2addr.S b/runtime/interpreter/mterp/mips/op_div_float_2addr.S
new file mode 100644
index 0000000..e5fff92
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_float_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinop2addr.S" {"instr":"div.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_int.S b/runtime/interpreter/mterp/mips/op_div_int.S
new file mode 100644
index 0000000..5d28c84
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_int.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binop.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binop.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_int_2addr.S b/runtime/interpreter/mterp/mips/op_div_int_2addr.S
new file mode 100644
index 0000000..6c079e0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_int_2addr.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binop2addr.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binop2addr.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_int_lit16.S b/runtime/interpreter/mterp/mips/op_div_int_lit16.S
new file mode 100644
index 0000000..ee7452c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_int_lit16.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binopLit16.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binopLit16.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_int_lit8.S b/runtime/interpreter/mterp/mips/op_div_int_lit8.S
new file mode 100644
index 0000000..d2964b8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_int_lit8.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binopLit8.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binopLit8.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_long.S b/runtime/interpreter/mterp/mips/op_div_long.S
new file mode 100644
index 0000000..2097866
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_long.S
@@ -0,0 +1 @@
+%include "mips/binopWide.S" {"result0":"v0", "result1":"v1", "instr":"JAL(__divdi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_long_2addr.S b/runtime/interpreter/mterp/mips/op_div_long_2addr.S
new file mode 100644
index 0000000..c279305
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_long_2addr.S
@@ -0,0 +1 @@
+%include "mips/binopWide2addr.S" {"result0":"v0", "result1":"v1", "instr":"JAL(__divdi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_double_to_float.S b/runtime/interpreter/mterp/mips/op_double_to_float.S
new file mode 100644
index 0000000..1d32c2e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_double_to_float.S
@@ -0,0 +1 @@
+%include "mips/unopNarrower.S" {"instr":"cvt.s.d fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_double_to_int.S b/runtime/interpreter/mterp/mips/op_double_to_int.S
new file mode 100644
index 0000000..30a0a73
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_double_to_int.S
@@ -0,0 +1,58 @@
+%include "mips/unopNarrower.S" {"instr":"b d2i_doconv"}
+/*
+ * Convert the double in a0/a1 to an int in a0.
+ *
+ * We have to clip values to int min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer.  The EABI convert function isn't doing this for us.
+ */
+%break
+
+d2i_doconv:
+#ifdef MIPS32REVGE6
+    la        t0, .LDOUBLE_TO_INT_max
+    LOAD64_F(fa1, fa1f, t0)
+    cmp.le.d  ft2, fa1, fa0
+    l.s       fv0, .LDOUBLE_TO_INT_maxret
+    bc1nez    ft2, .L${opcode}_set_vreg_f
+
+    la        t0, .LDOUBLE_TO_INT_min
+    LOAD64_F(fa1, fa1f, t0)
+    cmp.le.d  ft2, fa0, fa1
+    l.s       fv0, .LDOUBLE_TO_INT_minret
+    bc1nez    ft2, .L${opcode}_set_vreg_f
+
+    mov.d     fa1, fa0
+    cmp.un.d  ft2, fa0, fa1
+    li.s      fv0, 0
+    bc1nez    ft2, .L${opcode}_set_vreg_f
+#else
+    la        t0, .LDOUBLE_TO_INT_max
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa1, fa0
+    l.s       fv0, .LDOUBLE_TO_INT_maxret
+    bc1t      .L${opcode}_set_vreg_f
+
+    la        t0, .LDOUBLE_TO_INT_min
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa0, fa1
+    l.s       fv0, .LDOUBLE_TO_INT_minret
+    bc1t      .L${opcode}_set_vreg_f
+
+    mov.d     fa1, fa0
+    c.un.d    fcc0, fa0, fa1
+    li.s      fv0, 0
+    bc1t      .L${opcode}_set_vreg_f
+#endif
+
+    trunc.w.d  fv0, fa0
+    b         .L${opcode}_set_vreg_f
+
+.LDOUBLE_TO_INT_max:
+    .dword 0x41dfffffffc00000              #  maxint, as a double
+.LDOUBLE_TO_INT_min:
+    .dword 0xc1e0000000000000              #  minint, as a double
+.LDOUBLE_TO_INT_maxret:
+    .word 0x7fffffff
+.LDOUBLE_TO_INT_minret:
+    .word 0x80000000
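The clamping implemented above is exactly the Java (and Dalvik) double-to-int cast: NaN maps to 0, out-of-range values saturate, and in-range values truncate toward zero. A runnable check:

    public class DoubleToInt {
        public static void main(String[] args) {
            System.out.println((int) Double.NaN);   // 0
            System.out.println((int) 1e99);         // 2147483647  (maxret)
            System.out.println((int) -1e99);        // -2147483648 (minret)
            System.out.println((int) 12345.9);      // 12345 (trunc.w.d)
        }
    }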
diff --git a/runtime/interpreter/mterp/mips/op_double_to_long.S b/runtime/interpreter/mterp/mips/op_double_to_long.S
new file mode 100644
index 0000000..4f9e367
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_double_to_long.S
@@ -0,0 +1,56 @@
+%include "mips/funopWide.S" {"instr":"b d2l_doconv", "st_result":"SET_VREG64(rRESULT0, rRESULT1, rOBJ)"}
+%break
+
+d2l_doconv:
+#ifdef MIPS32REVGE6
+    la        t0, .LDOUBLE_TO_LONG_max
+    LOAD64_F(fa1, fa1f, t0)
+    cmp.le.d  ft2, fa1, fa0
+    la        t0, .LDOUBLE_TO_LONG_ret_max
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1nez    ft2, .L${opcode}_set_vreg
+
+    la        t0, .LDOUBLE_TO_LONG_min
+    LOAD64_F(fa1, fa1f, t0)
+    cmp.le.d  ft2, fa0, fa1
+    la        t0, .LDOUBLE_TO_LONG_ret_min
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1nez    ft2, .L${opcode}_set_vreg
+
+    mov.d     fa1, fa0
+    cmp.un.d  ft2, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1nez    ft2, .L${opcode}_set_vreg
+#else
+    la        t0, .LDOUBLE_TO_LONG_max
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa1, fa0
+    la        t0, .LDOUBLE_TO_LONG_ret_max
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1t      .L${opcode}_set_vreg
+
+    la        t0, .LDOUBLE_TO_LONG_min
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa0, fa1
+    la        t0, .LDOUBLE_TO_LONG_ret_min
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1t      .L${opcode}_set_vreg
+
+    mov.d     fa1, fa0
+    c.un.d    fcc0, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1t      .L${opcode}_set_vreg
+#endif
+    JAL(__fixdfdi)
+    b         .L${opcode}_set_vreg
+
+.LDOUBLE_TO_LONG_max:
+    .dword 0x43e0000000000000              #  maxlong bound: 2^63 as a double
+.LDOUBLE_TO_LONG_min:
+    .dword 0xc3e0000000000000              #  minlong: -2^63 as a double
+.LDOUBLE_TO_LONG_ret_max:
+    .dword 0x7fffffffffffffff
+.LDOUBLE_TO_LONG_ret_min:
+    .dword 0x8000000000000000
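The .dword thresholds above are the powers of two bracketing the long range: 2^63 is the smallest double that must clamp to maxlong (maxlong itself is not representable as a double), while -2^63 is minlong exactly. A quick bit-pattern check in Java:

    public class DoubleToLongBounds {
        public static void main(String[] args) {
            System.out.println(Long.toHexString(Double.doubleToRawLongBits(0x1p63)));   // 43e0000000000000
            System.out.println(Long.toHexString(Double.doubleToRawLongBits(-0x1p63)));  // c3e0000000000000
            System.out.println((long) 0x1p63 == Long.MAX_VALUE);                        // true: saturates
        }
    }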
diff --git a/runtime/interpreter/mterp/mips/op_fill_array_data.S b/runtime/interpreter/mterp/mips/op_fill_array_data.S
new file mode 100644
index 0000000..8605746
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_fill_array_data.S
@@ -0,0 +1,14 @@
+    /* fill-array-data vAA, +BBBBBBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       a1, a1, 16                   #  a1 <- BBBB0000
+    or        a1, a0, a1                   #  a1 <- BBBBbbbb
+    GET_VREG(a0, a3)                       #  a0 <- vAA (array object)
+    EAS1(a1, rPC, a1)                      #  a1 <- PC + BBBBbbbb*2 (array data off.)
+    JAL(MterpFillArrayData)                #  v0 <- Mterp(obj, payload)
+    beqz      v0,  MterpPossibleException  #  has exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_filled_new_array.S b/runtime/interpreter/mterp/mips/op_filled_new_array.S
new file mode 100644
index 0000000..3f62fae
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_filled_new_array.S
@@ -0,0 +1,18 @@
+%default { "helper":"MterpFilledNewArray" }
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, type       /* BBBB */
+    .extern $helper
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME     # a0 <- shadow frame
+    move   a1, rPC
+    move   a2, rSELF
+    JAL($helper)                           #  v0 <- helper(shadow_frame, pc, self)
+    beqz      v0,  MterpPossibleException  #  has exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_filled_new_array_range.S b/runtime/interpreter/mterp/mips/op_filled_new_array_range.S
new file mode 100644
index 0000000..f8dcb0e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_filled_new_array_range.S
@@ -0,0 +1 @@
+%include "mips/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/mips/op_float_to_double.S b/runtime/interpreter/mterp/mips/op_float_to_double.S
new file mode 100644
index 0000000..1315255
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_float_to_double.S
@@ -0,0 +1 @@
+%include "mips/funopWider.S" {"instr":"cvt.d.s fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_float_to_int.S b/runtime/interpreter/mterp/mips/op_float_to_int.S
new file mode 100644
index 0000000..e032869
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_float_to_int.S
@@ -0,0 +1,50 @@
+%include "mips/funop.S" {"instr":"b f2i_doconv"}
+%break
+
+/*
+ * Not an entry point; it is used only once.
+ */
+f2i_doconv:
+#ifdef MIPS32REVGE6
+    l.s       fa1, .LFLOAT_TO_INT_max
+    cmp.le.s  ft2, fa1, fa0
+    l.s       fv0, .LFLOAT_TO_INT_ret_max
+    bc1nez    ft2, .L${opcode}_set_vreg_f
+
+    l.s       fa1, .LFLOAT_TO_INT_min
+    cmp.le.s  ft2, fa0, fa1
+    l.s       fv0, .LFLOAT_TO_INT_ret_min
+    bc1nez    ft2, .L${opcode}_set_vreg_f
+
+    mov.s     fa1, fa0
+    cmp.un.s  ft2, fa0, fa1
+    li.s      fv0, 0
+    bc1nez    ft2, .L${opcode}_set_vreg_f
+#else
+    l.s       fa1, .LFLOAT_TO_INT_max
+    c.ole.s   fcc0, fa1, fa0
+    l.s       fv0, .LFLOAT_TO_INT_ret_max
+    bc1t      .L${opcode}_set_vreg_f
+
+    l.s       fa1, .LFLOAT_TO_INT_min
+    c.ole.s   fcc0, fa0, fa1
+    l.s       fv0, .LFLOAT_TO_INT_ret_min
+    bc1t      .L${opcode}_set_vreg_f
+
+    mov.s     fa1, fa0
+    c.un.s    fcc0, fa0, fa1
+    li.s      fv0, 0
+    bc1t      .L${opcode}_set_vreg_f
+#endif
+
+    trunc.w.s  fv0, fa0
+    b         .L${opcode}_set_vreg_f
+
+.LFLOAT_TO_INT_max:
+    .word 0x4f000000
+.LFLOAT_TO_INT_min:
+    .word 0xcf000000
+.LFLOAT_TO_INT_ret_max:
+    .word 0x7fffffff
+.LFLOAT_TO_INT_ret_min:
+    .word 0x80000000
diff --git a/runtime/interpreter/mterp/mips/op_float_to_long.S b/runtime/interpreter/mterp/mips/op_float_to_long.S
new file mode 100644
index 0000000..77b2c46
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_float_to_long.S
@@ -0,0 +1,51 @@
+%include "mips/funopWider.S" {"instr":"b f2l_doconv", "st_result":"SET_VREG64(rRESULT0, rRESULT1, rOBJ)"}
+%break
+
+f2l_doconv:
+#ifdef MIPS32REVGE6
+    l.s       fa1, .LLONG_TO_max
+    cmp.le.s  ft2, fa1, fa0
+    li        rRESULT0, ~0
+    li        rRESULT1, ~0x80000000
+    bc1nez    ft2, .L${opcode}_set_vreg
+
+    l.s       fa1, .LLONG_TO_min
+    cmp.le.s  ft2, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0x80000000
+    bc1nez    ft2, .L${opcode}_set_vreg
+
+    mov.s     fa1, fa0
+    cmp.un.s  ft2, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1nez    ft2, .L${opcode}_set_vreg
+#else
+    l.s       fa1, .LLONG_TO_max
+    c.ole.s   fcc0, fa1, fa0
+    li        rRESULT0, ~0
+    li        rRESULT1, ~0x80000000
+    bc1t      .L${opcode}_set_vreg
+
+    l.s       fa1, .LLONG_TO_min
+    c.ole.s   fcc0, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0x80000000
+    bc1t      .L${opcode}_set_vreg
+
+    mov.s     fa1, fa0
+    c.un.s    fcc0, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1t      .L${opcode}_set_vreg
+#endif
+
+    JAL(__fixsfdi)
+
+    b         .L${opcode}_set_vreg
+
+.LLONG_TO_max:
+    .word 0x5f000000
+
+.LLONG_TO_min:
+    .word 0xdf000000
diff --git a/runtime/interpreter/mterp/mips/op_goto.S b/runtime/interpreter/mterp/mips/op_goto.S
new file mode 100644
index 0000000..d6f21c9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_goto.S
@@ -0,0 +1,38 @@
+    /*
+     * Unconditional branch, 8-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto +AA */
+#if MTERP_PROFILE_BRANCHES
+    sll       a0, rINST, 16                #  a0 <- AAxx0000
+    sra       rINST, a0, 24                #  rINST <- ssssssAA (sign-extended)
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+    addu      a2, rINST, rINST             #  a2 <- byte offset
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+    /* If backwards branch, refresh rIBASE */
+    bgez      a2, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#else
+    sll       a0, rINST, 16                #  a0 <- AAxx0000
+    sra       rINST, a0, 24                #  rINST <- ssssssAA (sign-extended)
+    addu      a2, rINST, rINST             #  a2 <- byte offset
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+    /* If backwards branch, refresh rIBASE */
+    bgez      a2, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
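The "addu aN, rINST, rINST" in both paths above performs the offset doubling the header comment describes; in Java terms (inst holds the 16-bit code unit):

    // goto: AA is a signed 8-bit code-unit delta in bits 15..8; instructions
    // are 16-bit code units, so the byte offset is twice the delta.
    static int gotoByteOffset(int inst) {
        int delta = (inst << 16) >> 24;   // sll 16, sra 24: ssssssAA
        return delta * 2;                 // addu a, rINST, rINST
    }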
diff --git a/runtime/interpreter/mterp/mips/op_goto_16.S b/runtime/interpreter/mterp/mips/op_goto_16.S
new file mode 100644
index 0000000..cec4432
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_goto_16.S
@@ -0,0 +1,34 @@
+    /*
+     * Unconditional branch, 16-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto/16 +AAAA */
+#if MTERP_PROFILE_BRANCHES
+    FETCH_S(rINST, 1)                      #  rINST <- ssssAAAA (sign-extended)
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgez      a1, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#else
+    FETCH_S(rINST, 1)                      #  rINST <- ssssAAAA (sign-extended)
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgez      a1, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_goto_32.S b/runtime/interpreter/mterp/mips/op_goto_32.S
new file mode 100644
index 0000000..083acd1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_goto_32.S
@@ -0,0 +1,43 @@
+    /*
+     * Unconditional branch, 32-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     *
+     * Unlike most opcodes, this one is allowed to branch to itself, so
+     * our "backward branch" test must be "<=0" instead of "<0".
+     */
+    /* goto/32 +AAAAAAAA */
+#if MTERP_PROFILE_BRANCHES
+    FETCH(a0, 1)                           #  a0 <- aaaa (lo)
+    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
+    sll       a1, a1, 16
+    or        rINST, a0, a1                #  rINST <- AAAAaaaa
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgtz      a1, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#else
+    FETCH(a0, 1)                           #  a0 <- aaaa (lo)
+    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
+    sll       a1, a1, 16
+    or        rINST, a0, a1                #  rINST <- AAAAaaaa
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgtz      a1, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_if_eq.S b/runtime/interpreter/mterp/mips/op_if_eq.S
new file mode 100644
index 0000000..e7190d8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_eq.S
@@ -0,0 +1 @@
+%include "mips/bincmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/mips/op_if_eqz.S b/runtime/interpreter/mterp/mips/op_if_eqz.S
new file mode 100644
index 0000000..0a78fd9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_eqz.S
@@ -0,0 +1 @@
+%include "mips/zcmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/mips/op_if_ge.S b/runtime/interpreter/mterp/mips/op_if_ge.S
new file mode 100644
index 0000000..b2629ba
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_ge.S
@@ -0,0 +1 @@
+%include "mips/bincmp.S" { "revcmp":"lt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_gez.S b/runtime/interpreter/mterp/mips/op_if_gez.S
new file mode 100644
index 0000000..b02f677
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_gez.S
@@ -0,0 +1 @@
+%include "mips/zcmp.S" { "revcmp":"lt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_gt.S b/runtime/interpreter/mterp/mips/op_if_gt.S
new file mode 100644
index 0000000..f620d4a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_gt.S
@@ -0,0 +1 @@
+%include "mips/bincmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/mips/op_if_gtz.S b/runtime/interpreter/mterp/mips/op_if_gtz.S
new file mode 100644
index 0000000..5e5dd70
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_gtz.S
@@ -0,0 +1 @@
+%include "mips/zcmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/mips/op_if_le.S b/runtime/interpreter/mterp/mips/op_if_le.S
new file mode 100644
index 0000000..a4e8b1a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_le.S
@@ -0,0 +1 @@
+%include "mips/bincmp.S" { "revcmp":"gt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_lez.S b/runtime/interpreter/mterp/mips/op_if_lez.S
new file mode 100644
index 0000000..af551a6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_lez.S
@@ -0,0 +1 @@
+%include "mips/zcmp.S" { "revcmp":"gt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_lt.S b/runtime/interpreter/mterp/mips/op_if_lt.S
new file mode 100644
index 0000000..f33b9a4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_lt.S
@@ -0,0 +1 @@
+%include "mips/bincmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/mips/op_if_ltz.S b/runtime/interpreter/mterp/mips/op_if_ltz.S
new file mode 100644
index 0000000..18fcb1d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_ltz.S
@@ -0,0 +1 @@
+%include "mips/zcmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/mips/op_if_ne.S b/runtime/interpreter/mterp/mips/op_if_ne.S
new file mode 100644
index 0000000..e0a102b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_ne.S
@@ -0,0 +1 @@
+%include "mips/bincmp.S" { "revcmp":"eq" }
diff --git a/runtime/interpreter/mterp/mips/op_if_nez.S b/runtime/interpreter/mterp/mips/op_if_nez.S
new file mode 100644
index 0000000..d1866a0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_nez.S
@@ -0,0 +1 @@
+%include "mips/zcmp.S" { "revcmp":"eq" }
diff --git a/runtime/interpreter/mterp/mips/op_iget.S b/runtime/interpreter/mterp/mips/op_iget.S
new file mode 100644
index 0000000..86d44fa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget.S
@@ -0,0 +1,25 @@
+%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- referrer
+    move  a3, rSELF                        # a3 <- self
+    JAL($helper)
+    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA4(a2)                           # a2<- A+
+    PREFETCH_INST(2)                       # load rINST
+    bnez  a3, MterpPossibleException        # bail out
+    .if $is_object
+    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
+    .else
+    SET_VREG(v0, a2)                       # fp[A] <- v0
+    .endif
+    ADVANCE(2)                             #  advance rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iget_boolean.S b/runtime/interpreter/mterp/mips/op_iget_boolean.S
new file mode 100644
index 0000000..e03364e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_boolean.S
@@ -0,0 +1 @@
+%include "mips/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_boolean_quick.S b/runtime/interpreter/mterp/mips/op_iget_boolean_quick.S
new file mode 100644
index 0000000..f3032b3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_boolean_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iget_quick.S" { "load":"lbu" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_byte.S b/runtime/interpreter/mterp/mips/op_iget_byte.S
new file mode 100644
index 0000000..dc87cfe
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_byte.S
@@ -0,0 +1 @@
+%include "mips/op_iget.S" { "helper":"artGetByteInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_byte_quick.S b/runtime/interpreter/mterp/mips/op_iget_byte_quick.S
new file mode 100644
index 0000000..d93f844
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_byte_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iget_quick.S" { "load":"lb" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_char.S b/runtime/interpreter/mterp/mips/op_iget_char.S
new file mode 100644
index 0000000..55f8a93
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_char.S
@@ -0,0 +1 @@
+%include "mips/op_iget.S" { "helper":"artGetCharInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_char_quick.S b/runtime/interpreter/mterp/mips/op_iget_char_quick.S
new file mode 100644
index 0000000..6f6d608
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_char_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iget_quick.S" { "load":"lhu" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_object.S b/runtime/interpreter/mterp/mips/op_iget_object.S
new file mode 100644
index 0000000..11d93a4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_object.S
@@ -0,0 +1 @@
+%include "mips/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_object_quick.S b/runtime/interpreter/mterp/mips/op_iget_object_quick.S
new file mode 100644
index 0000000..31d94b9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_object_quick.S
@@ -0,0 +1,15 @@
+    /* For: iget-object-quick */
+    /* op vA, vB, offset@CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    EXPORT_PC()
+    GET_VREG(a0, a2)                       #  a0 <- object we're operating on
+    JAL(artIGetObjectFromMterp)            #  v0 <- GetObj(obj, offset)
+    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA4(a2)                           #  a2<- A+
+    PREFETCH_INST(2)                       #  load rINST
+    bnez a3, MterpPossibleException        #  bail out
+    SET_VREG_OBJECT(v0, a2)                #  fp[A] <- v0
+    ADVANCE(2)                             #  advance rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iget_quick.S b/runtime/interpreter/mterp/mips/op_iget_quick.S
new file mode 100644
index 0000000..fbafa5b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_quick.S
@@ -0,0 +1,14 @@
+%default { "load":"lw" }
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1
+    $load     a0, 0(t0)                    #  a0 <- obj.field (8/16/32 bits)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
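op_iget_quick dereferences a precomputed byte offset off the object pointer after a null check. The closest plain-Java analogue is sun.misc.Unsafe field access; the sketch below is purely illustrative of the access pattern, not of how the runtime implements it:

    import sun.misc.Unsafe;
    import java.lang.reflect.Field;

    public class IGetQuickSketch {
        static class Point { int x = 42; }

        // Mirrors the hunk: null check, then a load at object + byte offset.
        static int igetQuick(Unsafe u, Object obj, long byteOffset) {
            if (obj == null) throw new NullPointerException(); // common_errNullObject
            return u.getInt(obj, byteOffset);                  // "$load a0, 0(t0)"
        }

        public static void main(String[] args) throws Exception {
            Field f = Unsafe.class.getDeclaredField("theUnsafe");
            f.setAccessible(true);
            Unsafe u = (Unsafe) f.get(null);
            long off = u.objectFieldOffset(Point.class.getDeclaredField("x"));
            System.out.println(igetQuick(u, new Point(), off)); // 42
        }
    }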
diff --git a/runtime/interpreter/mterp/mips/op_iget_short.S b/runtime/interpreter/mterp/mips/op_iget_short.S
new file mode 100644
index 0000000..9086246
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_short.S
@@ -0,0 +1 @@
+%include "mips/op_iget.S" { "helper":"artGetShortInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_short_quick.S b/runtime/interpreter/mterp/mips/op_iget_short_quick.S
new file mode 100644
index 0000000..899a0fe
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_short_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iget_quick.S" { "load":"lh" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide.S b/runtime/interpreter/mterp/mips/op_iget_wide.S
new file mode 100644
index 0000000..8fe3089
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_wide.S
@@ -0,0 +1,20 @@
+    /*
+     * 64-bit instance field get.
+     *
+     * for: iget-wide
+     */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field byte offset
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- referrer
+    move  a3, rSELF                        # a3 <- self
+    JAL(artGet64InstanceFromCode)
+    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA4(a2)                           # a2<- A+
+    PREFETCH_INST(2)                       # load rINST
+    bnez a3, MterpException                # bail out
+    SET_VREG64(v0, v1, a2)                 # fp[A] <- v0/v1
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide_quick.S b/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
new file mode 100644
index 0000000..4d2f291
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
@@ -0,0 +1,13 @@
+    # iget-wide-quick vA, vB, offset       /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1                   #  t0 <- a3 + a1
+    LOAD64(a0, a1, t0)                     #  a0 <- obj.field (64 bits, aligned)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, a2)                 #  fp[A] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_instance_of.S b/runtime/interpreter/mterp/mips/op_instance_of.S
new file mode 100644
index 0000000..d2679bd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_instance_of.S
@@ -0,0 +1,21 @@
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     */
+    # instance-of vA, vB, class            /* CCCC */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- CCCC
+    GET_OPB(a1)                            # a1 <- B
+    EAS2(a1, rFP, a1)                      # a1 <- &object
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- method
+    move  a3, rSELF                        # a3 <- self
+    GET_OPA4(rOBJ)                         # rOBJ <- A+
+    JAL(MterpInstanceOf)                   # v0 <- Mterp(index, &obj, method, self)
+    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
+    PREFETCH_INST(2)                       # load rINST
+    bnez a1, MterpException
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t0)            # vA <- v0
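At the source level the handler above is plain instanceof, including the detail that a null reference yields 0 rather than an exception:

    public class InstanceOfDemo {
        public static void main(String[] args) {
            Object o = "hello";
            System.out.println(o instanceof CharSequence);    // true:  vA <- 1
            System.out.println(null instanceof CharSequence); // false: vA <- 0, no NPE
        }
    }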
diff --git a/runtime/interpreter/mterp/mips/op_int_to_byte.S b/runtime/interpreter/mterp/mips/op_int_to_byte.S
new file mode 100644
index 0000000..77314c62
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_int_to_byte.S
@@ -0,0 +1 @@
+%include "mips/unop.S" {"preinstr":"sll a0, a0, 24", "instr":"sra a0, a0, 24"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_char.S b/runtime/interpreter/mterp/mips/op_int_to_char.S
new file mode 100644
index 0000000..1b74a6e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_int_to_char.S
@@ -0,0 +1 @@
+%include "mips/unop.S" {"preinstr":"", "instr":"and a0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_double.S b/runtime/interpreter/mterp/mips/op_int_to_double.S
new file mode 100644
index 0000000..89484ce
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_int_to_double.S
@@ -0,0 +1 @@
+%include "mips/funopWider.S" {"instr":"cvt.d.w fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_float.S b/runtime/interpreter/mterp/mips/op_int_to_float.S
new file mode 100644
index 0000000..d6f4b36
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_int_to_float.S
@@ -0,0 +1 @@
+%include "mips/funop.S" {"instr":"cvt.s.w fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_long.S b/runtime/interpreter/mterp/mips/op_int_to_long.S
new file mode 100644
index 0000000..9907463
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_int_to_long.S
@@ -0,0 +1 @@
+%include "mips/unopWider.S" {"instr":"sra a1, a0, 31"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_short.S b/runtime/interpreter/mterp/mips/op_int_to_short.S
new file mode 100644
index 0000000..5649c2a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_int_to_short.S
@@ -0,0 +1 @@
+%include "mips/unop.S" {"preinstr":"sll a0, 16", "instr":"sra a0, 16"}
diff --git a/runtime/interpreter/mterp/mips/op_invoke_direct.S b/runtime/interpreter/mterp/mips/op_invoke_direct.S
new file mode 100644
index 0000000..1ef198a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_direct.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_direct_range.S b/runtime/interpreter/mterp/mips/op_invoke_direct_range.S
new file mode 100644
index 0000000..af7477f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_direct_range.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_interface.S b/runtime/interpreter/mterp/mips/op_invoke_interface.S
new file mode 100644
index 0000000..80a485a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_interface.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeInterface" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_interface_range.S b/runtime/interpreter/mterp/mips/op_invoke_interface_range.S
new file mode 100644
index 0000000..8d725dc
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_interface_range.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_static.S b/runtime/interpreter/mterp/mips/op_invoke_static.S
new file mode 100644
index 0000000..46253cb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_static.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeStatic" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_static_range.S b/runtime/interpreter/mterp/mips/op_invoke_static_range.S
new file mode 100644
index 0000000..96abafe
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_static_range.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_super.S b/runtime/interpreter/mterp/mips/op_invoke_super.S
new file mode 100644
index 0000000..473951b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_super.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeSuper" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_super_range.S b/runtime/interpreter/mterp/mips/op_invoke_super_range.S
new file mode 100644
index 0000000..963ff27
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_super_range.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual.S b/runtime/interpreter/mterp/mips/op_invoke_virtual.S
new file mode 100644
index 0000000..ea51e98
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_virtual.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeVirtual" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/mips/op_invoke_virtual_quick.S
new file mode 100644
index 0000000..0c00091
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_virtual_quick.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual_range.S b/runtime/interpreter/mterp/mips/op_invoke_virtual_range.S
new file mode 100644
index 0000000..82201e7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_virtual_range.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/mips/op_invoke_virtual_range_quick.S
new file mode 100644
index 0000000..c783675
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_virtual_range_quick.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/mips/op_iput.S b/runtime/interpreter/mterp/mips/op_iput.S
new file mode 100644
index 0000000..732a9a4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput.S
@@ -0,0 +1,21 @@
+%default { "handler":"artSet32InstanceFromMterp" }
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    .extern $handler
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    GET_OPA4(a2)                           # a2 <- A+
+    GET_VREG(a2, a2)                       # a2 <- fp[A]
+    lw    a3, OFF_FP_METHOD(rFP)           # a3 <- referrer
+    PREFETCH_INST(2)                       # load rINST
+    JAL($handler)
+    bnez  v0, MterpPossibleException       # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_boolean.S b/runtime/interpreter/mterp/mips/op_iput_boolean.S
new file mode 100644
index 0000000..da28c97
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_boolean.S
@@ -0,0 +1 @@
+%include "mips/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_boolean_quick.S b/runtime/interpreter/mterp/mips/op_iput_boolean_quick.S
new file mode 100644
index 0000000..7d5caf6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_boolean_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_byte.S b/runtime/interpreter/mterp/mips/op_iput_byte.S
new file mode 100644
index 0000000..da28c97
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_byte.S
@@ -0,0 +1 @@
+%include "mips/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_byte_quick.S b/runtime/interpreter/mterp/mips/op_iput_byte_quick.S
new file mode 100644
index 0000000..7d5caf6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_byte_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_char.S b/runtime/interpreter/mterp/mips/op_iput_char.S
new file mode 100644
index 0000000..389b0bf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_char.S
@@ -0,0 +1 @@
+%include "mips/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_char_quick.S b/runtime/interpreter/mterp/mips/op_iput_char_quick.S
new file mode 100644
index 0000000..4bc84eb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_char_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_object.S b/runtime/interpreter/mterp/mips/op_iput_object.S
new file mode 100644
index 0000000..6b856e7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_object.S
@@ -0,0 +1,16 @@
+    /*
+     * 32-bit instance field put.
+     *
+     * for: iput-object, iput-object-volatile
+     */
+    # op vA, vB, field                     /* CCCC */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rPC
+    move   a2, rINST
+    move   a3, rSELF
+    JAL(MterpIputObject)
+    beqz   v0, MterpException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_object_quick.S b/runtime/interpreter/mterp/mips/op_iput_object_quick.S
new file mode 100644
index 0000000..c3f1526
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_object_quick.S
@@ -0,0 +1,11 @@
+    /* For: iput-object-quick */
+    # op vA, vB, offset                 /* CCCC */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rPC
+    move   a2, rINST
+    JAL(MterpIputObjectQuick)
+    beqz   v0, MterpException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_quick.S b/runtime/interpreter/mterp/mips/op_iput_quick.S
new file mode 100644
index 0000000..0829666
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_quick.S
@@ -0,0 +1,14 @@
+%default { "store":"sw" }
+    /* For: iput-quick, iput-object-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    beqz      a3, common_errNullObject     #  object was null
+    GET_VREG(a0, a2)                       #  a0 <- fp[A]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      t0, a3, a1
+    $store    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_short.S b/runtime/interpreter/mterp/mips/op_iput_short.S
new file mode 100644
index 0000000..389b0bf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_short.S
@@ -0,0 +1 @@
+%include "mips/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_short_quick.S b/runtime/interpreter/mterp/mips/op_iput_short_quick.S
new file mode 100644
index 0000000..4bc84eb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_short_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_wide.S b/runtime/interpreter/mterp/mips/op_iput_wide.S
new file mode 100644
index 0000000..6d23f8c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_wide.S
@@ -0,0 +1,15 @@
+    # iput-wide vA, vB, field              /* CCCC */
+    .extern artSet64InstanceFromMterp
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    GET_OPA4(a2)                           # a2 <- A+
+    EAS2(a2, rFP, a2)                      # a2 <- &fp[A]
+    lw    a3, OFF_FP_METHOD(rFP)           # a3 <- referrer
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet64InstanceFromMterp)
+    bnez  v0, MterpPossibleException       # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_wide_quick.S b/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
new file mode 100644
index 0000000..9fdb847
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
@@ -0,0 +1,14 @@
+    # iput-wide-quick vA, vB, offset       /* CCCC */
+    GET_OPA4(a0)                           #  a0 <- A(+)
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a2, a1)                       #  a2 <- fp[B], the object pointer
+    # check object for null
+    beqz      a2, common_errNullObject     #  object was null
+    EAS2(a3, rFP, a0)                      #  a3 <- &fp[A]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[A]
+    FETCH(a3, 1)                           #  a3 <- field byte offset
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      a2, a2, a3                   #  a2 <- &obj.field
+    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_long_to_double.S b/runtime/interpreter/mterp/mips/op_long_to_double.S
new file mode 100644
index 0000000..b83aaf4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_long_to_double.S
@@ -0,0 +1 @@
+%include "mips/funopWide.S" {"instr":"JAL(__floatdidf)", "ld_arg":"LOAD64(rARG0, rARG1, a3)"}
diff --git a/runtime/interpreter/mterp/mips/op_long_to_float.S b/runtime/interpreter/mterp/mips/op_long_to_float.S
new file mode 100644
index 0000000..27faba5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_long_to_float.S
@@ -0,0 +1 @@
+%include "mips/unopNarrower.S" {"instr":"JAL(__floatdisf)", "load":"LOAD64(rARG0, rARG1, a3)"}
diff --git a/runtime/interpreter/mterp/mips/op_long_to_int.S b/runtime/interpreter/mterp/mips/op_long_to_int.S
new file mode 100644
index 0000000..949c180
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_long_to_int.S
@@ -0,0 +1,2 @@
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%include "mips/op_move.S"
diff --git a/runtime/interpreter/mterp/mips/op_monitor_enter.S b/runtime/interpreter/mterp/mips/op_monitor_enter.S
new file mode 100644
index 0000000..20d9029
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_monitor_enter.S
@@ -0,0 +1,13 @@
+    /*
+     * Synchronize on an object.
+     */
+    /* monitor-enter vAA */
+    EXPORT_PC()
+    GET_OPA(a2)                            # a2 <- AA
+    GET_VREG(a0, a2)                       # a0 <- vAA (object)
+    move   a1, rSELF                       # a1 <- self
+    JAL(artLockObjectFromCode)             # v0 <- artLockObject(obj, self)
+    bnez v0, MterpException
+    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_monitor_exit.S b/runtime/interpreter/mterp/mips/op_monitor_exit.S
new file mode 100644
index 0000000..1eadff9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_monitor_exit.S
@@ -0,0 +1,17 @@
+    /*
+     * Unlock an object.
+     *
+     * Exceptions that occur when unlocking a monitor need to appear as
+     * if they happened at the following instruction.  See the Dalvik
+     * instruction spec.
+     */
+    /* monitor-exit vAA */
+    EXPORT_PC()
+    GET_OPA(a2)                            # a2 <- AA
+    GET_VREG(a0, a2)                       # a0 <- vAA (object)
+    move   a1, rSELF                       # a1 <- self
+    JAL(artUnlockObjectFromCode)           # v0 <- artUnlockObject(obj, self)
+    bnez v0, MterpException
+    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move.S b/runtime/interpreter/mterp/mips/op_move.S
new file mode 100644
index 0000000..76588ba
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    GET_OPB(a1)                            #  a1 <- B from 15:12
+    GET_OPA4(a0)                           #  a0 <- A from 11:8
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[B]
+    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT(a2, a0)                #  fp[A] <- a2
+    .else
+    SET_VREG(a2, a0)                       #  fp[A] <- a2
+    .endif
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_16.S b/runtime/interpreter/mterp/mips/op_move_16.S
new file mode 100644
index 0000000..f7de6c2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    FETCH(a1, 2)                           #  a1 <- BBBB
+    FETCH(a0, 1)                           #  a0 <- AAAA
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT(a2, a0)                #  fp[AAAA] <- a2
+    .else
+    SET_VREG(a2, a0)                       #  fp[AAAA] <- a2
+    .endif
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_exception.S b/runtime/interpreter/mterp/mips/op_move_exception.S
new file mode 100644
index 0000000..f04a035
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_exception.S
@@ -0,0 +1,8 @@
+    /* move-exception vAA */
+    GET_OPA(a2)                                 #  a2 <- AA
+    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)    #  get exception obj
+    FETCH_ADVANCE_INST(1)                       #  advance rPC, load rINST
+    SET_VREG_OBJECT(a3, a2)                     #  fp[AA] <- exception obj
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    sw    zero, THREAD_EXCEPTION_OFFSET(rSELF)  #  clear exception
+    GOTO_OPCODE(t0)                             #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_from16.S b/runtime/interpreter/mterp/mips/op_move_from16.S
new file mode 100644
index 0000000..b8be741
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_from16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    GET_OPA(a0)                            #  a0 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT(a2, a0)                #  fp[AA] <- a2
+    .else
+    SET_VREG(a2, a0)                       #  fp[AA] <- a2
+    .endif
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_object.S b/runtime/interpreter/mterp/mips/op_move_object.S
new file mode 100644
index 0000000..9420ff3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_object.S
@@ -0,0 +1 @@
+%include "mips/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_object_16.S b/runtime/interpreter/mterp/mips/op_move_object_16.S
new file mode 100644
index 0000000..d6454c2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_object_16.S
@@ -0,0 +1 @@
+%include "mips/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_object_from16.S b/runtime/interpreter/mterp/mips/op_move_object_from16.S
new file mode 100644
index 0000000..db0aca1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_object_from16.S
@@ -0,0 +1 @@
+%include "mips/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_result.S b/runtime/interpreter/mterp/mips/op_move_result.S
new file mode 100644
index 0000000..315c68e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_result.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    lw    a0, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
+    lw    a0, 0(a0)                        #  a0 <- result.i
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT(a0, a2)                #  fp[AA] <- a0
+    .else
+    SET_VREG(a0, a2)                       #  fp[AA] <- a0
+    .endif
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_result_object.S b/runtime/interpreter/mterp/mips/op_move_result_object.S
new file mode 100644
index 0000000..fcbffee
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_result_object.S
@@ -0,0 +1 @@
+%include "mips/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_result_wide.S b/runtime/interpreter/mterp/mips/op_move_result_wide.S
new file mode 100644
index 0000000..940c1ff
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_result_wide.S
@@ -0,0 +1,8 @@
+    /* move-result-wide vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    lw    a3, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- retval.j
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    SET_VREG64(a0, a1, a2)                 #  fp[AA] <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_wide.S b/runtime/interpreter/mterp/mips/op_move_wide.S
new file mode 100644
index 0000000..dd224c3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_wide.S
@@ -0,0 +1,10 @@
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[B]
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    SET_VREG64(a0, a1, a2)                 #  fp[A] <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_wide_16.S b/runtime/interpreter/mterp/mips/op_move_wide_16.S
new file mode 100644
index 0000000..d8761eb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_wide_16.S
@@ -0,0 +1,10 @@
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    FETCH(a3, 2)                           #  a3 <- BBBB
+    FETCH(a2, 1)                           #  a2 <- AAAA
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    SET_VREG64(a0, a1, a2)                 #  fp[AAAA] <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_wide_from16.S b/runtime/interpreter/mterp/mips/op_move_wide_from16.S
new file mode 100644
index 0000000..2103fa1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_wide_from16.S
@@ -0,0 +1,10 @@
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    FETCH(a3, 1)                           #  a3 <- BBBB
+    GET_OPA(a2)                            #  a2 <- AA
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    SET_VREG64(a0, a1, a2)                 #  fp[AA] <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_mul_double.S b/runtime/interpreter/mterp/mips/op_mul_double.S
new file mode 100644
index 0000000..44a473b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_double.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide.S" {"instr":"mul.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_double_2addr.S b/runtime/interpreter/mterp/mips/op_mul_double_2addr.S
new file mode 100644
index 0000000..4e5c230
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_double_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide2addr.S" {"instr":"mul.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_float.S b/runtime/interpreter/mterp/mips/op_mul_float.S
new file mode 100644
index 0000000..abc9390
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_float.S
@@ -0,0 +1 @@
+%include "mips/fbinop.S" {"instr":"mul.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_float_2addr.S b/runtime/interpreter/mterp/mips/op_mul_float_2addr.S
new file mode 100644
index 0000000..2469109
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_float_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinop2addr.S" {"instr":"mul.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int.S b/runtime/interpreter/mterp/mips/op_mul_int.S
new file mode 100644
index 0000000..266823c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int_2addr.S b/runtime/interpreter/mterp/mips/op_mul_int_2addr.S
new file mode 100644
index 0000000..b7dc5d3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int_lit16.S b/runtime/interpreter/mterp/mips/op_mul_int_lit16.S
new file mode 100644
index 0000000..fb4c8ec
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_int_lit16.S
@@ -0,0 +1 @@
+%include "mips/binopLit16.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int_lit8.S b/runtime/interpreter/mterp/mips/op_mul_int_lit8.S
new file mode 100644
index 0000000..6d2e7de
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_long.S b/runtime/interpreter/mterp/mips/op_mul_long.S
new file mode 100644
index 0000000..803bbec
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_long.S
@@ -0,0 +1,43 @@
+    /*
+     * Signed 64-bit integer multiply.
+     *         a1   a0
+     *   x     a3   a2
+     *   -------------
+     *       a2a1 a2a0
+     *       a3a0
+     *  a3a1 (<= unused)
+     *  ---------------
+     *         v1   v0
+     */
+    /* mul-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       t0, a0, 255                  #  a2 <- BB
+    srl       t1, a0, 8                    #  a3 <- CC
+    EAS2(t0, rFP, t0)                      #  t0 <- &fp[BB]
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vBB/vBB+1
+
+    EAS2(t1, rFP, t1)                      #  t1 <- &fp[CC]
+    LOAD64(a2, a3, t1)                     #  a2/a3 <- vCC/vCC+1
+
+    mul       v1, a3, a0                   #  v1= a3a0
+#ifdef MIPS32REVGE6
+    mulu      v0, a2, a0                   #  v0= a2a0
+    muhu      t1, a2, a0
+#else
+    multu     a2, a0
+    mfhi      t1
+    mflo      v0                           #  v0= a2a0
+#endif
+    mul       t0, a2, a1                   #  t0= a2a1
+    addu      v1, v1, t1                   #  v1+= hi(a2a0)
+    addu      v1, v1, t0                   #  v1= a3a0 + a2a1;
+
+    GET_OPA(a0)                            #  a0 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    b         .L${opcode}_finish
+%break
+
+.L${opcode}_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(v0, v1, a0)                 #  vAA::vAA+1 <- v0(low) :: v1(high)
+    GOTO_OPCODE(t0)                        #  jump to next instruction
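The column diagram at the top of op_mul_long.S drops the a3a1 term because only the low 64 bits of the product are kept. A small C model of that scheme (function name and test values are illustrative), which agrees with a native 64-bit multiply:

    #include <assert.h>
    #include <stdint.h>

    /* Model of op_mul_long.S: a3a1 lands entirely above bit 63, so it is
     * never computed; everything else wraps mod 2^32 per word. */
    static uint64_t mul_long_model(uint64_t x, uint64_t y) {
        uint32_t a0 = (uint32_t)x, a1 = (uint32_t)(x >> 32);   /* vBB lo/hi */
        uint32_t a2 = (uint32_t)y, a3 = (uint32_t)(y >> 32);   /* vCC lo/hi */
        uint64_t a2a0 = (uint64_t)a2 * a0;                     /* full 64-bit */
        uint32_t lo = (uint32_t)a2a0;                          /* v0 */
        uint32_t hi = (uint32_t)(a2a0 >> 32) + a3 * a0 + a2 * a1; /* v1 */
        return ((uint64_t)hi << 32) | lo;
    }

    int main(void) {
        uint64_t x = 0x123456789ABCDEF0ULL, y = 0x0FEDCBA987654321ULL;
        assert(mul_long_model(x, y) == x * y);   /* x * y wraps mod 2^64 */
        return 0;
    }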
diff --git a/runtime/interpreter/mterp/mips/op_mul_long_2addr.S b/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
new file mode 100644
index 0000000..6950b71
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
@@ -0,0 +1,31 @@
+    /*
+     * See op_mul_long.S for more details
+     */
+    /* mul-long/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64(a0, a1, t0)                     #  vA.low / high
+
+    GET_OPB(t1)                            #  t1 <- B
+    EAS2(t1, rFP, t1)                      #  t1 <- &fp[B]
+    LOAD64(a2, a3, t1)                     #  vB.low / high
+
+    mul       v1, a3, a0                   #  v1= a3a0
+#ifdef MIPS32REVGE6
+    mulu      v0, a2, a0                   #  v0= a2a0
+    muhu      t1, a2, a0
+#else
+    multu     a2, a0
+    mfhi      t1
+    mflo      v0                           #  v0= a2a0
+#endif
+    mul       t2, a2, a1                   #  t2= a2a1
+    addu      v1, v1, t1                   #  v1= a3a0 + hi(a2a0)
+    addu      v1, v1, t2                   #  v1= v1 + a2a1;
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    # vAA <- v0 (low)
+    SET_VREG64(v0, v1, rOBJ)               #  vAA+1 <- v1 (high)
+    GOTO_OPCODE(t1)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_neg_double.S b/runtime/interpreter/mterp/mips/op_neg_double.S
new file mode 100644
index 0000000..89cc918
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_neg_double.S
@@ -0,0 +1 @@
+%include "mips/unopWide.S" {"instr":"addu a1, a1, 0x80000000"}
diff --git a/runtime/interpreter/mterp/mips/op_neg_float.S b/runtime/interpreter/mterp/mips/op_neg_float.S
new file mode 100644
index 0000000..e702755
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_neg_float.S
@@ -0,0 +1 @@
+%include "mips/unop.S" {"instr":"addu a0, a0, 0x80000000"}
diff --git a/runtime/interpreter/mterp/mips/op_neg_int.S b/runtime/interpreter/mterp/mips/op_neg_int.S
new file mode 100644
index 0000000..4461731
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_neg_int.S
@@ -0,0 +1 @@
+%include "mips/unop.S" {"instr":"negu a0, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_neg_long.S b/runtime/interpreter/mterp/mips/op_neg_long.S
new file mode 100644
index 0000000..71e60f5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_neg_long.S
@@ -0,0 +1 @@
+%include "mips/unopWide.S" {"result0":"v0", "result1":"v1", "preinstr":"negu v0, a0", "instr":"negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_new_array.S b/runtime/interpreter/mterp/mips/op_new_array.S
new file mode 100644
index 0000000..4a6512d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_new_array.S
@@ -0,0 +1,18 @@
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array vA, vB, class@CCCC */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rPC
+    move   a2, rINST
+    move   a3, rSELF
+    JAL(MterpNewArray)
+    beqz   v0, MterpPossibleException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_new_instance.S b/runtime/interpreter/mterp/mips/op_new_instance.S
new file mode 100644
index 0000000..51a09b2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_new_instance.S
@@ -0,0 +1,13 @@
+    /*
+     * Create a new instance of a class.
+     */
+    # new-instance vAA, class              /* BBBB */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rSELF
+    move   a2, rINST
+    JAL(MterpNewInstance)
+    beqz   v0, MterpPossibleException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_nop.S b/runtime/interpreter/mterp/mips/op_nop.S
new file mode 100644
index 0000000..3565631
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_nop.S
@@ -0,0 +1,3 @@
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_not_int.S b/runtime/interpreter/mterp/mips/op_not_int.S
new file mode 100644
index 0000000..55d8cc1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_not_int.S
@@ -0,0 +1 @@
+%include "mips/unop.S" {"instr":"not a0, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_not_long.S b/runtime/interpreter/mterp/mips/op_not_long.S
new file mode 100644
index 0000000..9e7c95b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_not_long.S
@@ -0,0 +1 @@
+%include "mips/unopWide.S" {"preinstr":"not a0, a0", "instr":"not a1, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int.S b/runtime/interpreter/mterp/mips/op_or_int.S
new file mode 100644
index 0000000..c7ce760
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_or_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int_2addr.S b/runtime/interpreter/mterp/mips/op_or_int_2addr.S
new file mode 100644
index 0000000..192d611
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_or_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int_lit16.S b/runtime/interpreter/mterp/mips/op_or_int_lit16.S
new file mode 100644
index 0000000..f4ef75f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_or_int_lit16.S
@@ -0,0 +1 @@
+%include "mips/binopLit16.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int_lit8.S b/runtime/interpreter/mterp/mips/op_or_int_lit8.S
new file mode 100644
index 0000000..f6212e2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_or_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_long.S b/runtime/interpreter/mterp/mips/op_or_long.S
new file mode 100644
index 0000000..0f94486
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_or_long.S
@@ -0,0 +1 @@
+%include "mips/binopWide.S" {"preinstr":"or a0, a0, a2", "instr":"or a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_or_long_2addr.S b/runtime/interpreter/mterp/mips/op_or_long_2addr.S
new file mode 100644
index 0000000..43c3d05
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_or_long_2addr.S
@@ -0,0 +1 @@
+%include "mips/binopWide2addr.S" {"preinstr":"or a0, a0, a2", "instr":"or a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_packed_switch.S b/runtime/interpreter/mterp/mips/op_packed_switch.S
new file mode 100644
index 0000000..93fae97
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_packed_switch.S
@@ -0,0 +1,57 @@
+%default { "func":"MterpDoPackedSwitch" }
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBB */
+#if MTERP_PROFILE_BRANCHES
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       t0, a1, 16
+    or        a0, a0, t0                   #  a0 <- BBBBbbbb
+    GET_VREG(a1, a3)                       #  a1 <- vAA
+    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
+    JAL($func)                             #  v0 <- code-unit branch offset
+    move      rINST, v0
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgtz      a1, .L${opcode}_finish
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+#else
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       t0, a1, 16
+    or        a0, a0, t0                   #  a0 <- BBBBbbbb
+    GET_VREG(a1, a3)                       #  a1 <- vAA
+    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
+    JAL($func)                             #  v0 <- code-unit branch offset
+    move      rINST, v0
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgtz      a1, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
+
+%break
+
+.L${opcode}_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
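$func here expands to MterpDoPackedSwitch (or MterpDoSparseSwitch, included further below); the handler only computes the payload address and hands the lookup off. A hedged C sketch of a packed-switch lookup, based on the dex packed-switch-payload layout (ushort ident 0x0100, ushort size, int first_key, then size 32-bit targets); the function name and signature are illustrative, not the real helper's:

    #include <stdint.h>

    /* Sketch only. `payload` points at the switch data (4-byte aligned in
     * dex); the return value is a branch offset in 16-bit code units. */
    static int32_t do_packed_switch(const uint16_t* payload, int32_t test_val) {
        int32_t size = payload[1];
        int32_t first_key = payload[2] | ((int32_t)payload[3] << 16);
        const int32_t* targets = (const int32_t*)&payload[4];
        int32_t index = test_val - first_key;
        if ((uint32_t)index < (uint32_t)size)
            return targets[index];   /* branch to the matching case */
        return 3;                    /* miss: fall through past the 31t insn */
    }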
diff --git a/runtime/interpreter/mterp/mips/op_rem_double.S b/runtime/interpreter/mterp/mips/op_rem_double.S
new file mode 100644
index 0000000..a6890a8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_double.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide.S" {"instr":"JAL(fmod)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_double_2addr.S b/runtime/interpreter/mterp/mips/op_rem_double_2addr.S
new file mode 100644
index 0000000..a24e160
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_double_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide2addr.S" {"instr":"JAL(fmod)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_float.S b/runtime/interpreter/mterp/mips/op_rem_float.S
new file mode 100644
index 0000000..ac3d50c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_float.S
@@ -0,0 +1 @@
+%include "mips/fbinop.S" {"instr":"JAL(fmodf)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_float_2addr.S b/runtime/interpreter/mterp/mips/op_rem_float_2addr.S
new file mode 100644
index 0000000..7f0a932
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_float_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinop2addr.S" {"instr":"JAL(fmodf)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_int.S b/runtime/interpreter/mterp/mips/op_rem_int.S
new file mode 100644
index 0000000..c2a334a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_int.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binop.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binop.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_int_2addr.S b/runtime/interpreter/mterp/mips/op_rem_int_2addr.S
new file mode 100644
index 0000000..46c353f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_int_2addr.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binop2addr.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binop2addr.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_int_lit16.S b/runtime/interpreter/mterp/mips/op_rem_int_lit16.S
new file mode 100644
index 0000000..2894ad3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_int_lit16.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binopLit16.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binopLit16.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_int_lit8.S b/runtime/interpreter/mterp/mips/op_rem_int_lit8.S
new file mode 100644
index 0000000..582248b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_int_lit8.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binopLit8.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binopLit8.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_long.S b/runtime/interpreter/mterp/mips/op_rem_long.S
new file mode 100644
index 0000000..e3eb19b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_long.S
@@ -0,0 +1 @@
+%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "instr":"JAL(__moddi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_long_2addr.S b/runtime/interpreter/mterp/mips/op_rem_long_2addr.S
new file mode 100644
index 0000000..8fc9fdb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_long_2addr.S
@@ -0,0 +1 @@
+%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "instr":"JAL(__moddi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_return.S b/runtime/interpreter/mterp/mips/op_return.S
new file mode 100644
index 0000000..894ae18
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_return.S
@@ -0,0 +1,18 @@
+    /*
+     * Return a 32-bit value.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    JAL(MterpThreadFenceForConstructor)
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    move      a0, rSELF
+    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqz      ra, 1f
+    JAL(MterpSuspendCheck)                 # (self)
+1:
+    GET_OPA(a2)                            #  a2 <- AA
+    GET_VREG(v0, a2)                       #  v0 <- vAA
+    move      v1, zero
+    b         MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_return_object.S b/runtime/interpreter/mterp/mips/op_return_object.S
new file mode 100644
index 0000000..7350e00
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_return_object.S
@@ -0,0 +1 @@
+%include "mips/op_return.S"
diff --git a/runtime/interpreter/mterp/mips/op_return_void.S b/runtime/interpreter/mterp/mips/op_return_void.S
new file mode 100644
index 0000000..35c1326
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_return_void.S
@@ -0,0 +1,11 @@
+    .extern MterpThreadFenceForConstructor
+    JAL(MterpThreadFenceForConstructor)
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    move      a0, rSELF
+    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqz      ra, 1f
+    JAL(MterpSuspendCheck)                 # (self)
+1:
+    move      v0, zero
+    move      v1, zero
+    b         MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S b/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S
new file mode 100644
index 0000000..56968b5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S
@@ -0,0 +1,9 @@
+    lw     ra, THREAD_FLAGS_OFFSET(rSELF)
+    move   a0, rSELF
+    and    ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqz   ra, 1f
+    JAL(MterpSuspendCheck)                 # (self)
+1:
+    move   v0, zero
+    move   v1, zero
+    b      MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_return_wide.S b/runtime/interpreter/mterp/mips/op_return_wide.S
new file mode 100644
index 0000000..91d62bf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_return_wide.S
@@ -0,0 +1,16 @@
+    /*
+     * Return a 64-bit value.
+     */
+    /* return-wide vAA */
+    .extern MterpThreadFenceForConstructor
+    JAL(MterpThreadFenceForConstructor)
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    move      a0, rSELF
+    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqz      ra, 1f
+    JAL(MterpSuspendCheck)                 # (self)
+1:
+    GET_OPA(a2)                            #  a2 <- AA
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AA]
+    LOAD64(v0, v1, a2)                     #  v0/v1 <- vAA/vAA+1
+    b         MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_rsub_int.S b/runtime/interpreter/mterp/mips/op_rsub_int.S
new file mode 100644
index 0000000..f7e61bb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rsub_int.S
@@ -0,0 +1,2 @@
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%include "mips/binopLit16.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_rsub_int_lit8.S b/runtime/interpreter/mterp/mips/op_rsub_int_lit8.S
new file mode 100644
index 0000000..3968a5e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rsub_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_sget.S b/runtime/interpreter/mterp/mips/op_sget.S
new file mode 100644
index 0000000..3efcfbb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget.S
@@ -0,0 +1,25 @@
+%default { "is_object":"0", "helper":"artGet32StaticFromCode" }
+    /*
+     * General SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    .extern $helper
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    lw    a1, OFF_FP_METHOD(rFP)           # a1 <- method
+    move  a2, rSELF                        # a2 <- self
+    JAL($helper)
+    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA(a2)                            # a2 <- AA
+    PREFETCH_INST(2)
+    bnez  a3, MterpException               # bail out
+.if $is_object
+    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
+.else
+    SET_VREG(v0, a2)                       # fp[AA] <- v0
+.endif
+    ADVANCE(2)
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_sget_boolean.S b/runtime/interpreter/mterp/mips/op_sget_boolean.S
new file mode 100644
index 0000000..45a5a70
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget_boolean.S
@@ -0,0 +1 @@
+%include "mips/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_byte.S b/runtime/interpreter/mterp/mips/op_sget_byte.S
new file mode 100644
index 0000000..319122c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget_byte.S
@@ -0,0 +1 @@
+%include "mips/op_sget.S" {"helper":"artGetByteStaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_char.S b/runtime/interpreter/mterp/mips/op_sget_char.S
new file mode 100644
index 0000000..7103847
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget_char.S
@@ -0,0 +1 @@
+%include "mips/op_sget.S" {"helper":"artGetCharStaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_object.S b/runtime/interpreter/mterp/mips/op_sget_object.S
new file mode 100644
index 0000000..b205f51
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget_object.S
@@ -0,0 +1 @@
+%include "mips/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_short.S b/runtime/interpreter/mterp/mips/op_sget_short.S
new file mode 100644
index 0000000..3301823
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget_short.S
@@ -0,0 +1 @@
+%include "mips/op_sget.S" {"helper":"artGetShortStaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_wide.S b/runtime/interpreter/mterp/mips/op_sget_wide.S
new file mode 100644
index 0000000..7aee386
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget_wide.S
@@ -0,0 +1,17 @@
+    /*
+     * 64-bit SGET handler.
+     */
+    # sget-wide vAA, field                 /* BBBB */
+    .extern artGet64StaticFromCode
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    lw    a1, OFF_FP_METHOD(rFP)           # a1 <- method
+    move  a2, rSELF                        # a2 <- self
+    JAL(artGet64StaticFromCode)
+    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    bnez  a3, MterpException
+    GET_OPA(a1)                            # a1 <- AA
+    FETCH_ADVANCE_INST(2)                  # advance rPC, load rINST
+    SET_VREG64(v0, v1, a1)                 # vAA/vAA+1 <- v0/v1
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_shl_int.S b/runtime/interpreter/mterp/mips/op_shl_int.S
new file mode 100644
index 0000000..15cbe94
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shl_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shl_int_2addr.S b/runtime/interpreter/mterp/mips/op_shl_int_2addr.S
new file mode 100644
index 0000000..ef9bd65
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shl_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shl_int_lit8.S b/runtime/interpreter/mterp/mips/op_shl_int_lit8.S
new file mode 100644
index 0000000..d2afb53
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shl_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shl_long.S b/runtime/interpreter/mterp/mips/op_shl_long.S
new file mode 100644
index 0000000..0121669
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shl_long.S
@@ -0,0 +1,31 @@
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* shl-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t2)                            #  t2 <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi    v1, a2, 0x20                   #  v1 <- shift & 0x20
+    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
+    bnez    v1, .L${opcode}_finish
+    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
+    srl     a0, 1
+    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
+    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
+    or      v1, a0                         #  rhi<- rhi | alo
+    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- v0/v1
+%break
+
+.L${opcode}_finish:
+    SET_VREG64_GOTO(zero, v0, t2, t0)      #  vAA/vAA+1 <- rlo/rhi
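The &0x20 test and the (lo >> 1) >> ~shift cross term above build a 64-bit left shift out of 32-bit operations. A C model of the same logic (illustrative; assumes 32-bit int), checked against a native 64-bit shift:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t shl_long_model(uint32_t lo, uint32_t hi, unsigned shift) {
        shift &= 63;                 /* Dalvik masks the shift distance */
        uint32_t rlo, rhi;
        if (shift & 0x20) {          /* shift >= 32: low word becomes high */
            rlo = 0;
            rhi = lo << (shift & 31);
        } else {
            rlo = lo << (shift & 31);
            /* (lo >> 1) >> (31 - s) == lo >> (32 - s), and 0 at s == 0 */
            rhi = (hi << (shift & 31)) | ((lo >> 1) >> (31 - (shift & 31)));
        }
        return ((uint64_t)rhi << 32) | rlo;
    }

    int main(void) {
        for (unsigned s = 0; s < 64; ++s)
            assert(shl_long_model(0x89ABCDEFu, 0x01234567u, s) ==
                   (0x0123456789ABCDEFULL << s));
        return 0;
    }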
diff --git a/runtime/interpreter/mterp/mips/op_shl_long_2addr.S b/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
new file mode 100644
index 0000000..8ce6058
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
@@ -0,0 +1,27 @@
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shl-long/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(t2, rFP, rOBJ)                    #  t2 <- &fp[A]
+    LOAD64(a0, a1, t2)                     #  a0/a1 <- vAA/vAA+1
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi    v1, a2, 0x20                   #  v1 <- shift & 0x20
+    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
+    bnez    v1, .L${opcode}_finish
+    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
+    srl     a0, 1
+    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
+    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
+    or      v1, a0                         #  rhi<- rhi | alo
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vAA/vAA+1 <- v0/v1
+%break
+
+.L${opcode}_finish:
+    SET_VREG64_GOTO(zero, v0, rOBJ, t0)    #  vAA/vAA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_shr_int.S b/runtime/interpreter/mterp/mips/op_shr_int.S
new file mode 100644
index 0000000..6110839
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shr_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shr_int_2addr.S b/runtime/interpreter/mterp/mips/op_shr_int_2addr.S
new file mode 100644
index 0000000..e00ff5b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shr_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shr_int_lit8.S b/runtime/interpreter/mterp/mips/op_shr_int_lit8.S
new file mode 100644
index 0000000..d058f58
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shr_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shr_long.S b/runtime/interpreter/mterp/mips/op_shr_long.S
new file mode 100644
index 0000000..4c42758
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shr_long.S
@@ -0,0 +1,31 @@
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* shr-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t3)                            #  t3 <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi    v0, a2, 0x20                   #  shift & 0x20
+    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
+    bnez    v0, .L${opcode}_finish
+    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
+    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
+    sll     a1, 1
+    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
+    or      v0, a1                         #  rlo<- rlo | ahi
+    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/vAA+1 <- v0/v1
+%break
+
+.L${opcode}_finish:
+    sra     a3, a1, 31                     #  a3<- sign(ah)
+    SET_VREG64_GOTO(v1, a3, t3, t0)        #  vAA/vAA+1 <- rlo/rhi
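The arithmetic variant differs from shl-long only in the shift-by-32-or-more path, where the high word becomes the sign fill sra(ahi, 31), as in .L..._finish above. A C model (illustrative; assumes >> on negative signed values is an arithmetic shift, as on GCC/Clang):

    #include <assert.h>
    #include <stdint.h>

    static int64_t shr_long_model(uint32_t lo, int32_t hi, unsigned shift) {
        shift &= 63;
        uint32_t rlo;
        int32_t rhi;
        if (shift & 0x20) {                      /* shift >= 32 */
            rlo = (uint32_t)(hi >> (shift & 31));
            rhi = hi >> 31;                      /* sign fill, sra a3, a1, 31 */
        } else {
            /* ((hi << 1) << (31 - s)) == hi << (32 - s), and 0 at s == 0 */
            rlo = (lo >> (shift & 31)) |
                  (((uint32_t)hi << 1) << (31 - (shift & 31)));
            rhi = hi >> (shift & 31);
        }
        return (int64_t)(((uint64_t)(uint32_t)rhi << 32) | rlo);
    }

    int main(void) {
        int64_t v = (int64_t)0xFEDCBA9889ABCDEFULL;  /* negative test value */
        for (unsigned s = 0; s < 64; ++s)
            assert(shr_long_model((uint32_t)v, (int32_t)(v >> 32), s) == v >> s);
        return 0;
    }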
diff --git a/runtime/interpreter/mterp/mips/op_shr_long_2addr.S b/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
new file mode 100644
index 0000000..3adc085
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
@@ -0,0 +1,27 @@
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shr-long/2addr vA, vB */
+    GET_OPA4(t2)                           #  t2 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(t0, rFP, t2)                      #  t0 <- &fp[A]
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vAA/vAA+1
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi    v0, a2, 0x20                   #  shift & 0x20
+    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
+    bnez    v0, .L${opcode}_finish
+    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
+    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
+    sll     a1, 1
+    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
+    or      v0, a1                         #  rlo<- rlo | ahi
+    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- v0/v1
+%break
+
+.L${opcode}_finish:
+    sra     a3, a1, 31                     #  a3<- sign(ah)
+    SET_VREG64_GOTO(v1, a3, t2, t0)        #  vAA/vAA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_sparse_switch.S b/runtime/interpreter/mterp/mips/op_sparse_switch.S
new file mode 100644
index 0000000..670f464
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sparse_switch.S
@@ -0,0 +1 @@
+%include "mips/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/mips/op_sput.S b/runtime/interpreter/mterp/mips/op_sput.S
new file mode 100644
index 0000000..ee313b9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput.S
@@ -0,0 +1,19 @@
+%default { "helper":"artSet32StaticFromCode"}
+    /*
+     * General SPUT handler.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field                        /* BBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    GET_OPA(a3)                            # a3 <- AA
+    GET_VREG(a1, a3)                       # a1 <- fp[AA], the new value
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- method
+    move  a3, rSELF                        # a3 <- self
+    PREFETCH_INST(2)                       # load rINST
+    JAL($helper)
+    bnez  v0, MterpException               # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_sput_boolean.S b/runtime/interpreter/mterp/mips/op_sput_boolean.S
new file mode 100644
index 0000000..7909ef5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput_boolean.S
@@ -0,0 +1 @@
+%include "mips/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_byte.S b/runtime/interpreter/mterp/mips/op_sput_byte.S
new file mode 100644
index 0000000..7909ef5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput_byte.S
@@ -0,0 +1 @@
+%include "mips/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_char.S b/runtime/interpreter/mterp/mips/op_sput_char.S
new file mode 100644
index 0000000..188195c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput_char.S
@@ -0,0 +1 @@
+%include "mips/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_object.S b/runtime/interpreter/mterp/mips/op_sput_object.S
new file mode 100644
index 0000000..4f9034e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput_object.S
@@ -0,0 +1,16 @@
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput-object
+     */
+    /* op vAA, field@BBBB */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rPC
+    move   a2, rINST
+    move   a3, rSELF
+    JAL(MterpSputObject)
+    beqz   v0, MterpException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_sput_short.S b/runtime/interpreter/mterp/mips/op_sput_short.S
new file mode 100644
index 0000000..188195c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput_short.S
@@ -0,0 +1 @@
+%include "mips/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_wide.S b/runtime/interpreter/mterp/mips/op_sput_wide.S
new file mode 100644
index 0000000..1e11466
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput_wide.S
@@ -0,0 +1,17 @@
+    /*
+     * 64-bit SPUT handler.
+     */
+    # sput-wide vAA, field                 /* BBBB */
+    .extern artSet64IndirectStaticFromMterp
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    lw    a1, OFF_FP_METHOD(rFP)           # a1 <- method
+    GET_OPA(a2)                            # a2 <- AA
+    EAS2(a2, rFP, a2)                      # a2 <- &fp[AA]
+    move  a3, rSELF                        # a3 <- self
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet64IndirectStaticFromMterp)
+    bnez  v0, MterpException               # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_sub_double.S b/runtime/interpreter/mterp/mips/op_sub_double.S
new file mode 100644
index 0000000..9473218
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_double.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide.S" {"instr":"sub.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_double_2addr.S b/runtime/interpreter/mterp/mips/op_sub_double_2addr.S
new file mode 100644
index 0000000..7ce7c74
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_double_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide2addr.S" {"instr":"sub.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_float.S b/runtime/interpreter/mterp/mips/op_sub_float.S
new file mode 100644
index 0000000..04650d9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_float.S
@@ -0,0 +1 @@
+%include "mips/fbinop.S" {"instr":"sub.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_float_2addr.S b/runtime/interpreter/mterp/mips/op_sub_float_2addr.S
new file mode 100644
index 0000000..dfe935c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_float_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinop2addr.S" {"instr":"sub.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_int.S b/runtime/interpreter/mterp/mips/op_sub_int.S
new file mode 100644
index 0000000..43da1b6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_int_2addr.S b/runtime/interpreter/mterp/mips/op_sub_int_2addr.S
new file mode 100644
index 0000000..cf34aa6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_long.S b/runtime/interpreter/mterp/mips/op_sub_long.S
new file mode 100644
index 0000000..0f58e8e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_long.S
@@ -0,0 +1,8 @@
+/*
+ * For little endian the code sequence looks as follows:
+ *    subu    v0,a0,a2
+ *    subu    v1,a1,a3
+ *    sltu    a0,a0,v0
+ *    subu    v1,v1,a0
+ */
+%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "preinstr":"subu v0, a0, a2", "instr":"subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0" }
diff --git a/runtime/interpreter/mterp/mips/op_sub_long_2addr.S b/runtime/interpreter/mterp/mips/op_sub_long_2addr.S
new file mode 100644
index 0000000..aa256c2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_long_2addr.S
@@ -0,0 +1,4 @@
+/*
+ * See op_sub_long.S for more details
+ */
+%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "preinstr":"subu v0, a0, a2", "instr":"subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0" }
diff --git a/runtime/interpreter/mterp/mips/op_throw.S b/runtime/interpreter/mterp/mips/op_throw.S
new file mode 100644
index 0000000..adc8b04
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_throw.S
@@ -0,0 +1,11 @@
+    /*
+     * Throw an exception object in the current thread.
+     */
+    /* throw vAA */
+    EXPORT_PC()                              #  exception handler can throw
+    GET_OPA(a2)                              #  a2 <- AA
+    GET_VREG(a1, a2)                         #  a1 <- vAA (exception object)
+    # null object?
+    beqz  a1, common_errNullObject           #  yes, throw an NPE instead
+    sw    a1, THREAD_EXCEPTION_OFFSET(rSELF) #  thread->exception <- obj
+    b         MterpException
diff --git a/runtime/interpreter/mterp/mips/op_unused_3e.S b/runtime/interpreter/mterp/mips/op_unused_3e.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_3e.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_3f.S b/runtime/interpreter/mterp/mips/op_unused_3f.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_3f.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_40.S b/runtime/interpreter/mterp/mips/op_unused_40.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_40.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_41.S b/runtime/interpreter/mterp/mips/op_unused_41.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_41.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_42.S b/runtime/interpreter/mterp/mips/op_unused_42.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_42.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_43.S b/runtime/interpreter/mterp/mips/op_unused_43.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_43.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_73.S b/runtime/interpreter/mterp/mips/op_unused_73.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_73.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_79.S b/runtime/interpreter/mterp/mips/op_unused_79.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_79.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_7a.S b/runtime/interpreter/mterp/mips/op_unused_7a.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_7a.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f3.S b/runtime/interpreter/mterp/mips/op_unused_f3.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f3.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f4.S b/runtime/interpreter/mterp/mips/op_unused_f4.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f4.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f5.S b/runtime/interpreter/mterp/mips/op_unused_f5.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f5.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f6.S b/runtime/interpreter/mterp/mips/op_unused_f6.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f6.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f7.S b/runtime/interpreter/mterp/mips/op_unused_f7.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f7.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f8.S b/runtime/interpreter/mterp/mips/op_unused_f8.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f8.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f9.S b/runtime/interpreter/mterp/mips/op_unused_f9.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f9.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fa.S b/runtime/interpreter/mterp/mips/op_unused_fa.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_fa.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fb.S b/runtime/interpreter/mterp/mips/op_unused_fb.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_fb.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fc.S b/runtime/interpreter/mterp/mips/op_unused_fc.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_fc.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fd.S b/runtime/interpreter/mterp/mips/op_unused_fd.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_fd.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fe.S b/runtime/interpreter/mterp/mips/op_unused_fe.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_fe.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_ff.S b/runtime/interpreter/mterp/mips/op_unused_ff.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_ff.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_ushr_int.S b/runtime/interpreter/mterp/mips/op_ushr_int.S
new file mode 100644
index 0000000..b95472b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_ushr_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_ushr_int_2addr.S b/runtime/interpreter/mterp/mips/op_ushr_int_2addr.S
new file mode 100644
index 0000000..fc17778
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_ushr_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"srl a0, a0, a1 "}
diff --git a/runtime/interpreter/mterp/mips/op_ushr_int_lit8.S b/runtime/interpreter/mterp/mips/op_ushr_int_lit8.S
new file mode 100644
index 0000000..c82cfba
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_ushr_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_ushr_long.S b/runtime/interpreter/mterp/mips/op_ushr_long.S
new file mode 100644
index 0000000..2e227a9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_ushr_long.S
@@ -0,0 +1,31 @@
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to use only the low
+     * 6 bits of the shift distance.
+     */
+    /* ushr-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi      v0, a2, 0x20                 #  shift & 0x20
+    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
+    bnez      v0, .L${opcode}_finish
+    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
+    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
+    sll       a1, 1
+    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
+    or        v0, a1                       #  rlo<- rlo | ahi
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vAA/vAA+1 <- v0/v1
+%break
+
+.L${opcode}_finish:
+    SET_VREG64_GOTO(v1, zero, rOBJ, t0)    #  vAA/vAA+1 <- v1/zero
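The half-register composition above, including the two-step sll that keeps the shift-by-zero case well defined, can be sketched in C (names are illustrative, not part of this change):

    #include <stdint.h>

    /* 64-bit logical right shift from 32-bit halves, mirroring the handler
     * above.  Dalvik masks the shift distance to the low 6 bits. */
    static uint64_t ushr_long(uint32_t lo, uint32_t hi, uint32_t shift) {
        uint32_t s = shift & 31;
        uint32_t rhi = hi >> s;                     /* srl v1, a1, a2 */
        if (shift & 0x20)                           /* andi v0, a2, 0x20 */
            return rhi;                             /* lo <- hi >> s, hi <- 0 */
        uint32_t rlo = lo >> s;                     /* srl v0, a0, a2 */
        /* sll by 1, then by (31 - s): hi << (32 - s), and 0 when s == 0 */
        rlo |= (uint32_t)(((uint64_t)hi << 1) << (31 - s));
        return ((uint64_t)rhi << 32) | rlo;
    }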
diff --git a/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S b/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
new file mode 100644
index 0000000..ccf1f7e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
@@ -0,0 +1,27 @@
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* ushr-long/2addr vA, vB */
+    GET_OPA4(t3)                           #  t3 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(t0, rFP, t3)                      #  t0 <- &fp[A]
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vAA/vAA+1
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi      v0, a2, 0x20                 #  shift & 0x20
+    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
+    bnez      v0, .L${opcode}_finish
+    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
+    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
+    sll       a1, 1
+    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
+    or        v0, a1                       #  rlo<- rlo | ahi
+    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vA/vA+1 <- v0/v1
+%break
+
+.L${opcode}_finish:
+    SET_VREG64_GOTO(v1, zero, t3, t0)      #  vA/vA+1 <- v1/zero
diff --git a/runtime/interpreter/mterp/mips/op_xor_int.S b/runtime/interpreter/mterp/mips/op_xor_int.S
new file mode 100644
index 0000000..6c23f1f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_xor_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_int_2addr.S b/runtime/interpreter/mterp/mips/op_xor_int_2addr.S
new file mode 100644
index 0000000..5ee1667
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_xor_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_int_lit16.S b/runtime/interpreter/mterp/mips/op_xor_int_lit16.S
new file mode 100644
index 0000000..2af37a6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_xor_int_lit16.S
@@ -0,0 +1 @@
+%include "mips/binopLit16.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_int_lit8.S b/runtime/interpreter/mterp/mips/op_xor_int_lit8.S
new file mode 100644
index 0000000..944ed69
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_xor_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_long.S b/runtime/interpreter/mterp/mips/op_xor_long.S
new file mode 100644
index 0000000..93f8f70
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_xor_long.S
@@ -0,0 +1 @@
+%include "mips/binopWide.S" {"preinstr":"xor a0, a0, a2", "instr":"xor a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_long_2addr.S b/runtime/interpreter/mterp/mips/op_xor_long_2addr.S
new file mode 100644
index 0000000..49f3fa4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_xor_long_2addr.S
@@ -0,0 +1 @@
+%include "mips/binopWide2addr.S" {"preinstr":"xor a0, a0, a2", "instr":"xor a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/unop.S b/runtime/interpreter/mterp/mips/unop.S
new file mode 100644
index 0000000..52a8f0a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/unop.S
@@ -0,0 +1,19 @@
+%default {"preinstr":"", "result0":"a0"}
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result0, t0, t1)        #  vAA <- result0
+    /* 9-10 instructions */
diff --git a/runtime/interpreter/mterp/mips/unopNarrower.S b/runtime/interpreter/mterp/mips/unopNarrower.S
new file mode 100644
index 0000000..9c38bad
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/unopNarrower.S
@@ -0,0 +1,24 @@
+%default {"load":"LOAD64_F(fa0, fa0f, a3)"}
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0/a1", where
+     * "result" is a 32-bit quantity in a0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     * If hard floating point support is available, use fa0 as the parameter,
+     * except for the long-to-float opcode.
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    $load
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $instr
+
+.L${opcode}_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)                  #  vA <- result0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/unopWide.S b/runtime/interpreter/mterp/mips/unopWide.S
new file mode 100644
index 0000000..fd25dff
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/unopWide.S
@@ -0,0 +1,20 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1"}
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vB/vB+1
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  a0/a1 <- op, a2-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64($result0, $result1, rOBJ)   #  vA/vA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
diff --git a/runtime/interpreter/mterp/mips/unopWider.S b/runtime/interpreter/mterp/mips/unopWider.S
new file mode 100644
index 0000000..1c18837
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/unopWider.S
@@ -0,0 +1,19 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1"}
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0", where
+     * "result" is a 64-bit quantity in a0/a1.
+     *
+     * For: int-to-long
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64($result0, $result1, rOBJ)   #  vA/vA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-11 instructions */
diff --git a/runtime/interpreter/mterp/mips/unused.S b/runtime/interpreter/mterp/mips/unused.S
new file mode 100644
index 0000000..ffa00be
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/unused.S
@@ -0,0 +1,4 @@
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
diff --git a/runtime/interpreter/mterp/mips/zcmp.S b/runtime/interpreter/mterp/mips/zcmp.S
new file mode 100644
index 0000000..1fa1385
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/zcmp.S
@@ -0,0 +1,32 @@
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
+    b${revcmp} a2, zero, 1f                #  branch to 1 if comparison failed
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- branch dist for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a1, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgez      a1, 3f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+3:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
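Stripped of the MTERP_PROFILE_BRANCHES hook, the offset selection above reduces to this C sketch (pc counts 16-bit code units; the function is illustrative, not part of this change):

    #include <stdint.h>

    /* Taken branches use the fetched offset; untaken ones step over this
     * 2-code-unit instruction.  A negative (backward) offset additionally
     * routes through the suspend-check path before dispatch. */
    static const uint16_t* zcmp_advance(const uint16_t* pc, int branch_taken) {
        int16_t offset = (int16_t)pc[1];  /* FETCH_S: offset in code units */
        if (!branch_taken)                /* b<revcmp> a2, zero, 1f        */
            offset = 2;
        return pc + offset;               /* FETCH_ADVANCE_INST_RB, in bytes */
    }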
diff --git a/runtime/interpreter/mterp/mips64/alt_stub.S b/runtime/interpreter/mterp/mips64/alt_stub.S
new file mode 100644
index 0000000..bd76a1b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/alt_stub.S
@@ -0,0 +1,14 @@
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (${opnum} * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
diff --git a/runtime/interpreter/mterp/mips64/bincmp.S b/runtime/interpreter/mterp/mips64/bincmp.S
new file mode 100644
index 0000000..d39c900
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/bincmp.S
@@ -0,0 +1,32 @@
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-le" you would use "le".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+
+    b${condition}c a0, a1, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + CCCC * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # CCCC * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/binop.S b/runtime/interpreter/mterp/mips64/binop.S
new file mode 100644
index 0000000..fab48b7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/binop.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG a0, a2                     # a0 <- vBB
+    GET_VREG a1, a3                     # a1 <- vCC
+    .if $chkzero
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    $preinstr                           # optional op
+    $instr                              # $result <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG $result, a4                # vAA <- $result
+    GOTO_OPCODE v0                      # jump to next instruction
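For an opcode generated with "chkzero":"1", the template expands to a guard like this C sketch of div-int; raise_divide_by_zero() is an illustrative stand-in for the common_errDivideByZero path:

    #include <stdint.h>

    extern void raise_divide_by_zero(void);   /* common_errDivideByZero */

    static int32_t div_int(int32_t vBB, int32_t vCC) {
        if (vCC == 0) {                  /* beqz a1, common_errDivideByZero */
            raise_divide_by_zero();
            return 0;                    /* not reached: handler unwinds */
        }
        if (vBB == INT32_MIN && vCC == -1)
            return INT32_MIN;            /* the MIPS divide yields this itself */
        return vBB / vCC;
    }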
diff --git a/runtime/interpreter/mterp/mips64/binop2addr.S b/runtime/interpreter/mterp/mips64/binop2addr.S
new file mode 100644
index 0000000..1ae73f5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/binop2addr.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    .if $chkzero
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    $preinstr                           # optional op
+    $instr                              # $result <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG $result, a2                # vA <- $result
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/binopLit16.S b/runtime/interpreter/mterp/mips64/binopLit16.S
new file mode 100644
index 0000000..9257758
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/binopLit16.S
@@ -0,0 +1,28 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    .if $chkzero
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    $preinstr                           # optional op
+    $instr                              # $result <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG $result, a2                # vA <- $result
+    GOTO_OPCODE v0                      # jump to next instruction
+
diff --git a/runtime/interpreter/mterp/mips64/binopLit8.S b/runtime/interpreter/mterp/mips64/binopLit8.S
new file mode 100644
index 0000000..f4a0bba
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/binopLit8.S
@@ -0,0 +1,29 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    lbu     a3, 2(rPC)                  # a3 <- BB
+    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a3                     # a0 <- vBB
+    .if $chkzero
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    $preinstr                           # optional op
+    $instr                              # $result <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG $result, a2                # vAA <- $result
+    GOTO_OPCODE v0                      # jump to next instruction
+
diff --git a/runtime/interpreter/mterp/mips64/binopWide.S b/runtime/interpreter/mterp/mips64/binopWide.S
new file mode 100644
index 0000000..732f0d6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/binopWide.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    .if $chkzero
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    $preinstr                           # optional op
+    $instr                              # $result <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE $result, a4           # vAA <- $result
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/binopWide2addr.S b/runtime/interpreter/mterp/mips64/binopWide2addr.S
new file mode 100644
index 0000000..45d8d82
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/binopWide2addr.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a2                # a0 <- vA
+    GET_VREG_WIDE a1, a3                # a1 <- vB
+    .if $chkzero
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    $preinstr                           # optional op
+    $instr                              # $result <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE $result, a2           # vA <- $result
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/entry.S b/runtime/interpreter/mterp/mips64/entry.S
new file mode 100644
index 0000000..ae6c26b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/entry.S
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Interpreter entry point.
+ */
+
+    .set    reorder
+
+    .text
+    .global ExecuteMterpImpl
+    .type   ExecuteMterpImpl, %function
+    .balign 16
+/*
+ * On entry:
+ *  a0  Thread* self
+ *  a1  code_item
+ *  a2  ShadowFrame
+ *  a3  JValue* result_register
+ *
+ */
+ExecuteMterpImpl:
+    .cfi_startproc
+    .cpsetup t9, t8, ExecuteMterpImpl
+
+    .cfi_def_cfa sp, 0
+    daddu   sp, sp, -STACK_SIZE
+    .cfi_adjust_cfa_offset STACK_SIZE
+
+    sd      t8, STACK_OFFSET_GP(sp)
+    .cfi_rel_offset 28, STACK_OFFSET_GP
+    sd      ra, STACK_OFFSET_RA(sp)
+    .cfi_rel_offset 31, STACK_OFFSET_RA
+
+    sd      s0, STACK_OFFSET_S0(sp)
+    .cfi_rel_offset 16, STACK_OFFSET_S0
+    sd      s1, STACK_OFFSET_S1(sp)
+    .cfi_rel_offset 17, STACK_OFFSET_S1
+    sd      s2, STACK_OFFSET_S2(sp)
+    .cfi_rel_offset 18, STACK_OFFSET_S2
+    sd      s3, STACK_OFFSET_S3(sp)
+    .cfi_rel_offset 19, STACK_OFFSET_S3
+    sd      s4, STACK_OFFSET_S4(sp)
+    .cfi_rel_offset 20, STACK_OFFSET_S4
+    sd      s5, STACK_OFFSET_S5(sp)
+    .cfi_rel_offset 21, STACK_OFFSET_S5
+
+    /* Remember the return register */
+    sd      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
+
+    /* Remember the code_item */
+    sd      a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
+
+    /* set up "named" registers */
+    move    rSELF, a0
+    daddu   rFP, a2, SHADOWFRAME_VREGS_OFFSET
+    lw      v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
+    dlsa    rREFS, v0, rFP, 2
+    daddu   rPC, a1, CODEITEM_INSNS_OFFSET
+    lw      v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
+    dlsa    rPC, v0, rPC, 1
+    EXPORT_PC
+
+    /* Starting ibase */
+    REFRESH_IBASE
+
+    /* start executing the instruction at rPC */
+    FETCH_INST
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+    /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/mips64/fallback.S b/runtime/interpreter/mterp/mips64/fallback.S
new file mode 100644
index 0000000..560b994
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fallback.S
@@ -0,0 +1,2 @@
+/* Transfer stub to alternate interpreter */
+    b       MterpFallback
diff --git a/runtime/interpreter/mterp/mips64/fbinop.S b/runtime/interpreter/mterp/mips64/fbinop.S
new file mode 100644
index 0000000..f19dd1c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fbinop.S
@@ -0,0 +1,18 @@
+%default {}
+    /*
+     * Generic 32-bit floating-point operation.
+     *
+     * For: add-float, sub-float, mul-float, div-float.
+     * form: <op> f0, f0, f1
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_FLOAT f0, a2               # f0 <- vBB
+    GET_VREG_FLOAT f1, a3               # f1 <- vCC
+    $instr                              # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a4               # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fbinop2addr.S b/runtime/interpreter/mterp/mips64/fbinop2addr.S
new file mode 100644
index 0000000..2e2cd7e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fbinop2addr.S
@@ -0,0 +1,17 @@
+%default {}
+    /*
+     * Generic 32-bit "/2addr" floating-point operation.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
+     * form: <op> f0, f0, f1
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_FLOAT f0, a2               # f0 <- vA
+    GET_VREG_FLOAT f1, a3               # f1 <- vB
+    $instr                              # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a2               # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fbinopWide.S b/runtime/interpreter/mterp/mips64/fbinopWide.S
new file mode 100644
index 0000000..8915c94
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fbinopWide.S
@@ -0,0 +1,18 @@
+%default {}
+    /*
+     * Generic 64-bit floating-point operation.
+     *
+     * For: add-double, sub-double, mul-double, div-double.
+     * form: <op> f0, f0, f1
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
+    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
+    $instr                              # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a4              # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fbinopWide2addr.S b/runtime/interpreter/mterp/mips64/fbinopWide2addr.S
new file mode 100644
index 0000000..a3f4eaa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fbinopWide2addr.S
@@ -0,0 +1,17 @@
+%default {}
+    /*
+     * Generic 64-bit "/2addr" floating-point operation.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
+     * form: <op> f0, f0, f1
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_DOUBLE f0, a2              # f0 <- vA
+    GET_VREG_DOUBLE f1, a3              # f1 <- vB
+    $instr                              # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a2              # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcmp.S b/runtime/interpreter/mterp/mips64/fcmp.S
new file mode 100644
index 0000000..2e1a3e4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fcmp.S
@@ -0,0 +1,32 @@
+%default {}
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * For: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_FLOAT f0, a2               # f0 <- vBB
+    GET_VREG_FLOAT f1, a3               # f1 <- vCC
+    cmp.eq.s f2, f0, f1
+    li      a0, 0
+    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
+    .if $gt_bias
+    cmp.lt.s f2, f0, f1
+    li      a0, -1
+    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
+    li      a0, 1                       # vBB > vCC or unordered
+    .else
+    cmp.lt.s f2, f1, f0
+    li      a0, 1
+    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
+    li      a0, -1                      # vBB < vCC or unordered
+    .endif
+1:
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
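The gt_bias selection encodes the NaN bias that distinguishes cmpg-float from cmpl-float; in C the result logic looks like this (illustrative sketch):

    /* Unordered operands fall through to +1 for cmpg-float (gt_bias)
     * and to -1 for cmpl-float. */
    static int cmp_float(float a, float b, int gt_bias) {
        if (a == b) return 0;            /* cmp.eq.s: ordered equal */
        if (gt_bias)
            return (a < b) ? -1 : 1;     /* NaN falls through to  1 */
        return (a > b) ? 1 : -1;         /* NaN falls through to -1 */
    }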
diff --git a/runtime/interpreter/mterp/mips64/fcmpWide.S b/runtime/interpreter/mterp/mips64/fcmpWide.S
new file mode 100644
index 0000000..2a3a341
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fcmpWide.S
@@ -0,0 +1,32 @@
+%default {}
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
+    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
+    cmp.eq.d f2, f0, f1
+    li      a0, 0
+    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
+    .if $gt_bias
+    cmp.lt.d f2, f0, f1
+    li      a0, -1
+    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
+    li      a0, 1                       # vBB > vCC or unordered
+    .else
+    cmp.lt.d f2, f1, f0
+    li      a0, 1
+    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
+    li      a0, -1                      # vBB < vCC or unordered
+    .endif
+1:
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcvtFooter.S b/runtime/interpreter/mterp/mips64/fcvtFooter.S
new file mode 100644
index 0000000..06e9507
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fcvtFooter.S
@@ -0,0 +1,18 @@
+    /*
+     * Stores a specified register containing the result of conversion
+     * from or to a floating-point type and jumps to the next instruction.
+     *
+     * Expects a1 to contain the destination Dalvik register number.
+     * a1 is set up by fcvtHeader.S.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     *
+     * Note that this file can't be %included after a %break in other files;
+     * those files inline a copy of its contents instead.
+     * See: float-to-int, float-to-long, double-to-int, double-to-long.
+     */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG$suffix $valreg, a1
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcvtHeader.S b/runtime/interpreter/mterp/mips64/fcvtHeader.S
new file mode 100644
index 0000000..8742e42
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fcvtHeader.S
@@ -0,0 +1,15 @@
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG$suffix $valreg, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/mips64/footer.S b/runtime/interpreter/mterp/mips64/footer.S
new file mode 100644
index 0000000..1a2e22b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/footer.S
@@ -0,0 +1,154 @@
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+
+    .extern MterpLogDivideByZeroException
+common_errDivideByZero:
+    EXPORT_PC
+#if MTERP_LOGGING
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpLogDivideByZeroException
+#endif
+    b       MterpCommonFallback
+
+    .extern MterpLogArrayIndexException
+common_errArrayIndex:
+    EXPORT_PC
+#if MTERP_LOGGING
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpLogArrayIndexException
+#endif
+    b       MterpCommonFallback
+
+    .extern MterpLogNullObjectException
+common_errNullObject:
+    EXPORT_PC
+#if MTERP_LOGGING
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpLogNullObjectException
+#endif
+    b       MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary.  If there is a pending
+ * exception, handle it.  Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)
+    beqzc   a0, MterpFallback                       # If not, fall back to reference interpreter.
+    /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here, or do we need to bail out to the caller?
+ *
+ */
+    .extern MterpHandleException
+MterpException:
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpHandleException                    # (self, shadow_frame)
+    beqzc   v0, MterpExceptionReturn                # no local catch, back to caller.
+    ld      a0, OFF_FP_CODE_ITEM(rFP)
+    lwu     a1, OFF_FP_DEX_PC(rFP)
+    REFRESH_IBASE
+    daddu   rPC, a0, CODEITEM_INSNS_OFFSET
+    dlsa    rPC, a1, rPC, 1                         # generate new dex_pc_ptr
+    sd      rPC, OFF_FP_DEX_PC_PTR(rFP)
+    /* resume execution at catch block */
+    FETCH_INST
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+    /* NOTE: no fallthrough */
+
+/*
+ * Check for suspend check request.  Assumes rINST already loaded, rPC advanced and
+ * still needs to get the opcode and branch to it, and flags are in ra.
+ */
+    .extern MterpSuspendCheck
+MterpCheckSuspendAndContinue:
+    REFRESH_IBASE
+    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    bnez    ra, check1
+    GET_INST_OPCODE v0                              # extract opcode from rINST
+    GOTO_OPCODE v0                                  # jump to next instruction
+check1:
+    EXPORT_PC
+    move    a0, rSELF
+    jal     MterpSuspendCheck                       # (self)
+    GET_INST_OPCODE v0                              # extract opcode from rINST
+    GOTO_OPCODE v0                                  # jump to next instruction
+
+/*
+ * Bail out to reference interpreter.
+ */
+    .extern MterpLogFallback
+MterpFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpLogFallback
+#endif
+MterpCommonFallback:
+    li      v0, 0                                   # signal retry with reference interpreter.
+    b       MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and RA.  Here we restore SP, restore the registers, and then restore
+ * RA to PC.
+ *
+ * On entry:
+ *  uint32_t* rFP  (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+    li      v0, 1                                   # signal return to caller.
+    b       MterpDone
+/*
+ * The returned value is expected in a0; if it is narrower than 64 bits,
+ * the 32 most significant bits of a0 must be 0.
+ */
+MterpReturn:
+    ld      a2, OFF_FP_RESULT_REGISTER(rFP)
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    sd      a0, 0(a2)
+    move    a0, rSELF
+    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqzc   ra, check2
+    jal     MterpSuspendCheck                       # (self)
+check2:
+    li      v0, 1                                   # signal return to caller.
+MterpDone:
+    ld      s5, STACK_OFFSET_S5(sp)
+    .cfi_restore 21
+    ld      s4, STACK_OFFSET_S4(sp)
+    .cfi_restore 20
+    ld      s3, STACK_OFFSET_S3(sp)
+    .cfi_restore 19
+    ld      s2, STACK_OFFSET_S2(sp)
+    .cfi_restore 18
+    ld      s1, STACK_OFFSET_S1(sp)
+    .cfi_restore 17
+    ld      s0, STACK_OFFSET_S0(sp)
+    .cfi_restore 16
+
+    ld      ra, STACK_OFFSET_RA(sp)
+    .cfi_restore 31
+
+    ld      t8, STACK_OFFSET_GP(sp)
+    .cpreturn
+    .cfi_restore 28
+
+    .set    noreorder
+    jr      ra
+    daddu   sp, sp, STACK_SIZE
+    .cfi_adjust_cfa_offset -STACK_SIZE
+
+    .cfi_endproc
+    .size ExecuteMterpImpl, .-ExecuteMterpImpl
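The suspend-check glue shared by these exits behaves like the following C sketch; the constants and helper declaration stand in for the THREAD_* flag bits and MterpSuspendCheck:

    #include <stdint.h>

    enum { kSuspendRequest = 1, kCheckpointRequest = 2 };  /* illustrative values */

    extern void MterpSuspendCheck(void* self);

    /* MterpCheckSuspendAndContinue: the caller preloaded flags into ra. */
    static void check_suspend_and_continue(void* self, uint32_t flags) {
        if (flags & (kSuspendRequest | kCheckpointRequest))
            MterpSuspendCheck(self);     /* jal MterpSuspendCheck (self) */
        /* GET_INST_OPCODE / GOTO_OPCODE then resume dispatch */
    }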
diff --git a/runtime/interpreter/mterp/mips64/header.S b/runtime/interpreter/mterp/mips64/header.S
new file mode 100644
index 0000000..4c3ca9e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/header.S
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <machine/regdef.h>
+
+/* TODO: add the missing file and use its FP register definitions. */
+/* #include <machine/fpregdef.h> */
+/* FP register definitions */
+#define f0  $$f0
+#define f1  $$f1
+#define f2  $$f2
+#define f3  $$f3
+#define f12 $$f12
+#define f13 $$f13
+
+/*
+ * It looks like the GNU assembler currently does not support the blec and bgtc
+ * idioms, which should translate into bgec and bltc respectively with swapped
+ * left and right register operands.
+ * TODO: remove these macros when the assembler is fixed.
+ */
+.macro blec lreg, rreg, target
+    bgec    \rreg, \lreg, \target
+.endm
+.macro bgtc lreg, rreg, target
+    bltc    \rreg, \lreg, \target
+.endm
+
+/*
+Mterp and MIPS64 notes:
+
+The following registers have fixed assignments:
+
+  reg nick      purpose
+  s0  rPC       interpreted program counter, used for fetching instructions
+  s1  rFP       interpreted frame pointer, used for accessing locals and args
+  s2  rSELF     self (Thread) pointer
+  s3  rINST     first 16-bit code unit of current instruction
+  s4  rIBASE    interpreted instruction base pointer, used for computed goto
+  s5  rREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).
+*/
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rPC     s0
+#define rFP     s1
+#define rSELF   s2
+#define rINST   s3
+#define rIBASE  s4
+#define rREFS   s5
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
+ * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
+
+/*
+ *
+ * The reference interpreter performs explicit suspend checks, which is somewhat wasteful.
+ * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
+ * mterp should do so as well.
+ */
+#define MTERP_SUSPEND 0
+
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+    sd      rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+    ld      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
+ */
+.macro FETCH_INST
+    lhu     rINST, 0(rPC)
+.endm
+
+/* Advance rPC by some number of code units. */
+.macro ADVANCE count
+    daddu   rPC, rPC, (\count) * 2
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset.  Advances rPC
+ * to point to the next instruction.
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss.  (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+    ADVANCE \count
+    FETCH_INST
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
+ * rINST ahead of possible exception point.  Be sure to manually advance rPC
+ * later.
+ */
+.macro PREFETCH_INST count
+    lhu     rINST, ((\count) * 2)(rPC)
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+    and     \reg, rINST, 255
+.endm
+
+/*
+ * Begin executing the opcode in _reg.
+ */
+.macro GOTO_OPCODE reg
+    .set noat
+    sll     AT, \reg, 7
+    daddu   AT, rIBASE, AT
+    jic     AT, 0
+    .set at
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ * Note, GET_VREG does sign extension to 64 bits while
+ * GET_VREG_U does zero extension to 64 bits.
+ * One is useful for arithmetic while the other is
+ * useful for storing the result value as 64-bit.
+ */
+.macro GET_VREG reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lw      \reg, 0(AT)
+    .set at
+.endm
+.macro GET_VREG_U reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lwu     \reg, 0(AT)
+    .set at
+.endm
+.macro GET_VREG_FLOAT reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lwc1    \reg, 0(AT)
+    .set at
+.endm
+.macro SET_VREG reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    sw      \reg, 0(AT)
+    dlsa    AT, \vreg, rREFS, 2
+    sw      zero, 0(AT)
+    .set at
+.endm
+.macro SET_VREG_OBJECT reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    sw      \reg, 0(AT)
+    dlsa    AT, \vreg, rREFS, 2
+    sw      \reg, 0(AT)
+    .set at
+.endm
+.macro SET_VREG_FLOAT reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    swc1    \reg, 0(AT)
+    dlsa    AT, \vreg, rREFS, 2
+    sw      zero, 0(AT)
+    .set at
+.endm
+
+/*
+ * Get/set the 64-bit value from a Dalvik register.
+ * Avoid unaligned memory accesses.
+ * Note, SET_VREG_WIDE clobbers the register containing the value being stored.
+ * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
+ */
+.macro GET_VREG_WIDE reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lw      \reg, 0(AT)
+    lw      AT, 4(AT)
+    dinsu   \reg, AT, 32, 32
+    .set at
+.endm
+.macro GET_VREG_DOUBLE reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lwc1    \reg, 0(AT)
+    lw      AT, 4(AT)
+    mthc1   AT, \reg
+    .set at
+.endm
+.macro SET_VREG_WIDE reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    sw      \reg, 0(AT)
+    drotr32 \reg, \reg, 0
+    sw      \reg, 4(AT)
+    dlsa    AT, \vreg, rREFS, 2
+    sw      zero, 0(AT)
+    sw      zero, 4(AT)
+    .set at
+.endm
+.macro SET_VREG_DOUBLE reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rREFS, 2
+    sw      zero, 0(AT)
+    sw      zero, 4(AT)
+    dlsa    AT, \vreg, rFP, 2
+    swc1    \reg, 0(AT)
+    mfhc1   \vreg, \reg
+    sw      \vreg, 4(AT)
+    .set at
+.endm
+
+/*
+ * On-stack offsets for spilling/unspilling callee-saved registers
+ * and the frame size.
+ */
+#define STACK_OFFSET_RA 0
+#define STACK_OFFSET_GP 8
+#define STACK_OFFSET_S0 16
+#define STACK_OFFSET_S1 24
+#define STACK_OFFSET_S2 32
+#define STACK_OFFSET_S3 40
+#define STACK_OFFSET_S4 48
+#define STACK_OFFSET_S5 56
+#define STACK_SIZE      64
+
+/* Constants for float/double_to_int/long conversions */
+#define INT_MIN             0x80000000
+#define INT_MIN_AS_FLOAT    0xCF000000
+#define INT_MIN_AS_DOUBLE   0xC1E0000000000000
+#define LONG_MIN            0x8000000000000000
+#define LONG_MIN_AS_FLOAT   0xDF000000
+#define LONG_MIN_AS_DOUBLE  0xC3E0000000000000
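The vreg accessors above encode a register file mirrored by a parallel reference array, with 64-bit values split into two aligned 32-bit halves; this C sketch shows the layout they assume (names illustrative):

    #include <stdint.h>

    /* fp mirrors rFP, refs mirrors rREFS. */
    static int64_t get_vreg_wide(const uint32_t* fp, int vreg) {
        uint32_t lo = fp[vreg];               /* lw \reg, 0(AT)      */
        uint32_t hi = fp[vreg + 1];           /* lw AT, 4(AT); dinsu */
        return (int64_t)(((uint64_t)hi << 32) | lo);
    }

    static void set_vreg(uint32_t* fp, uint32_t* refs, int vreg, int32_t val) {
        fp[vreg] = (uint32_t)val;             /* sw \reg, 0(AT)         */
        refs[vreg] = 0;                       /* value is not a reference */
    }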
diff --git a/runtime/interpreter/mterp/mips64/invoke.S b/runtime/interpreter/mterp/mips64/invoke.S
new file mode 100644
index 0000000..4ae4fb1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/invoke.S
@@ -0,0 +1,17 @@
+%default { "helper":"UndefinedInvokeHandler" }
+    /*
+     * Generic invoke handler wrapper.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern $helper
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     $helper
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 3
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
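The wrapper marshals (self, shadow_frame, pc, inst_data) into a0-a3 and bails to the exception path when the helper fails; roughly, in C (signatures illustrative):

    #include <stdint.h>

    extern int invoke_helper(void* self, void* shadow_frame,
                             const uint16_t* pc, uint32_t inst_data);

    /* Returns the next pc, or NULL to unwind via MterpException. */
    static const uint16_t* do_invoke(void* self, void* frame,
                                     const uint16_t* pc, uint32_t inst) {
        if (!invoke_helper(self, frame, pc, inst))  /* beqzc v0, MterpException */
            return 0;
        return pc + 3;                              /* FETCH_ADVANCE_INST 3 */
    }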
diff --git a/runtime/interpreter/mterp/mips64/op_add_double.S b/runtime/interpreter/mterp/mips64/op_add_double.S
new file mode 100644
index 0000000..1520e32
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_double.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide.S" {"instr":"add.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_double_2addr.S b/runtime/interpreter/mterp/mips64/op_add_double_2addr.S
new file mode 100644
index 0000000..c14382e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_double_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide2addr.S" {"instr":"add.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_float.S b/runtime/interpreter/mterp/mips64/op_add_float.S
new file mode 100644
index 0000000..c6ed558
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_float.S
@@ -0,0 +1 @@
+%include "mips64/fbinop.S" {"instr":"add.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_float_2addr.S b/runtime/interpreter/mterp/mips64/op_add_float_2addr.S
new file mode 100644
index 0000000..4c20547
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_float_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinop2addr.S" {"instr":"add.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int.S b/runtime/interpreter/mterp/mips64/op_add_int.S
new file mode 100644
index 0000000..6e569de
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int_2addr.S b/runtime/interpreter/mterp/mips64/op_add_int_2addr.S
new file mode 100644
index 0000000..2a84124
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int_lit16.S b/runtime/interpreter/mterp/mips64/op_add_int_lit16.S
new file mode 100644
index 0000000..94b053b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int_lit8.S b/runtime/interpreter/mterp/mips64/op_add_int_lit8.S
new file mode 100644
index 0000000..3b6d734
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_long.S b/runtime/interpreter/mterp/mips64/op_add_long.S
new file mode 100644
index 0000000..c8d702f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"daddu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_long_2addr.S b/runtime/interpreter/mterp/mips64/op_add_long_2addr.S
new file mode 100644
index 0000000..928ff54
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"daddu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_aget.S b/runtime/interpreter/mterp/mips64/op_aget.S
new file mode 100644
index 0000000..0472a06
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget.S
@@ -0,0 +1,29 @@
+%default { "load":"lw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if $shift
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, $shift          # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    $load   a2, $data_offset(a0)        # a2 <- vBB[vCC]
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a2, a4                     # vAA <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
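The handler's null and bounds checks map onto two branches; note that the single unsigned compare (bgeu) rejects both negative and overlarge indices at once. The same checks as a C sketch, with an illustrative array layout standing in for the mirror object:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        int32_t length;    /* MIRROR_ARRAY_LENGTH_OFFSET */
        int32_t data[];    /* $data_offset */
    } IntArray;

    static int aget(const IntArray *arr, int32_t index, int32_t *out) {
        if (arr == NULL)
            return -1;                                  /* common_errNullObject */
        if ((uint32_t)index >= (uint32_t)arr->length)
            return -2;                                  /* common_errArrayIndex */
        *out = arr->data[index];                        /* $load at $data_offset */
        return 0;
    }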
diff --git a/runtime/interpreter/mterp/mips64/op_aget_boolean.S b/runtime/interpreter/mterp/mips64/op_aget_boolean.S
new file mode 100644
index 0000000..d5be01b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget_boolean.S
@@ -0,0 +1 @@
+%include "mips64/op_aget.S" { "load":"lbu", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_byte.S b/runtime/interpreter/mterp/mips64/op_aget_byte.S
new file mode 100644
index 0000000..084de8d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget_byte.S
@@ -0,0 +1 @@
+%include "mips64/op_aget.S" { "load":"lb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_char.S b/runtime/interpreter/mterp/mips64/op_aget_char.S
new file mode 100644
index 0000000..6c99ed5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget_char.S
@@ -0,0 +1 @@
+%include "mips64/op_aget.S" { "load":"lhu", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_object.S b/runtime/interpreter/mterp/mips64/op_aget_object.S
new file mode 100644
index 0000000..6374a05
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget_object.S
@@ -0,0 +1,21 @@
+    /*
+     * Array object get.  vAA <- vBB[vCC].
+     *
+     * for: aget-object
+     */
+    /* op vAA, vBB, vCC */
+    .extern artAGetObjectFromMterp
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    EXPORT_PC
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    jal     artAGetObjectFromMterp      # (array, index)
+    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
+    srl     a4, rINST, 8                # a4 <- AA
+    PREFETCH_INST 2
+    bnez    a1, MterpException
+    SET_VREG_OBJECT v0, a4              # vAA <- v0
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aget_short.S b/runtime/interpreter/mterp/mips64/op_aget_short.S
new file mode 100644
index 0000000..0158b0a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget_short.S
@@ -0,0 +1 @@
+%include "mips64/op_aget.S" { "load":"lh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_wide.S b/runtime/interpreter/mterp/mips64/op_aget_wide.S
new file mode 100644
index 0000000..0945aca
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget_wide.S
@@ -0,0 +1,21 @@
+    /*
+     * Array get, 64 bits.  vAA <- vBB[vCC].
+     */
+    /* aget-wide vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    lw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
+    lw      a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
+    dinsu   a2, a3, 32, 32              # a2 <- vBB[vCC]
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a2, a4                # vAA <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_and_int.S b/runtime/interpreter/mterp/mips64/op_and_int.S
new file mode 100644
index 0000000..f0792a8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_and_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_int_2addr.S b/runtime/interpreter/mterp/mips64/op_and_int_2addr.S
new file mode 100644
index 0000000..08dc615
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_and_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_int_lit16.S b/runtime/interpreter/mterp/mips64/op_and_int_lit16.S
new file mode 100644
index 0000000..65d28ad
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_and_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_int_lit8.S b/runtime/interpreter/mterp/mips64/op_and_int_lit8.S
new file mode 100644
index 0000000..ab84bb7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_and_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_long.S b/runtime/interpreter/mterp/mips64/op_and_long.S
new file mode 100644
index 0000000..e383ba0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_and_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_long_2addr.S b/runtime/interpreter/mterp/mips64/op_and_long_2addr.S
new file mode 100644
index 0000000..f863bb9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_and_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_aput.S b/runtime/interpreter/mterp/mips64/op_aput.S
new file mode 100644
index 0000000..9bfda97
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput.S
@@ -0,0 +1,29 @@
+%default { "store":"sw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if $shift
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, $shift          # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_VREG a2, a4                     # a2 <- vAA
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    $store  a2, $data_offset(a0)        # vBB[vCC] <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aput_boolean.S b/runtime/interpreter/mterp/mips64/op_aput_boolean.S
new file mode 100644
index 0000000..6707a1f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput_boolean.S
@@ -0,0 +1 @@
+%include "mips64/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_byte.S b/runtime/interpreter/mterp/mips64/op_aput_byte.S
new file mode 100644
index 0000000..7b9ce48
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput_byte.S
@@ -0,0 +1 @@
+%include "mips64/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_char.S b/runtime/interpreter/mterp/mips64/op_aput_char.S
new file mode 100644
index 0000000..82bc8f7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput_char.S
@@ -0,0 +1 @@
+%include "mips64/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_object.S b/runtime/interpreter/mterp/mips64/op_aput_object.S
new file mode 100644
index 0000000..b132456
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput_object.S
@@ -0,0 +1,14 @@
+    /*
+     * Store an object into an array.  vBB[vCC] <- vAA.
+     */
+    /* op vAA, vBB, vCC */
+    .extern MterpAputObject
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rINST
+    jal     MterpAputObject
+    beqzc   v0, MterpPossibleException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aput_short.S b/runtime/interpreter/mterp/mips64/op_aput_short.S
new file mode 100644
index 0000000..a7af294
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput_short.S
@@ -0,0 +1 @@
+%include "mips64/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_wide.S b/runtime/interpreter/mterp/mips64/op_aput_wide.S
new file mode 100644
index 0000000..a1d7a3b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput_wide.S
@@ -0,0 +1,21 @@
+    /*
+     * Array put, 64 bits.  vBB[vCC] <- vAA.
+     */
+    /* aput-wide vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    GET_VREG_WIDE a2, a4                # a2 <- vAA
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    sw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
+    dsrl32  a2, a2, 0
+    sw      a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)  # vBB[vCC] <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_array_length.S b/runtime/interpreter/mterp/mips64/op_array_length.S
new file mode 100644
index 0000000..2d9e172
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_array_length.S
@@ -0,0 +1,12 @@
+    /*
+     * Return the length of an array.
+     */
+    srl     a1, rINST, 12               # a1 <- B
+    GET_VREG_U a0, a1                   # a0 <- vB (object ref)
+    ext     a2, rINST, 8, 4             # a2 <- A
+    beqz    a0, common_errNullObject    # bail if null object
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- array length
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a3, a2                     # vB <- length
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_check_cast.S b/runtime/interpreter/mterp/mips64/op_check_cast.S
new file mode 100644
index 0000000..472595d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_check_cast.S
@@ -0,0 +1,17 @@
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast vAA, class//BBBB */
+    .extern MterpCheckCast
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- BBBB
+    srl     a1, rINST, 8                # a1 <- AA
+    dlsa    a1, a1, rFP, 2              # a1 <- &object
+    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
+    move    a3, rSELF                   # a3 <- self
+    jal     MterpCheckCast              # (index, &obj, method, self)
+    PREFETCH_INST 2
+    bnez    v0, MterpPossibleException
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_cmp_long.S b/runtime/interpreter/mterp/mips64/op_cmp_long.S
new file mode 100644
index 0000000..6e9376c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_cmp_long.S
@@ -0,0 +1,13 @@
+    /* cmp-long vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    slt     a2, a0, a1
+    slt     a0, a1, a0
+    subu    a0, a0, a2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- result
+    GOTO_OPCODE v0                      # jump to next instruction
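The slt/slt/subu triple computes the three-way result without branches: (vBB > vCC) - (vBB < vCC). Equivalent C, for reference:

    #include <stdint.h>

    /* Branch-free three-way compare, as the slt/slt/subu sequence computes. */
    static int32_t cmp_long(int64_t a, int64_t b) {
        return (int32_t)((a > b) - (a < b));   /* 1, 0, or -1 */
    }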
diff --git a/runtime/interpreter/mterp/mips64/op_cmpg_double.S b/runtime/interpreter/mterp/mips64/op_cmpg_double.S
new file mode 100644
index 0000000..a8e2ef9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_cmpg_double.S
@@ -0,0 +1 @@
+%include "mips64/fcmpWide.S" {"gt_bias":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_cmpg_float.S b/runtime/interpreter/mterp/mips64/op_cmpg_float.S
new file mode 100644
index 0000000..0c93eac
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_cmpg_float.S
@@ -0,0 +1 @@
+%include "mips64/fcmp.S" {"gt_bias":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_cmpl_double.S b/runtime/interpreter/mterp/mips64/op_cmpl_double.S
new file mode 100644
index 0000000..9111b06
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_cmpl_double.S
@@ -0,0 +1 @@
+%include "mips64/fcmpWide.S" {"gt_bias":"0"}
diff --git a/runtime/interpreter/mterp/mips64/op_cmpl_float.S b/runtime/interpreter/mterp/mips64/op_cmpl_float.S
new file mode 100644
index 0000000..b047451
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_cmpl_float.S
@@ -0,0 +1 @@
+%include "mips64/fcmp.S" {"gt_bias":"0"}
diff --git a/runtime/interpreter/mterp/mips64/op_const.S b/runtime/interpreter/mterp/mips64/op_const.S
new file mode 100644
index 0000000..4b0d69b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const.S
@@ -0,0 +1,9 @@
+    /* const vAA, #+BBBBbbbb */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
+    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- +BBBBbbbb
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_16.S b/runtime/interpreter/mterp/mips64/op_const_16.S
new file mode 100644
index 0000000..51e68a7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_16.S
@@ -0,0 +1,7 @@
+    /* const/16 vAA, #+BBBB */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- +BBBB
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_4.S b/runtime/interpreter/mterp/mips64/op_const_4.S
new file mode 100644
index 0000000..0a58bff
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_4.S
@@ -0,0 +1,8 @@
+    /* const/4 vA, #+B */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    seh     a0, rINST                   # sign extend B in rINST
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    sra     a0, a0, 12                  # shift B into its final position
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- +B
+    GOTO_OPCODE v0                      # jump to next instruction
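The seh/sra pair extracts the literal: B occupies bits 15..12 of the code unit, so sign-extending the halfword and then arithmetic-shifting right by 12 yields the signed nibble. The same extraction in C:

    #include <stdint.h>

    /* Signed 4-bit literal B from bits 15..12 of the first code unit. */
    static int32_t const4_literal(uint16_t inst) {
        return (int32_t)(int16_t)inst >> 12;
    }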
diff --git a/runtime/interpreter/mterp/mips64/op_const_class.S b/runtime/interpreter/mterp/mips64/op_const_class.S
new file mode 100644
index 0000000..adf79df
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_class.S
@@ -0,0 +1,13 @@
+    /* const/class vAA, Class//BBBB */
+    .extern MterpConstClass
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- BBBB
+    srl     a1, rINST, 8                # a1 <- AA
+    daddu   a2, rFP, OFF_FP_SHADOWFRAME
+    move    a3, rSELF
+    jal     MterpConstClass             # (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 2                     # load rINST
+    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
+    ADVANCE 2                           # advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_high16.S b/runtime/interpreter/mterp/mips64/op_const_high16.S
new file mode 100644
index 0000000..43effb6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_high16.S
@@ -0,0 +1,8 @@
+    /* const/high16 vAA, #+BBBB0000 */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- BBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    sll     a0, a0, 16                  # a0 <- BBBB0000
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- +BBBB0000
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_string.S b/runtime/interpreter/mterp/mips64/op_const_string.S
new file mode 100644
index 0000000..4684c11
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_string.S
@@ -0,0 +1,13 @@
+    /* const/string vAA, String//BBBB */
+    .extern MterpConstString
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- BBBB
+    srl     a1, rINST, 8                # a1 <- AA
+    daddu   a2, rFP, OFF_FP_SHADOWFRAME
+    move    a3, rSELF
+    jal     MterpConstString            # (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 2                     # load rINST
+    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
+    ADVANCE 2                           # advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S b/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S
new file mode 100644
index 0000000..47f2101
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S
@@ -0,0 +1,15 @@
+    /* const/string vAA, String//BBBBBBBB */
+    .extern MterpConstString
+    EXPORT_PC
+    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
+    lh      a4, 4(rPC)                  # a4 <- BBBB (high)
+    srl     a1, rINST, 8                # a1 <- AA
+    ins     a0, a4, 16, 16              # a0 <- BBBBbbbb
+    daddu   a2, rFP, OFF_FP_SHADOWFRAME
+    move    a3, rSELF
+    jal     MterpConstString            # (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 3                     # load rINST
+    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
+    ADVANCE 3                           # advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide.S b/runtime/interpreter/mterp/mips64/op_const_wide.S
new file mode 100644
index 0000000..f7eaf7c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_wide.S
@@ -0,0 +1,13 @@
+    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+    srl     a4, rINST, 8                # a4 <- AA
+    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
+    lh      a1, 4(rPC)                  # a1 <- BBBB (low middle)
+    lh      a2, 6(rPC)                  # a2 <- hhhh (high middle)
+    lh      a3, 8(rPC)                  # a3 <- HHHH (high)
+    FETCH_ADVANCE_INST 5                # advance rPC, load rINST
+    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
+    ins     a2, a3, 16, 16              # a2 = HHHHhhhh
+    dinsu   a0, a2, 32, 32              # a0 = HHHHhhhhBBBBbbbb
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- +HHHHhhhhBBBBbbbb
+    GOTO_OPCODE v0                      # jump to next instruction
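The four lh loads plus the ins/ins/dinsu sequence reassemble the 64-bit literal from its four little-endian 16-bit code units. A C equivalent of the reassembly (pc points at the opcode unit):

    #include <stdint.h>

    static int64_t const_wide_literal(const uint16_t *pc) {
        uint64_t lo = (uint32_t)pc[1] | ((uint32_t)pc[2] << 16);  /* BBBBbbbb */
        uint64_t hi = (uint32_t)pc[3] | ((uint32_t)pc[4] << 16);  /* HHHHhhhh */
        return (int64_t)((hi << 32) | lo);
    }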
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_16.S b/runtime/interpreter/mterp/mips64/op_const_wide_16.S
new file mode 100644
index 0000000..3a70937
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_wide_16.S
@@ -0,0 +1,7 @@
+    /* const-wide/16 vAA, #+BBBB */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- +BBBB
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_32.S b/runtime/interpreter/mterp/mips64/op_const_wide_32.S
new file mode 100644
index 0000000..867197c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_wide_32.S
@@ -0,0 +1,9 @@
+    /* const-wide/32 vAA, #+BBBBbbbb */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
+    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- +BBBBbbbb
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_high16.S b/runtime/interpreter/mterp/mips64/op_const_wide_high16.S
new file mode 100644
index 0000000..d741631
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_wide_high16.S
@@ -0,0 +1,8 @@
+    /* const-wide/high16 vAA, #+BBBB000000000000 */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- BBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    dsll32  a0, a0, 16                  # a0 <- BBBB000000000000
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- +BBBB000000000000
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_div_double.S b/runtime/interpreter/mterp/mips64/op_div_double.S
new file mode 100644
index 0000000..44998f0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_double.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide.S" {"instr":"div.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_double_2addr.S b/runtime/interpreter/mterp/mips64/op_div_double_2addr.S
new file mode 100644
index 0000000..396af79
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_double_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide2addr.S" {"instr":"div.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_float.S b/runtime/interpreter/mterp/mips64/op_div_float.S
new file mode 100644
index 0000000..7b09d52
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_float.S
@@ -0,0 +1 @@
+%include "mips64/fbinop.S" {"instr":"div.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_float_2addr.S b/runtime/interpreter/mterp/mips64/op_div_float_2addr.S
new file mode 100644
index 0000000..e74fdda
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_float_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinop2addr.S" {"instr":"div.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int.S b/runtime/interpreter/mterp/mips64/op_div_int.S
new file mode 100644
index 0000000..fb04acb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int_2addr.S b/runtime/interpreter/mterp/mips64/op_div_int_2addr.S
new file mode 100644
index 0000000..db29b84
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int_lit16.S b/runtime/interpreter/mterp/mips64/op_div_int_lit16.S
new file mode 100644
index 0000000..e903dde
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int_lit8.S b/runtime/interpreter/mterp/mips64/op_div_int_lit8.S
new file mode 100644
index 0000000..0559605
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_long.S b/runtime/interpreter/mterp/mips64/op_div_long.S
new file mode 100644
index 0000000..01fc2b2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"ddiv a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_long_2addr.S b/runtime/interpreter/mterp/mips64/op_div_long_2addr.S
new file mode 100644
index 0000000..9627ab8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"ddiv a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_float.S b/runtime/interpreter/mterp/mips64/op_double_to_float.S
new file mode 100644
index 0000000..2b2acee
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_double_to_float.S
@@ -0,0 +1,8 @@
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input into, and store the output from, a
+     * floating-point register irrespective of the type.
+     */
+%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
+    cvt.s.d f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_int.S b/runtime/interpreter/mterp/mips64/op_double_to_int.S
new file mode 100644
index 0000000..aa2cbca
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_double_to_int.S
@@ -0,0 +1,23 @@
+%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
+    dli     t0, INT_MIN_AS_DOUBLE
+    dmtc1   t0, f1
+    cmp.le.d f1, f1, f0
+    bc1nez  f1, .L${opcode}_trunc
+    cmp.eq.d f1, f0, f0
+    li      t0, INT_MIN
+    mfc1    t1, f1
+    and     t0, t0, t1
+    b       .L${opcode}_done
+%break
+.L${opcode}_trunc:
+    trunc.w.d f0, f0
+    mfc1    t0, f0
+.L${opcode}_done:
+    /* Can't include fcvtFooter.S after break */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG t0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
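The compare-and-mask sequence reproduces Java's conversion rules: NaN converts to 0, values below the int range saturate to INT_MIN, and everything else (including positive overflow, which trunc.w.d itself saturates) truncates toward zero. The target semantics, as a C reference:

    #include <stdint.h>

    /* Java FP-to-int semantics the handler implements (reference only). */
    static int32_t java_d2i(double v) {
        if (v != v) return 0;                         /* NaN */
        if (v <= (double)INT32_MIN) return INT32_MIN; /* too small */
        if (v >= (double)INT32_MAX) return INT32_MAX; /* too large */
        return (int32_t)v;                            /* truncate toward zero */
    }

The long variants below follow the same pattern with LONG_MIN and the .l.d/.l.s truncations.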
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_long.S b/runtime/interpreter/mterp/mips64/op_double_to_long.S
new file mode 100644
index 0000000..777cfeb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_double_to_long.S
@@ -0,0 +1,23 @@
+%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
+    dli     t0, LONG_MIN_AS_DOUBLE
+    dmtc1   t0, f1
+    cmp.le.d f1, f1, f0
+    bc1nez  f1, .L${opcode}_trunc
+    cmp.eq.d f1, f0, f0
+    dli     t0, LONG_MIN
+    mfc1    t1, f1
+    and     t0, t0, t1
+    b       .L${opcode}_done
+%break
+.L${opcode}_trunc:
+    trunc.l.d f0, f0
+    dmfc1   t0, f0
+.L${opcode}_done:
+    /* Can't include fcvtFooter.S after break */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE t0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_fill_array_data.S b/runtime/interpreter/mterp/mips64/op_fill_array_data.S
new file mode 100644
index 0000000..c90f0b9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_fill_array_data.S
@@ -0,0 +1,14 @@
+    /* fill-array-data vAA, +BBBBBBBB */
+    .extern MterpFillArrayData
+    EXPORT_PC
+    lh      a1, 2(rPC)                  # a1 <- bbbb (lo)
+    lh      a0, 4(rPC)                  # a0 <- BBBB (hi)
+    srl     a3, rINST, 8                # a3 <- AA
+    ins     a1, a0, 16, 16              # a1 <- BBBBbbbb
+    GET_VREG_U a0, a3                   # a0 <- vAA (array object)
+    dlsa    a1, a1, rPC, 1              # a1 <- PC + BBBBbbbb*2 (array data off.)
+    jal     MterpFillArrayData          # (obj, payload)
+    beqzc   v0, MterpPossibleException  # exception?
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_filled_new_array.S b/runtime/interpreter/mterp/mips64/op_filled_new_array.S
new file mode 100644
index 0000000..35f55c2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_filled_new_array.S
@@ -0,0 +1,18 @@
+%default { "helper":"MterpFilledNewArray" }
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
+    .extern $helper
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rSELF
+    jal     $helper
+    beqzc   v0, MterpPossibleException
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S b/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S
new file mode 100644
index 0000000..a4e18f6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S
@@ -0,0 +1 @@
+%include "mips64/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_double.S b/runtime/interpreter/mterp/mips64/op_float_to_double.S
new file mode 100644
index 0000000..6accfee
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_float_to_double.S
@@ -0,0 +1,8 @@
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input into, and store the output from, a
+     * floating-point register irrespective of the type.
+     */
+%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
+    cvt.d.s f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_int.S b/runtime/interpreter/mterp/mips64/op_float_to_int.S
new file mode 100644
index 0000000..d957540
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_float_to_int.S
@@ -0,0 +1,23 @@
+%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
+    li      t0, INT_MIN_AS_FLOAT
+    mtc1    t0, f1
+    cmp.le.s f1, f1, f0
+    bc1nez  f1, .L${opcode}_trunc
+    cmp.eq.s f1, f0, f0
+    li      t0, INT_MIN
+    mfc1    t1, f1
+    and     t0, t0, t1
+    b       .L${opcode}_done
+%break
+.L${opcode}_trunc:
+    trunc.w.s f0, f0
+    mfc1    t0, f0
+.L${opcode}_done:
+    /* Can't include fcvtFooter.S after break */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG t0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_long.S b/runtime/interpreter/mterp/mips64/op_float_to_long.S
new file mode 100644
index 0000000..5d036c8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_float_to_long.S
@@ -0,0 +1,23 @@
+%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
+    li      t0, LONG_MIN_AS_FLOAT
+    mtc1    t0, f1
+    cmp.le.s f1, f1, f0
+    bc1nez  f1, .L${opcode}_trunc
+    cmp.eq.s f1, f0, f0
+    dli     t0, LONG_MIN
+    mfc1    t1, f1
+    and     t0, t0, t1
+    b       .L${opcode}_done
+%break
+.L${opcode}_trunc:
+    trunc.l.s f0, f0
+    dmfc1   t0, f0
+.L${opcode}_done:
+    /* Can't include fcvtFooter.S after break */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE t0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_goto.S b/runtime/interpreter/mterp/mips64/op_goto.S
new file mode 100644
index 0000000..f2df3e4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_goto.S
@@ -0,0 +1,23 @@
+    /*
+     * Unconditional branch, 8-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto +AA */
+    srl     a0, rINST, 8
+    seb     a0, a0                      # a0 <- sign-extended AA
+    dlsa    rPC, a0, rPC, 1             # rPC <- rPC + AA * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a0, 1f                      # AA * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+1:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a0, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
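As the comment says, AA is a signed code-unit count, so dlsa folds the *2 byte scaling into the address arithmetic; backward branches (AA < 0) additionally route through the suspend check. The target computation in C, for reference:

    #include <stdint.h>

    /* New dex PC for goto +AA: a signed code-unit offset, doubled to bytes
       by the uint16_t pointer arithmetic. */
    static uint16_t *goto_target(uint16_t *pc, uint16_t inst) {
        int32_t aa = (int8_t)(inst >> 8);   /* sign-extended AA */
        return pc + aa;
    }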
diff --git a/runtime/interpreter/mterp/mips64/op_goto_16.S b/runtime/interpreter/mterp/mips64/op_goto_16.S
new file mode 100644
index 0000000..cbf8cf2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_goto_16.S
@@ -0,0 +1,22 @@
+    /*
+     * Unconditional branch, 16-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto/16 +AAAA */
+    lh      a0, 2(rPC)                  # a0 <- sign-extended AAAA
+    dlsa    rPC, a0, rPC, 1             # rPC <- rPC + AAAA * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a0, 1f                      # AAAA * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+1:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a0, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_goto_32.S b/runtime/interpreter/mterp/mips64/op_goto_32.S
new file mode 100644
index 0000000..4a1feac
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_goto_32.S
@@ -0,0 +1,27 @@
+    /*
+     * Unconditional branch, 32-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     *
+     * Unlike most opcodes, this one is allowed to branch to itself, so
+     * our "backward branch" test must be "<=0" instead of "<0".
+     */
+    /* goto/32 +AAAAAAAA */
+    lh      a0, 2(rPC)                  # a0 <- aaaa (low)
+    lh      a1, 4(rPC)                  # a1 <- AAAA (high)
+    ins     a0, a1, 16, 16              # a0 = sign-extended AAAAaaaa
+    dlsa    rPC, a0, rPC, 1             # rPC <- rPC + AAAAAAAA * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgtz    a0, 1f                      # AAAAAAAA * 2 > 0 => no suspend check
+    REFRESH_IBASE
+1:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    blez    a0, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_if_eq.S b/runtime/interpreter/mterp/mips64/op_if_eq.S
new file mode 100644
index 0000000..aa35cad
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_eq.S
@@ -0,0 +1 @@
+%include "mips64/bincmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_eqz.S b/runtime/interpreter/mterp/mips64/op_if_eqz.S
new file mode 100644
index 0000000..0fe3418
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_eqz.S
@@ -0,0 +1 @@
+%include "mips64/zcmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_ge.S b/runtime/interpreter/mterp/mips64/op_if_ge.S
new file mode 100644
index 0000000..59fdcc5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_ge.S
@@ -0,0 +1 @@
+%include "mips64/bincmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_gez.S b/runtime/interpreter/mterp/mips64/op_if_gez.S
new file mode 100644
index 0000000..57f1f66
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_gez.S
@@ -0,0 +1 @@
+%include "mips64/zcmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_gt.S b/runtime/interpreter/mterp/mips64/op_if_gt.S
new file mode 100644
index 0000000..26cc119
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_gt.S
@@ -0,0 +1 @@
+%include "mips64/bincmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_gtz.S b/runtime/interpreter/mterp/mips64/op_if_gtz.S
new file mode 100644
index 0000000..69fcacb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_gtz.S
@@ -0,0 +1 @@
+%include "mips64/zcmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_le.S b/runtime/interpreter/mterp/mips64/op_if_le.S
new file mode 100644
index 0000000..a7fce17
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_le.S
@@ -0,0 +1 @@
+%include "mips64/bincmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_lez.S b/runtime/interpreter/mterp/mips64/op_if_lez.S
new file mode 100644
index 0000000..f3edcc6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_lez.S
@@ -0,0 +1 @@
+%include "mips64/zcmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_lt.S b/runtime/interpreter/mterp/mips64/op_if_lt.S
new file mode 100644
index 0000000..a975a31
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_lt.S
@@ -0,0 +1 @@
+%include "mips64/bincmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_ltz.S b/runtime/interpreter/mterp/mips64/op_if_ltz.S
new file mode 100644
index 0000000..c1d730d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_ltz.S
@@ -0,0 +1 @@
+%include "mips64/zcmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_ne.S b/runtime/interpreter/mterp/mips64/op_if_ne.S
new file mode 100644
index 0000000..f143ee9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_ne.S
@@ -0,0 +1 @@
+%include "mips64/bincmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_nez.S b/runtime/interpreter/mterp/mips64/op_if_nez.S
new file mode 100644
index 0000000..1856b96
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_nez.S
@@ -0,0 +1 @@
+%include "mips64/zcmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget.S b/runtime/interpreter/mterp/mips64/op_iget.S
new file mode 100644
index 0000000..ade4b31
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget.S
@@ -0,0 +1,26 @@
+%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    .extern $helper
+    EXPORT_PC
+    lhu      a0, 2(rPC)                 # a0 <- field ref CCCC
+    srl      a1, rINST, 12              # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ld       a2, OFF_FP_METHOD(rFP)     # a2 <- referrer
+    move     a3, rSELF                  # a3 <- self
+    jal      $helper
+    ld       a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext      a2, rINST, 8, 4            # a2 <- A
+    PREFETCH_INST 2
+    bnez     a3, MterpPossibleException # bail out
+    .if $is_object
+    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[A] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
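All non-quick iget forms share this shape, differing only in $helper and in whether the result lands in a plain or an object vreg. A sketch of the helper contract the wrapper assumes — the argument order matches the a0-a3 setup above, but the pointer types are stand-ins, not ART's declarations:

    #include <stdint.h>

    /* On failure the helper leaves an exception pending on `self` rather
       than returning a sentinel, which is why the wrapper reloads
       THREAD_EXCEPTION_OFFSET(rSELF) after the call. */
    int32_t artGet32InstanceFromCode(uint32_t field_idx, void *obj,
                                     void *referrer, void *self);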
diff --git a/runtime/interpreter/mterp/mips64/op_iget_boolean.S b/runtime/interpreter/mterp/mips64/op_iget_boolean.S
new file mode 100644
index 0000000..cb2c8be
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_boolean.S
@@ -0,0 +1 @@
+%include "mips64/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S b/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S
new file mode 100644
index 0000000..979dc70
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iget_quick.S" { "load":"lbu" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_byte.S b/runtime/interpreter/mterp/mips64/op_iget_byte.S
new file mode 100644
index 0000000..099d8d0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_byte.S
@@ -0,0 +1 @@
+%include "mips64/op_iget.S" { "helper":"artGetByteInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S b/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S
new file mode 100644
index 0000000..cb35556
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iget_quick.S" { "load":"lb" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_char.S b/runtime/interpreter/mterp/mips64/op_iget_char.S
new file mode 100644
index 0000000..927b7af
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_char.S
@@ -0,0 +1 @@
+%include "mips64/op_iget.S" { "helper":"artGetCharInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_char_quick.S b/runtime/interpreter/mterp/mips64/op_iget_char_quick.S
new file mode 100644
index 0000000..6034567
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_char_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iget_quick.S" { "load":"lhu" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_object.S b/runtime/interpreter/mterp/mips64/op_iget_object.S
new file mode 100644
index 0000000..c658556
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_object.S
@@ -0,0 +1 @@
+%include "mips64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_object_quick.S b/runtime/interpreter/mterp/mips64/op_iget_object_quick.S
new file mode 100644
index 0000000..171d543
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_object_quick.S
@@ -0,0 +1,16 @@
+    /* For: iget-object-quick */
+    /* op vA, vB, offset//CCCC */
+    .extern artIGetObjectFromMterp
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    EXPORT_PC
+    GET_VREG_U a0, a2                   # a0 <- object we're operating on
+    jal     artIGetObjectFromMterp      # (obj, offset)
+    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext     a2, rINST, 8, 4             # a2 <- A
+    PREFETCH_INST 2
+    bnez    a3, MterpPossibleException  # bail out
+    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
+    ADVANCE 2                           # advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iget_quick.S b/runtime/interpreter/mterp/mips64/op_iget_quick.S
new file mode 100644
index 0000000..fee6ab7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_quick.S
@@ -0,0 +1,14 @@
+%default { "load":"lw" }
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    /* op vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- object we're operating on
+    ext     a4, rINST, 8, 4             # a4 <- A
+    daddu   a1, a1, a3
+    beqz    a3, common_errNullObject    # object was null
+    $load   a0, 0(a1)                   # a0 <- obj.field
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    SET_VREG a0, a4                     # fp[A] <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
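Quickened field gets skip resolution entirely: the verifier has already rewritten CCCC into a raw byte offset, so the handler is a null check plus one load (the daddu is scheduled before the null branch, which still keys off the object pointer). Illustrative C:

    #include <stdint.h>
    #include <string.h>

    /* iget-quick: CCCC is a pre-resolved byte offset into the object. */
    static int32_t iget_quick(const uint8_t *obj, uint16_t offset) {
        int32_t value;
        /* the caller has already branched to common_errNullObject on NULL */
        memcpy(&value, obj + offset, sizeof(value));
        return value;
    }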
diff --git a/runtime/interpreter/mterp/mips64/op_iget_short.S b/runtime/interpreter/mterp/mips64/op_iget_short.S
new file mode 100644
index 0000000..28b5093
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_short.S
@@ -0,0 +1 @@
+%include "mips64/op_iget.S" { "helper":"artGetShortInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_short_quick.S b/runtime/interpreter/mterp/mips64/op_iget_short_quick.S
new file mode 100644
index 0000000..6e152db
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_short_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iget_quick.S" { "load":"lh" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_wide.S b/runtime/interpreter/mterp/mips64/op_iget_wide.S
new file mode 100644
index 0000000..85cf670
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_wide.S
@@ -0,0 +1,21 @@
+    /*
+     * 64-bit instance field get.
+     *
+     * for: iget-wide
+     */
+    .extern artGet64InstanceFromCode
+    EXPORT_PC
+    lhu      a0, 2(rPC)                 # a0 <- field ref CCCC
+    srl      a1, rINST, 12              # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ld       a2, OFF_FP_METHOD(rFP)     # a2 <- referrer
+    move     a3, rSELF                  # a3 <- self
+    jal      artGet64InstanceFromCode
+    ld       a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext      a2, rINST, 8, 4            # a2 <- A
+    PREFETCH_INST 2
+    bnez     a3, MterpPossibleException # bail out
+    SET_VREG_WIDE v0, a2                # fp[A] <- v0
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S b/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S
new file mode 100644
index 0000000..2adc6ad
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S
@@ -0,0 +1,14 @@
+    /* iget-wide-quick vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a4, 2(rPC)                  # a4 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- object we're operating on
+    ext     a2, rINST, 8, 4             # a2 <- A
+    beqz    a3, common_errNullObject    # object was null
+    daddu   a4, a3, a4                  # create direct pointer
+    lw      a0, 0(a4)
+    lw      a1, 4(a4)
+    dinsu   a0, a1, 32, 32
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    SET_VREG_WIDE a0, a2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_instance_of.S b/runtime/interpreter/mterp/mips64/op_instance_of.S
new file mode 100644
index 0000000..39a5dc7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_instance_of.S
@@ -0,0 +1,23 @@
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * The most common situation is a non-null object being compared
+     * against an already-resolved class.
+     */
+    /* instance-of vA, vB, class//CCCC */
+    .extern MterpInstanceOf
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- CCCC
+    srl     a1, rINST, 12               # a1 <- B
+    dlsa    a1, a1, rFP, 2              # a1 <- &object
+    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
+    move    a3, rSELF                   # a3 <- self
+    jal     MterpInstanceOf             # (index, &obj, method, self)
+    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext     a2, rINST, 8, 4             # a2 <- A
+    PREFETCH_INST 2
+    bnez    a1, MterpException
+    ADVANCE 2                           # advance rPC
+    SET_VREG v0, a2                     # vA <- v0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_byte.S b/runtime/interpreter/mterp/mips64/op_int_to_byte.S
new file mode 100644
index 0000000..1993e07
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_int_to_byte.S
@@ -0,0 +1 @@
+%include "mips64/unop.S" {"instr":"seb     a0, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_char.S b/runtime/interpreter/mterp/mips64/op_int_to_char.S
new file mode 100644
index 0000000..8f03acd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_int_to_char.S
@@ -0,0 +1 @@
+%include "mips64/unop.S" {"instr":"and     a0, a0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_double.S b/runtime/interpreter/mterp/mips64/op_int_to_double.S
new file mode 100644
index 0000000..6df71be
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_int_to_double.S
@@ -0,0 +1,8 @@
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input into, and store the output from, a
+     * floating-point register irrespective of the type.
+     */
+%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
+    cvt.d.w f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_float.S b/runtime/interpreter/mterp/mips64/op_int_to_float.S
new file mode 100644
index 0000000..77e9eba
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_int_to_float.S
@@ -0,0 +1,8 @@
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input into, and store the output from, a
+     * floating-point register irrespective of the type.
+     */
+%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
+    cvt.s.w f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_long.S b/runtime/interpreter/mterp/mips64/op_int_to_long.S
new file mode 100644
index 0000000..7b9ad86
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_int_to_long.S
@@ -0,0 +1,8 @@
+    /* int-to-long vA, vB */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB (sign-extended to 64 bits)
+    ext     a2, rINST, 8, 4             # a2 <- A
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- vB
+    GOTO_OPCODE v0                      # jump to next instruction
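
Since GET_VREG already sign-extends the 32-bit vreg value, int-to-long needs no extra arithmetic, just a wide store; the net effect in C:

    #include <stdint.h>

    /* int-to-long is pure sign extension of the 32-bit source. */
    static int64_t int_to_long(int32_t v) { return (int64_t)v; }
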
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_short.S b/runtime/interpreter/mterp/mips64/op_int_to_short.S
new file mode 100644
index 0000000..4a3f234
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_int_to_short.S
@@ -0,0 +1 @@
+%include "mips64/unop.S" {"instr":"seh     a0, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_direct.S b/runtime/interpreter/mterp/mips64/op_invoke_direct.S
new file mode 100644
index 0000000..5047118
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_direct.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S b/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S
new file mode 100644
index 0000000..5c9b95f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_interface.S b/runtime/interpreter/mterp/mips64/op_invoke_interface.S
new file mode 100644
index 0000000..ed148ad
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_interface.S
@@ -0,0 +1,8 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeInterface" }
+    /*
+     * Handle an interface method call.
+     *
+     * for: invoke-interface, invoke-interface/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S b/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S
new file mode 100644
index 0000000..91c231e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_static.S b/runtime/interpreter/mterp/mips64/op_invoke_static.S
new file mode 100644
index 0000000..44f5cb7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_static.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeStatic" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_static_range.S b/runtime/interpreter/mterp/mips64/op_invoke_static_range.S
new file mode 100644
index 0000000..289e5aa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_static_range.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_super.S b/runtime/interpreter/mterp/mips64/op_invoke_super.S
new file mode 100644
index 0000000..b13fffe
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_super.S
@@ -0,0 +1,8 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeSuper" }
+    /*
+     * Handle a "super" method call.
+     *
+     * for: invoke-super, invoke-super/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_super_range.S b/runtime/interpreter/mterp/mips64/op_invoke_super_range.S
new file mode 100644
index 0000000..350b975
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_super_range.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual.S
new file mode 100644
index 0000000..0d26cda
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_virtual.S
@@ -0,0 +1,8 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeVirtual" }
+    /*
+     * Handle a virtual method call.
+     *
+     * for: invoke-virtual, invoke-virtual/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S
new file mode 100644
index 0000000..f39562c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S
new file mode 100644
index 0000000..0bb43f8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S
new file mode 100644
index 0000000..c448851
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput.S b/runtime/interpreter/mterp/mips64/op_iput.S
new file mode 100644
index 0000000..a906a0f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput.S
@@ -0,0 +1,21 @@
+%default { "helper":"artSet32InstanceFromMterp" }
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field//CCCC */
+    .extern $helper
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref CCCC
+    srl     a1, rINST, 12               # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ext     a2, rINST, 8, 4             # a2 <- A
+    GET_VREG a2, a2                     # a2 <- fp[A]
+    ld      a3, OFF_FP_METHOD(rFP)      # a3 <- referrer
+    PREFETCH_INST 2
+    jal     $helper
+    bnez    v0, MterpPossibleException  # bail out
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_boolean.S b/runtime/interpreter/mterp/mips64/op_iput_boolean.S
new file mode 100644
index 0000000..3034fa5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_boolean.S
@@ -0,0 +1 @@
+%include "mips64/op_iput.S" { "helper":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S b/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S
new file mode 100644
index 0000000..df99948
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_byte.S b/runtime/interpreter/mterp/mips64/op_iput_byte.S
new file mode 100644
index 0000000..3034fa5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_byte.S
@@ -0,0 +1 @@
+%include "mips64/op_iput.S" { "helper":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S b/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S
new file mode 100644
index 0000000..df99948
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_char.S b/runtime/interpreter/mterp/mips64/op_iput_char.S
new file mode 100644
index 0000000..4c2fa28
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_char.S
@@ -0,0 +1 @@
+%include "mips64/op_iput.S" { "helper":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_char_quick.S b/runtime/interpreter/mterp/mips64/op_iput_char_quick.S
new file mode 100644
index 0000000..a6286b7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_char_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_object.S b/runtime/interpreter/mterp/mips64/op_iput_object.S
new file mode 100644
index 0000000..9a42f54
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_object.S
@@ -0,0 +1,11 @@
+    .extern MterpIputObject
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rINST
+    move    a3, rSELF
+    jal     MterpIputObject
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_object_quick.S b/runtime/interpreter/mterp/mips64/op_iput_object_quick.S
new file mode 100644
index 0000000..658ef42
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_object_quick.S
@@ -0,0 +1,10 @@
+    .extern MterpIputObjectQuick
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rINST
+    jal     MterpIputObjectQuick
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_quick.S b/runtime/interpreter/mterp/mips64/op_iput_quick.S
new file mode 100644
index 0000000..b95adfc
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_quick.S
@@ -0,0 +1,14 @@
+%default { "store":"sw" }
+    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+    /* op vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
+    ext     a2, rINST, 8, 4             # a2 <- A
+    beqz    a3, common_errNullObject    # object was null
+    GET_VREG a0, a2                     # a0 <- fp[A]
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    daddu   a1, a1, a3
+    $store  a0, 0(a1)                   # obj.field <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
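
A quickened put is a null check plus a store at a precomputed byte offset, sketched here with the object modeled as a raw byte pointer:

    #include <stdint.h>
    #include <stddef.h>

    /* Returns 0 on success, -1 on the common_errNullObject path. */
    static int iput_quick(uint8_t *obj, uint16_t field_offset, uint32_t value) {
        if (obj == NULL)
            return -1;                              /* beqz a3, common_errNullObject */
        *(uint32_t *)(obj + field_offset) = value;  /* $store a0, 0(a1) */
        return 0;
    }
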
diff --git a/runtime/interpreter/mterp/mips64/op_iput_short.S b/runtime/interpreter/mterp/mips64/op_iput_short.S
new file mode 100644
index 0000000..4c2fa28
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_short.S
@@ -0,0 +1 @@
+%include "mips64/op_iput.S" { "helper":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_short_quick.S b/runtime/interpreter/mterp/mips64/op_iput_short_quick.S
new file mode 100644
index 0000000..a6286b7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_short_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_wide.S b/runtime/interpreter/mterp/mips64/op_iput_wide.S
new file mode 100644
index 0000000..9b790f8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_wide.S
@@ -0,0 +1,15 @@
+    /* iput-wide vA, vB, field//CCCC */
+    .extern artSet64InstanceFromMterp
+    EXPORT_PC
+    lhu      a0, 2(rPC)                 # a0 <- field ref CCCC
+    srl      a1, rINST, 12              # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ext      a2, rINST, 8, 4            # a2 <- A
+    dlsa     a2, a2, rFP, 2             # a2 <- &fp[A]
+    ld       a3, OFF_FP_METHOD(rFP)     # a3 <- referrer
+    PREFETCH_INST 2
+    jal      artSet64InstanceFromMterp
+    bnez     v0, MterpPossibleException # bail out
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S b/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S
new file mode 100644
index 0000000..95a8ad8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S
@@ -0,0 +1,14 @@
+    /* iput-wide-quick vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a3, 2(rPC)                  # a3 <- field byte offset
+    GET_VREG_U a2, a2                   # a2 <- fp[B], the object pointer
+    ext     a0, rINST, 8, 4             # a0 <- A
+    beqz    a2, common_errNullObject    # object was null
+    GET_VREG_WIDE a0, a0                # a0 <- fp[A]
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    daddu   a1, a2, a3                  # create a direct pointer
+    sw      a0, 0(a1)
+    dsrl32  a0, a0, 0
+    sw      a0, 4(a1)
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
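
The wide value leaves as two 32-bit halves; a sketch assuming the little-endian layout this port targets:

    #include <stdint.h>

    static void iput_wide_quick(uint8_t *obj, uint16_t field_offset,
                                uint64_t value) {
        uint32_t *p = (uint32_t *)(obj + field_offset);
        p[0] = (uint32_t)value;           /* sw a0, 0(a1)                 */
        p[1] = (uint32_t)(value >> 32);   /* dsrl32 a0, a0, 0; sw a0, 4(a1) */
    }
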
diff --git a/runtime/interpreter/mterp/mips64/op_long_to_double.S b/runtime/interpreter/mterp/mips64/op_long_to_double.S
new file mode 100644
index 0000000..8503e76
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_long_to_double.S
@@ -0,0 +1,8 @@
+    /*
+     * Conversion to or from floating-point happens in a floating-point register.
+     * We therefore load the input into, and store the output from, a
+     * floating-point register regardless of the operand type.
+     */
+%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
+    cvt.d.l f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_long_to_float.S b/runtime/interpreter/mterp/mips64/op_long_to_float.S
new file mode 100644
index 0000000..31f5c0e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_long_to_float.S
@@ -0,0 +1,8 @@
+    /*
+     * Conversion to or from floating-point happens in a floating-point register.
+     * We therefore load the input into, and store the output from, a
+     * floating-point register regardless of the operand type.
+     */
+%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
+    cvt.s.l f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_long_to_int.S b/runtime/interpreter/mterp/mips64/op_long_to_int.S
new file mode 100644
index 0000000..4ef4b51
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_long_to_int.S
@@ -0,0 +1,2 @@
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%include "mips64/op_move.S"
diff --git a/runtime/interpreter/mterp/mips64/op_monitor_enter.S b/runtime/interpreter/mterp/mips64/op_monitor_enter.S
new file mode 100644
index 0000000..36ae503
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_monitor_enter.S
@@ -0,0 +1,14 @@
+    /*
+     * Synchronize on an object.
+     */
+    /* monitor-enter vAA */
+    .extern artLockObjectFromCode
+    EXPORT_PC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vAA (object)
+    move    a1, rSELF                   # a1 <- self
+    jal     artLockObjectFromCode
+    bnezc   v0, MterpException
+    FETCH_ADVANCE_INST 1
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_monitor_exit.S b/runtime/interpreter/mterp/mips64/op_monitor_exit.S
new file mode 100644
index 0000000..9945952
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_monitor_exit.S
@@ -0,0 +1,18 @@
+    /*
+     * Unlock an object.
+     *
+     * Exceptions that occur when unlocking a monitor need to appear as
+     * if they happened at the following instruction.  See the Dalvik
+     * instruction spec.
+     */
+    /* monitor-exit vAA */
+    .extern artUnlockObjectFromCode
+    EXPORT_PC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vAA (object)
+    move    a1, rSELF                   # a1 <- self
+    jal     artUnlockObjectFromCode     # v0 <- success for unlock(self, obj)
+    bnezc   v0, MterpException
+    FETCH_ADVANCE_INST 1                # before throw: advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move.S b/runtime/interpreter/mterp/mips64/op_move.S
new file mode 100644
index 0000000..c79f6cd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_VREG a0, a3                     # a0 <- vB
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT a0, a2              # vA <- vB
+    .else
+    SET_VREG a0, a2                     # vA <- vB
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
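
The ext-based decode of the two register nibbles of the Dalvik 12x format, restated in C:

    #include <stdint.h>

    static void decode_12x(uint16_t inst, unsigned *vA, unsigned *vB) {
        *vA = (inst >> 8)  & 0xf;   /* ext a2, rINST, 8, 4  */
        *vB = (inst >> 12) & 0xf;   /* ext a3, rINST, 12, 4 */
    }
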
diff --git a/runtime/interpreter/mterp/mips64/op_move_16.S b/runtime/interpreter/mterp/mips64/op_move_16.S
new file mode 100644
index 0000000..9d5c4dc
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    lhu     a3, 4(rPC)                  # a3 <- BBBB
+    lhu     a2, 2(rPC)                  # a2 <- AAAA
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_VREG a0, a3                     # a0 <- vBBBB
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT a0, a2              # vAAAA <- vBBBB
+    .else
+    SET_VREG a0, a2                     # vAAAA <- vBBBB
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_exception.S b/runtime/interpreter/mterp/mips64/op_move_exception.S
new file mode 100644
index 0000000..d226718
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_exception.S
@@ -0,0 +1,8 @@
+    /* move-exception vAA */
+    srl     a2, rINST, 8                # a2 <- AA
+    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # load exception obj
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    SET_VREG_OBJECT a0, a2              # vAA <- exception obj
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    sd      zero, THREAD_EXCEPTION_OFFSET(rSELF)  # clear exception
+    GOTO_OPCODE v0                      # jump to next instruction
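
A compact model of the two memory operations, with the thread's exception slot shown as a struct field and the vAA slot widened to pointer size for brevity:

    /* Copy the pending exception into vAA, then clear the thread slot. */
    struct thread_model { void *exception; };   /* stands in for THREAD_EXCEPTION_OFFSET */

    static void move_exception(struct thread_model *self, void **vreg_slot) {
        *vreg_slot = self->exception;   /* SET_VREG_OBJECT a0, a2 */
        self->exception = 0;            /* sd zero, THREAD_EXCEPTION_OFFSET(rSELF) */
    }
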
diff --git a/runtime/interpreter/mterp/mips64/op_move_from16.S b/runtime/interpreter/mterp/mips64/op_move_from16.S
new file mode 100644
index 0000000..6d6bde0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_from16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    lhu     a3, 2(rPC)                  # a3 <- BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_VREG a0, a3                     # a0 <- vBBBB
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT a0, a2              # vAA <- vBBBB
+    .else
+    SET_VREG a0, a2                     # vAA <- vBBBB
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_object.S b/runtime/interpreter/mterp/mips64/op_move_object.S
new file mode 100644
index 0000000..47e0272
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_object.S
@@ -0,0 +1 @@
+%include "mips64/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_object_16.S b/runtime/interpreter/mterp/mips64/op_move_object_16.S
new file mode 100644
index 0000000..a777dcd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_object_16.S
@@ -0,0 +1 @@
+%include "mips64/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_object_from16.S b/runtime/interpreter/mterp/mips64/op_move_object_from16.S
new file mode 100644
index 0000000..ab55ebd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_object_from16.S
@@ -0,0 +1 @@
+%include "mips64/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_result.S b/runtime/interpreter/mterp/mips64/op_move_result.S
new file mode 100644
index 0000000..1ec28cb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_result.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    srl     a2, rINST, 8                # a2 <- AA
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
+    lw      a0, 0(a0)                   # a0 <- result.i
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if $is_object
+    SET_VREG_OBJECT a0, a2              # vAA <- result
+    .else
+    SET_VREG a0, a2                     # vAA <- result
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_result_object.S b/runtime/interpreter/mterp/mips64/op_move_result_object.S
new file mode 100644
index 0000000..e76bc22
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_result_object.S
@@ -0,0 +1 @@
+%include "mips64/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_result_wide.S b/runtime/interpreter/mterp/mips64/op_move_result_wide.S
new file mode 100644
index 0000000..3ba0d72
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_result_wide.S
@@ -0,0 +1,9 @@
+    /* for: move-result-wide */
+    /* op vAA */
+    srl     a2, rINST, 8                # a2 <- AA
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
+    ld      a0, 0(a0)                   # a0 <- result.j
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- result
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_wide.S b/runtime/interpreter/mterp/mips64/op_move_wide.S
new file mode 100644
index 0000000..ea23f87
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_wide.S
@@ -0,0 +1,9 @@
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    ext     a2, rINST, 8, 4             # a2 <- A
+    GET_VREG_WIDE a0, a3                # a0 <- vB
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- vB
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_wide_16.S b/runtime/interpreter/mterp/mips64/op_move_wide_16.S
new file mode 100644
index 0000000..8ec6068
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_wide_16.S
@@ -0,0 +1,9 @@
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    lhu     a3, 4(rPC)                  # a3 <- BBBB
+    lhu     a2, 2(rPC)                  # a2 <- AAAA
+    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAAAA <- vBBBB
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_wide_from16.S b/runtime/interpreter/mterp/mips64/op_move_wide_from16.S
new file mode 100644
index 0000000..11d5603
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_wide_from16.S
@@ -0,0 +1,9 @@
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    lhu     a3, 2(rPC)                  # a3 <- BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- vBBBB
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_mul_double.S b/runtime/interpreter/mterp/mips64/op_mul_double.S
new file mode 100644
index 0000000..e7e17f7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_double.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide.S" {"instr":"mul.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S
new file mode 100644
index 0000000..f404d46
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide2addr.S" {"instr":"mul.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_float.S b/runtime/interpreter/mterp/mips64/op_mul_float.S
new file mode 100644
index 0000000..9a695fc
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_float.S
@@ -0,0 +1 @@
+%include "mips64/fbinop.S" {"instr":"mul.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S
new file mode 100644
index 0000000..a134a34
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinop2addr.S" {"instr":"mul.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int.S b/runtime/interpreter/mterp/mips64/op_mul_int.S
new file mode 100644
index 0000000..e1b90ff
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S
new file mode 100644
index 0000000..c0c4063
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S b/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S
new file mode 100644
index 0000000..bb4fff8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S b/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S
new file mode 100644
index 0000000..da11ea9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_long.S b/runtime/interpreter/mterp/mips64/op_mul_long.S
new file mode 100644
index 0000000..ec32850
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"dmul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S
new file mode 100644
index 0000000..eb50cda
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"dmul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_neg_double.S b/runtime/interpreter/mterp/mips64/op_neg_double.S
new file mode 100644
index 0000000..a135d61
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_neg_double.S
@@ -0,0 +1,3 @@
+%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
+    neg.d   f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_neg_float.S b/runtime/interpreter/mterp/mips64/op_neg_float.S
new file mode 100644
index 0000000..78019f0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_neg_float.S
@@ -0,0 +1,3 @@
+%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
+    neg.s   f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_neg_int.S b/runtime/interpreter/mterp/mips64/op_neg_int.S
new file mode 100644
index 0000000..31538c0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_neg_int.S
@@ -0,0 +1 @@
+%include "mips64/unop.S" {"instr":"subu    a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_neg_long.S b/runtime/interpreter/mterp/mips64/op_neg_long.S
new file mode 100644
index 0000000..bc80d06
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_neg_long.S
@@ -0,0 +1 @@
+%include "mips64/unopWide.S" {"instr":"dsubu   a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_new_array.S b/runtime/interpreter/mterp/mips64/op_new_array.S
new file mode 100644
index 0000000..d78b4ac
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_new_array.S
@@ -0,0 +1,19 @@
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array vA, vB, class//CCCC */
+    .extern MterpNewArray
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rINST
+    move    a3, rSELF
+    jal     MterpNewArray
+    beqzc   v0, MterpPossibleException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_new_instance.S b/runtime/interpreter/mterp/mips64/op_new_instance.S
new file mode 100644
index 0000000..cc5e13e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_new_instance.S
@@ -0,0 +1,14 @@
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance vAA, class//BBBB */
+    .extern MterpNewInstance
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rSELF
+    move    a2, rINST
+    jal     MterpNewInstance            # (shadow_frame, self, inst_data)
+    beqzc   v0, MterpPossibleException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_nop.S b/runtime/interpreter/mterp/mips64/op_nop.S
new file mode 100644
index 0000000..cc803a7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_nop.S
@@ -0,0 +1,3 @@
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_not_int.S b/runtime/interpreter/mterp/mips64/op_not_int.S
new file mode 100644
index 0000000..5954095
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_not_int.S
@@ -0,0 +1 @@
+%include "mips64/unop.S" {"instr":"nor     a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_not_long.S b/runtime/interpreter/mterp/mips64/op_not_long.S
new file mode 100644
index 0000000..c8f5da7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_not_long.S
@@ -0,0 +1 @@
+%include "mips64/unopWide.S" {"instr":"nor     a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int.S b/runtime/interpreter/mterp/mips64/op_or_int.S
new file mode 100644
index 0000000..0102355
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_or_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int_2addr.S b/runtime/interpreter/mterp/mips64/op_or_int_2addr.S
new file mode 100644
index 0000000..eed8900
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_or_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int_lit16.S b/runtime/interpreter/mterp/mips64/op_or_int_lit16.S
new file mode 100644
index 0000000..16a0f3e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_or_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int_lit8.S b/runtime/interpreter/mterp/mips64/op_or_int_lit8.S
new file mode 100644
index 0000000..dbbf790
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_or_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_long.S b/runtime/interpreter/mterp/mips64/op_or_long.S
new file mode 100644
index 0000000..e6f8639
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_or_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_long_2addr.S b/runtime/interpreter/mterp/mips64/op_or_long_2addr.S
new file mode 100644
index 0000000..ad5e6c8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_or_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_packed_switch.S b/runtime/interpreter/mterp/mips64/op_packed_switch.S
new file mode 100644
index 0000000..cdbdf75
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_packed_switch.S
@@ -0,0 +1,31 @@
+%default { "func":"MterpDoPackedSwitch" }
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBBBBBB */
+    .extern $func
+    lh      a0, 2(rPC)                  # a0 <- bbbb (lo)
+    lh      a1, 4(rPC)                  # a1 <- BBBB (hi)
+    srl     a3, rINST, 8                # a3 <- AA
+    ins     a0, a1, 16, 16              # a0 <- BBBBbbbb
+    GET_VREG a1, a3                     # a1 <- vAA
+    dlsa    a0, a0, rPC, 1              # a0 <- PC + BBBBbbbb*2
+    jal     $func                       # v0 <- code-unit branch offset
+    dlsa    rPC, v0, rPC, 1             # rPC <- rPC + offset * 2
+    FETCH_INST                          # load rINST
+#if MTERP_SUSPEND
+    bgtz    v0, 1f                      # offset * 2 > 0 => no suspend check
+    REFRESH_IBASE
+1:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    blez    v0, MterpCheckSuspendAndContinue
+#endif
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
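
How the signed 32-bit table offset is assembled and how the helper's code-unit result is applied, in C terms:

    #include <stdint.h>

    /* Build the signed 32-bit BBBBbbbb operand (lh/lh/ins above). */
    static int32_t switch_table_offset(const uint16_t *pc) {
        return (int32_t)((uint32_t)pc[1] | ((uint32_t)pc[2] << 16));
    }

    /* Apply the helper's result: dlsa rPC, v0, rPC, 1 scales by 2 bytes. */
    static const uint16_t *switch_advance(const uint16_t *pc,
                                          int32_t code_unit_offset) {
        return pc + code_unit_offset;   /* each code unit is 2 bytes */
    }
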
diff --git a/runtime/interpreter/mterp/mips64/op_rem_double.S b/runtime/interpreter/mterp/mips64/op_rem_double.S
new file mode 100644
index 0000000..ba61cfd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_double.S
@@ -0,0 +1,12 @@
+    /* rem-double vAA, vBB, vCC */
+    .extern fmod
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_DOUBLE f12, a2             # f12 <- vBB
+    GET_VREG_DOUBLE f13, a3             # f13 <- vCC
+    jal     fmod                        # f0 <- f12 op f13
+    srl     a4, rINST, 8                # a4 <- AA
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a4              # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
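
MIPS64 has no FP-remainder instruction, so the handlers defer to libm; Dalvik's rem semantics (truncated division, result takes the dividend's sign) match fmod/fmodf:

    #include <math.h>

    static double rem_double(double vbb, double vcc) {
        return fmod(vbb, vcc);    /* jal fmod above */
    }
    static float rem_float(float vbb, float vcc) {
        return fmodf(vbb, vcc);   /* jal fmodf in the float handlers below */
    }
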
diff --git a/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S
new file mode 100644
index 0000000..c649f0d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S
@@ -0,0 +1,12 @@
+    /* rem-double/2addr vA, vB */
+    .extern fmod
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_DOUBLE f12, a2             # f12 <- vA
+    GET_VREG_DOUBLE f13, a3             # f13 <- vB
+    jal     fmod                        # f0 <- f12 op f13
+    ext     a2, rINST, 8, 4             # a2 <- A
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a2              # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_float.S b/runtime/interpreter/mterp/mips64/op_rem_float.S
new file mode 100644
index 0000000..3967b0b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_float.S
@@ -0,0 +1,12 @@
+    /* rem-float vAA, vBB, vCC */
+    .extern fmodf
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_FLOAT f12, a2              # f12 <- vBB
+    GET_VREG_FLOAT f13, a3              # f13 <- vCC
+    jal     fmodf                       # f0 <- f12 op f13
+    srl     a4, rINST, 8                # a4 <- AA
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a4               # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S
new file mode 100644
index 0000000..3fed41e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S
@@ -0,0 +1,12 @@
+    /* rem-float/2addr vA, vB */
+    .extern fmodf
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_FLOAT f12, a2              # f12 <- vA
+    GET_VREG_FLOAT f13, a3              # f13 <- vB
+    jal     fmodf                       # f0 <- f12 op f13
+    ext     a2, rINST, 8, 4             # a2 <- A
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a2               # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int.S b/runtime/interpreter/mterp/mips64/op_rem_int.S
new file mode 100644
index 0000000..c05e9c4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S
new file mode 100644
index 0000000..a4e162d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S b/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S
new file mode 100644
index 0000000..3284f14
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S b/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S
new file mode 100644
index 0000000..1e6a584
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_long.S b/runtime/interpreter/mterp/mips64/op_rem_long.S
new file mode 100644
index 0000000..32b2d19
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"dmod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S
new file mode 100644
index 0000000..ad658e1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"dmod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_return.S b/runtime/interpreter/mterp/mips64/op_return.S
new file mode 100644
index 0000000..ec986b8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_return.S
@@ -0,0 +1,18 @@
+    /*
+     * Return a 32-bit value.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    .extern MterpSuspendCheck
+    jal     MterpThreadFenceForConstructor
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    move    a0, rSELF
+    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqzc   ra, 1f
+    jal     MterpSuspendCheck           # (self)
+1:
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vAA
+    b       MterpReturn
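
The shared return prologue in C shape: constructor fence, then a suspend check only when the thread flags request one (the flag bit values below are placeholders, not ART's real constants):

    #include <stdint.h>

    #define SUSPEND_REQUEST    (1u << 0)   /* placeholder bit values */
    #define CHECKPOINT_REQUEST (1u << 1)

    extern void MterpThreadFenceForConstructor(void);
    extern void MterpSuspendCheck(void *self);

    static void return_prologue(void *self, uint32_t thread_flags) {
        MterpThreadFenceForConstructor();  /* jal MterpThreadFenceForConstructor */
        if (thread_flags & (SUSPEND_REQUEST | CHECKPOINT_REQUEST))
            MterpSuspendCheck(self);       /* jal MterpSuspendCheck */
    }
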
diff --git a/runtime/interpreter/mterp/mips64/op_return_object.S b/runtime/interpreter/mterp/mips64/op_return_object.S
new file mode 100644
index 0000000..67f1871
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_return_object.S
@@ -0,0 +1 @@
+%include "mips64/op_return.S"
diff --git a/runtime/interpreter/mterp/mips64/op_return_void.S b/runtime/interpreter/mterp/mips64/op_return_void.S
new file mode 100644
index 0000000..05253ae
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_return_void.S
@@ -0,0 +1,11 @@
+    .extern MterpThreadFenceForConstructor
+    .extern MterpSuspendCheck
+    jal     MterpThreadFenceForConstructor
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    move    a0, rSELF
+    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqzc   ra, 1f
+    jal     MterpSuspendCheck           # (self)
+1:
+    li      a0, 0
+    b       MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S
new file mode 100644
index 0000000..f67e811
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S
@@ -0,0 +1,9 @@
+    .extern MterpSuspendCheck
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    move    a0, rSELF
+    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqzc   ra, 1f
+    jal     MterpSuspendCheck           # (self)
+1:
+    li      a0, 0
+    b       MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_return_wide.S b/runtime/interpreter/mterp/mips64/op_return_wide.S
new file mode 100644
index 0000000..544e027
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_return_wide.S
@@ -0,0 +1,17 @@
+    /*
+     * Return a 64-bit value.
+     */
+    /* return-wide vAA */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    .extern MterpSuspendCheck
+    jal     MterpThreadFenceForConstructor
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    move    a0, rSELF
+    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqzc   ra, 1f
+    jal     MterpSuspendCheck           # (self)
+1:
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_WIDE a0, a2                # a0 <- vAA
+    b       MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_rsub_int.S b/runtime/interpreter/mterp/mips64/op_rsub_int.S
new file mode 100644
index 0000000..fa31a0a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rsub_int.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S b/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S
new file mode 100644
index 0000000..c31ff32
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget.S b/runtime/interpreter/mterp/mips64/op_sget.S
new file mode 100644
index 0000000..bd2cfe3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget.S
@@ -0,0 +1,26 @@
+%default { "is_object":"0", "helper":"artGet32StaticFromCode", "extend":"" }
+    /*
+     * General SGET handler wrapper.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field//BBBB */
+    .extern $helper
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    ld      a1, OFF_FP_METHOD(rFP)
+    move    a2, rSELF
+    jal     $helper
+    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    srl     a2, rINST, 8                # a2 <- AA
+    $extend
+    PREFETCH_INST 2
+    bnez    a3, MterpException          # bail out
+    .if $is_object
+    SET_VREG_OBJECT v0, a2              # fp[AA] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[AA] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0
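
The $extend hook narrows the helper's 32-bit result for the sub-word variants below; and/seb/seh correspond to these C conversions:

    #include <stdint.h>

    static uint32_t extend_boolean(uint32_t v) { return v & 0xffu;   } /* and v0, v0, 0xff   */
    static int32_t  extend_byte(int32_t v)     { return (int8_t)v;   } /* seb v0, v0         */
    static uint32_t extend_char(uint32_t v)    { return v & 0xffffu; } /* and v0, v0, 0xffff */
    static int32_t  extend_short(int32_t v)    { return (int16_t)v;  } /* seh v0, v0         */
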
diff --git a/runtime/interpreter/mterp/mips64/op_sget_boolean.S b/runtime/interpreter/mterp/mips64/op_sget_boolean.S
new file mode 100644
index 0000000..e7b1844
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget_boolean.S
@@ -0,0 +1 @@
+%include "mips64/op_sget.S" {"helper":"artGetBooleanStaticFromCode", "extend":"and v0, v0, 0xff"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_byte.S b/runtime/interpreter/mterp/mips64/op_sget_byte.S
new file mode 100644
index 0000000..52a2e4a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget_byte.S
@@ -0,0 +1 @@
+%include "mips64/op_sget.S" {"helper":"artGetByteStaticFromCode", "extend":"seb v0, v0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_char.S b/runtime/interpreter/mterp/mips64/op_sget_char.S
new file mode 100644
index 0000000..873d82a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget_char.S
@@ -0,0 +1 @@
+%include "mips64/op_sget.S" {"helper":"artGetCharStaticFromCode", "extend":"and v0, v0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_object.S b/runtime/interpreter/mterp/mips64/op_sget_object.S
new file mode 100644
index 0000000..3108417
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget_object.S
@@ -0,0 +1 @@
+%include "mips64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_short.S b/runtime/interpreter/mterp/mips64/op_sget_short.S
new file mode 100644
index 0000000..fed4e76
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget_short.S
@@ -0,0 +1 @@
+%include "mips64/op_sget.S" {"helper":"artGetShortStaticFromCode", "extend":"seh v0, v0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_wide.S b/runtime/interpreter/mterp/mips64/op_sget_wide.S
new file mode 100644
index 0000000..77124d1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget_wide.S
@@ -0,0 +1,17 @@
+    /*
+     * SGET_WIDE handler wrapper.
+     */
+    /* sget-wide vAA, field//BBBB */
+    .extern artGet64StaticFromCode
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    ld      a1, OFF_FP_METHOD(rFP)
+    move    a2, rSELF
+    jal     artGet64StaticFromCode
+    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    srl     a4, rINST, 8                # a4 <- AA
+    bnez    a3, MterpException          # bail out
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    SET_VREG_WIDE v0, a4
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_shl_int.S b/runtime/interpreter/mterp/mips64/op_shl_int.S
new file mode 100644
index 0000000..784481f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shl_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S b/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S
new file mode 100644
index 0000000..a6c8a78
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S b/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S
new file mode 100644
index 0000000..36ef207
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_long.S b/runtime/interpreter/mterp/mips64/op_shl_long.S
new file mode 100644
index 0000000..225a2cb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shl_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"dsll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S b/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S
new file mode 100644
index 0000000..c04d882
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"dsll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_int.S b/runtime/interpreter/mterp/mips64/op_shr_int.S
new file mode 100644
index 0000000..eded037
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shr_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S b/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S
new file mode 100644
index 0000000..5b4d96f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S b/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S
new file mode 100644
index 0000000..175eb86
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_long.S b/runtime/interpreter/mterp/mips64/op_shr_long.S
new file mode 100644
index 0000000..0db38c8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shr_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"dsra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S b/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S
new file mode 100644
index 0000000..48131ad
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"dsra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sparse_switch.S b/runtime/interpreter/mterp/mips64/op_sparse_switch.S
new file mode 100644
index 0000000..b065aaa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sparse_switch.S
@@ -0,0 +1 @@
+%include "mips64/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/mips64/op_sput.S b/runtime/interpreter/mterp/mips64/op_sput.S
new file mode 100644
index 0000000..142f18f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput.S
@@ -0,0 +1,20 @@
+%default { "helper":"artSet32StaticFromCode" }
+    /*
+     * General SPUT handler wrapper.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field//BBBB */
+    .extern $helper
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    srl     a3, rINST, 8                # a3 <- AA
+    GET_VREG a1, a3                     # a1 <- fp[AA]
+    ld      a2, OFF_FP_METHOD(rFP)
+    move    a3, rSELF
+    PREFETCH_INST 2                     # Get next inst, but don't advance rPC
+    jal     $helper
+    bnezc   v0, MterpException          # 0 on success
+    ADVANCE 2                           # Past exception point - now advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_sput_boolean.S b/runtime/interpreter/mterp/mips64/op_sput_boolean.S
new file mode 100644
index 0000000..f5b8dbf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput_boolean.S
@@ -0,0 +1 @@
+%include "mips64/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_byte.S b/runtime/interpreter/mterp/mips64/op_sput_byte.S
new file mode 100644
index 0000000..f5b8dbf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput_byte.S
@@ -0,0 +1 @@
+%include "mips64/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_char.S b/runtime/interpreter/mterp/mips64/op_sput_char.S
new file mode 100644
index 0000000..c4d195c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput_char.S
@@ -0,0 +1 @@
+%include "mips64/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_object.S b/runtime/interpreter/mterp/mips64/op_sput_object.S
new file mode 100644
index 0000000..ef4c685
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput_object.S
@@ -0,0 +1,11 @@
+    .extern MterpSputObject
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rINST
+    move    a3, rSELF
+    jal     MterpSputObject
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_sput_short.S b/runtime/interpreter/mterp/mips64/op_sput_short.S
new file mode 100644
index 0000000..c4d195c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput_short.S
@@ -0,0 +1 @@
+%include "mips64/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_wide.S b/runtime/interpreter/mterp/mips64/op_sput_wide.S
new file mode 100644
index 0000000..828ddc1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput_wide.S
@@ -0,0 +1,17 @@
+    /*
+     * SPUT_WIDE handler wrapper.
+     */
+    /* sput-wide vAA, field//BBBB */
+    .extern artSet64IndirectStaticFromMterp
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    ld      a1, OFF_FP_METHOD(rFP)
+    srl     a2, rINST, 8                # a2 <- AA
+    dlsa    a2, a2, rFP, 2
+    move    a3, rSELF
+    PREFETCH_INST 2                     # Get next inst, but don't advance rPC
+    jal     artSet64IndirectStaticFromMterp
+    bnezc   v0, MterpException          # 0 on success, -1 on failure
+    ADVANCE 2                           # Past exception point - now advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_sub_double.S b/runtime/interpreter/mterp/mips64/op_sub_double.S
new file mode 100644
index 0000000..40a6c89
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_double.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide.S" {"instr":"sub.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S
new file mode 100644
index 0000000..984737e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide2addr.S" {"instr":"sub.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_float.S b/runtime/interpreter/mterp/mips64/op_sub_float.S
new file mode 100644
index 0000000..9010592
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_float.S
@@ -0,0 +1 @@
+%include "mips64/fbinop.S" {"instr":"sub.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S
new file mode 100644
index 0000000..e7d4ffe
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinop2addr.S" {"instr":"sub.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_int.S b/runtime/interpreter/mterp/mips64/op_sub_int.S
new file mode 100644
index 0000000..609ea05
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S
new file mode 100644
index 0000000..ba2f1e8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_long.S b/runtime/interpreter/mterp/mips64/op_sub_long.S
new file mode 100644
index 0000000..09a6afd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"dsubu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S
new file mode 100644
index 0000000..b9ec82a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"dsubu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_throw.S b/runtime/interpreter/mterp/mips64/op_throw.S
new file mode 100644
index 0000000..6418d57
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_throw.S
@@ -0,0 +1,10 @@
+    /*
+     * Throw an exception object in the current thread.
+     */
+    /* throw vAA */
+    EXPORT_PC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vAA (exception object)
+    beqzc   a0, common_errNullObject
+    sd      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # thread->exception <- obj
+    b       MterpException
diff --git a/runtime/interpreter/mterp/mips64/op_unused_3e.S b/runtime/interpreter/mterp/mips64/op_unused_3e.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_3e.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_3f.S b/runtime/interpreter/mterp/mips64/op_unused_3f.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_3f.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_40.S b/runtime/interpreter/mterp/mips64/op_unused_40.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_40.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_41.S b/runtime/interpreter/mterp/mips64/op_unused_41.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_41.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_42.S b/runtime/interpreter/mterp/mips64/op_unused_42.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_42.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_43.S b/runtime/interpreter/mterp/mips64/op_unused_43.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_43.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_79.S b/runtime/interpreter/mterp/mips64/op_unused_79.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_79.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_7a.S b/runtime/interpreter/mterp/mips64/op_unused_7a.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_7a.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f4.S b/runtime/interpreter/mterp/mips64/op_unused_f4.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f4.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fa.S b/runtime/interpreter/mterp/mips64/op_unused_fa.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_fa.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fb.S b/runtime/interpreter/mterp/mips64/op_unused_fb.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_fb.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fc.S b/runtime/interpreter/mterp/mips64/op_unused_fc.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_fc.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fd.S b/runtime/interpreter/mterp/mips64/op_unused_fd.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_fd.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fe.S b/runtime/interpreter/mterp/mips64/op_unused_fe.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_fe.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_ff.S b/runtime/interpreter/mterp/mips64/op_unused_ff.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_ff.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int.S b/runtime/interpreter/mterp/mips64/op_ushr_int.S
new file mode 100644
index 0000000..37c90cb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_ushr_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S b/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S
new file mode 100644
index 0000000..d6bf413
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S b/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S
new file mode 100644
index 0000000..2a2d843
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_long.S b/runtime/interpreter/mterp/mips64/op_ushr_long.S
new file mode 100644
index 0000000..e724405
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_ushr_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"dsrl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S b/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S
new file mode 100644
index 0000000..d2cf135
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"dsrl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int.S b/runtime/interpreter/mterp/mips64/op_xor_int.S
new file mode 100644
index 0000000..ee25ebc
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_xor_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S b/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S
new file mode 100644
index 0000000..0f04967
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S b/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S
new file mode 100644
index 0000000..ecb21ae
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S b/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S
new file mode 100644
index 0000000..115ae99
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_long.S b/runtime/interpreter/mterp/mips64/op_xor_long.S
new file mode 100644
index 0000000..7ebabc2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_xor_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S b/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S
new file mode 100644
index 0000000..0f1919a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/unop.S b/runtime/interpreter/mterp/mips64/unop.S
new file mode 100644
index 0000000..e3f7ea0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/unop.S
@@ -0,0 +1,18 @@
+%default {"preinstr":""}
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "a0 = op a0".
+     *
+     * For: int-to-byte, int-to-char, int-to-short,
+     *      not-int, neg-int
+     */
+    /* unop vA, vB */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    ext     a2, rINST, 8, 4             # a2 <- A
+    $preinstr                           # optional op
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    $instr                              # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
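How the template above is consumed: an opcode stub supplies the "instr" (and optionally "preinstr") string, and the mterp generator splices it into this body in place of $instr and $preinstr. As an illustrative expansion, a not-int handler built from unop.S with {"instr":"nor a0, zero, a0"} (the stub itself appears elsewhere in this change) would reduce to:

    /* illustrative expansion only -- not a file in this change */
    ext     a3, rINST, 12, 4            # a3 <- B
    GET_VREG a0, a3                     # a0 <- vB
    ext     a2, rINST, 8, 4             # a2 <- A
                                        # (empty preinstr drops out)
    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
    nor     a0, zero, a0                # a0 <- ~vB
    GET_INST_OPCODE v0                  # extract opcode from rINST
    SET_VREG a0, a2                     # vA <- a0
    GOTO_OPCODE v0                      # jump to next instruction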
diff --git a/runtime/interpreter/mterp/mips64/unopWide.S b/runtime/interpreter/mterp/mips64/unopWide.S
new file mode 100644
index 0000000..c0dd1aa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/unopWide.S
@@ -0,0 +1,17 @@
+%default {"preinstr":""}
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "a0 = op a0".
+     *
+     * For: not-long, neg-long
+     */
+    /* unop vA, vB */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a3                # a0 <- vB
+    ext     a2, rINST, 8, 4             # a2 <- A
+    $preinstr                           # optional op
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    $instr                              # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/unused.S b/runtime/interpreter/mterp/mips64/unused.S
new file mode 100644
index 0000000..30d38bd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/unused.S
@@ -0,0 +1,4 @@
+/*
+ * Bail to the reference interpreter to throw.
+ */
+    b       MterpFallback
diff --git a/runtime/interpreter/mterp/mips64/zcmp.S b/runtime/interpreter/mterp/mips64/zcmp.S
new file mode 100644
index 0000000..d7ad894
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/zcmp.S
@@ -0,0 +1,30 @@
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-lez" you would use "le".
+     *
+     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a2                     # a0 <- vAA
+
+    b${condition}zc a0, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + BBBB * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # BBBB * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
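The ${condition} placeholder above keeps each zero-test opcode down to a one-line stub. For example, if-eqz instantiates the template with {"condition":"eq"}, turning the branch pair into a MIPS64 compact beqzc (illustrative; the per-opcode stubs appear earlier in this change):

    %include "mips64/zcmp.S" {"condition":"eq"}

    # ...which expands the template's branch pair to:
    beqzc   a0, 1f                      # taken: a4 keeps sign-extended BBBB
    li      a4, 2                       # not taken: step over if-eqz
1: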
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 8f4741c..ca727f4 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -149,7 +149,9 @@
       Runtime::Current()->GetInstrumentation();
   bool unhandled_instrumentation;
   // TODO: enable for other targets after more extensive testing.
-  if ((kRuntimeISA == kArm64) || (kRuntimeISA == kArm)) {
+  if ((kRuntimeISA == kArm64) || (kRuntimeISA == kArm) ||
+      (kRuntimeISA == kX86_64) || (kRuntimeISA == kX86) ||
+      (kRuntimeISA == kMips)) {
     unhandled_instrumentation = instrumentation->NonJitProfilingActive();
   } else {
     unhandled_instrumentation = instrumentation->IsActive();
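What the widened check above buys: on the listed ISAs the mterp assembly performs its own branch profiling (see the MterpProfileBranch calls below), so plain JIT profiling no longer forces a fall-back to the reference interpreter; only non-JIT instrumentation (debuggers, tracing) still does. A condensed sketch of the decision, using a hypothetical helper name:

    // Illustrative sketch; CanUseMterpFastPath is a hypothetical name, not an ART API.
    static bool CanUseMterpFastPath(const instrumentation::Instrumentation* instr,
                                    bool isa_profiles_in_asm) {
      // ISAs whose handlers call MterpProfileBranch themselves only bail out
      // for instrumentation that mterp cannot service inline.
      return isa_profiles_in_asm ? !instr->NonJitProfilingActive()
                                 : !instr->IsActive();
    }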
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index 94cbd2d..2b74d4c 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -1534,14 +1534,15 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
     mov     r1, rINST, lsr #12          @ r1<- B
     ubfx    r0, rINST, #8, #4           @ r0<- A
     GET_VREG r3, r1                     @ r3<- vB
     GET_VREG r2, r0                     @ r2<- vA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     cmp     r2, r3                      @ compare (vA, vB)
-    bne .L_op_if_eq_not_taken
+    movne rINST, #2                     @ rINST<- branch dist for not-taken
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -1549,31 +1550,13 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_op_if_eq_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r2, r0                     @ r2<- vA
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r2, r3                      @ compare (vA, vB)
-    movne rINST, #2              @ rINST<- BYTE branch dist for not-taken
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r2, rINST, rINST            @ convert to bytes, check sign
+    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
+    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
 
 
 /* ------------------------------ */
@@ -1589,14 +1572,15 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
     mov     r1, rINST, lsr #12          @ r1<- B
     ubfx    r0, rINST, #8, #4           @ r0<- A
     GET_VREG r3, r1                     @ r3<- vB
     GET_VREG r2, r0                     @ r2<- vA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     cmp     r2, r3                      @ compare (vA, vB)
-    beq .L_op_if_ne_not_taken
+    moveq rINST, #2                     @ rINST<- branch dist for not-taken
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -1604,31 +1588,13 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_op_if_ne_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r2, r0                     @ r2<- vA
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r2, r3                      @ compare (vA, vB)
-    moveq rINST, #2              @ rINST<- BYTE branch dist for not-taken
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r2, rINST, rINST            @ convert to bytes, check sign
+    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
+    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
 
 
 /* ------------------------------ */
@@ -1644,14 +1610,15 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
     mov     r1, rINST, lsr #12          @ r1<- B
     ubfx    r0, rINST, #8, #4           @ r0<- A
     GET_VREG r3, r1                     @ r3<- vB
     GET_VREG r2, r0                     @ r2<- vA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     cmp     r2, r3                      @ compare (vA, vB)
-    bge .L_op_if_lt_not_taken
+    movge rINST, #2                     @ rINST<- branch dist for not-taken
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -1659,31 +1626,13 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_op_if_lt_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r2, r0                     @ r2<- vA
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r2, r3                      @ compare (vA, vB)
-    movge rINST, #2              @ rINST<- BYTE branch dist for not-taken
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r2, rINST, rINST            @ convert to bytes, check sign
+    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
+    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
 
 
 /* ------------------------------ */
@@ -1699,14 +1648,15 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
     mov     r1, rINST, lsr #12          @ r1<- B
     ubfx    r0, rINST, #8, #4           @ r0<- A
     GET_VREG r3, r1                     @ r3<- vB
     GET_VREG r2, r0                     @ r2<- vA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     cmp     r2, r3                      @ compare (vA, vB)
-    blt .L_op_if_ge_not_taken
+    movlt rINST, #2                     @ rINST<- branch dist for not-taken
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -1714,31 +1664,13 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_op_if_ge_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r2, r0                     @ r2<- vA
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r2, r3                      @ compare (vA, vB)
-    movlt rINST, #2              @ rINST<- BYTE branch dist for not-taken
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r2, rINST, rINST            @ convert to bytes, check sign
+    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
+    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
 
 
 /* ------------------------------ */
@@ -1754,14 +1686,15 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
     mov     r1, rINST, lsr #12          @ r1<- B
     ubfx    r0, rINST, #8, #4           @ r0<- A
     GET_VREG r3, r1                     @ r3<- vB
     GET_VREG r2, r0                     @ r2<- vA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     cmp     r2, r3                      @ compare (vA, vB)
-    ble .L_op_if_gt_not_taken
+    movle rINST, #2                     @ rINST<- branch dist for not-taken
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -1769,31 +1702,13 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_op_if_gt_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r2, r0                     @ r2<- vA
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r2, r3                      @ compare (vA, vB)
-    movle rINST, #2              @ rINST<- BYTE branch dist for not-taken
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r2, rINST, rINST            @ convert to bytes, check sign
+    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
+    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
 
 
 /* ------------------------------ */
@@ -1809,14 +1724,15 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
     mov     r1, rINST, lsr #12          @ r1<- B
     ubfx    r0, rINST, #8, #4           @ r0<- A
     GET_VREG r3, r1                     @ r3<- vB
     GET_VREG r2, r0                     @ r2<- vA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     cmp     r2, r3                      @ compare (vA, vB)
-    bgt .L_op_if_le_not_taken
+    movgt rINST, #2                     @ rINST<- branch dist for not-taken
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -1824,31 +1740,13 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_op_if_le_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r0, rINST, #8, #4           @ r0<- A
-    GET_VREG r3, r1                     @ r3<- vB
-    GET_VREG r2, r0                     @ r2<- vA
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    cmp     r2, r3                      @ compare (vA, vB)
-    movgt rINST, #2              @ rINST<- BYTE branch dist for not-taken
-    adds    r2, rINST, rINST            @ convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r2, rINST, rINST            @ convert to bytes, check sign
+    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
+    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
 
 
 /* ------------------------------ */
@@ -1864,13 +1762,14 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
     mov     r0, rINST, lsr #8           @ r0<- AA
     GET_VREG r2, r0                     @ r2<- vAA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     cmp     r2, #0                      @ compare (vA, 0)
-    bne .L_op_if_eqz_not_taken
+    movne rINST, #2                     @ rINST<- branch dist for not-taken
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -1878,28 +1777,12 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_op_if_eqz_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r2, r0                     @ r2<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    cmp     r2, #0                      @ compare (vA, 0)
-    movne rINST, #2              @ rINST<- inst branch dist for not-taken
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r1, rINST, rINST            @ convert to bytes & set flags
+    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
 
 
 /* ------------------------------ */
@@ -1915,13 +1798,14 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
     mov     r0, rINST, lsr #8           @ r0<- AA
     GET_VREG r2, r0                     @ r2<- vAA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     cmp     r2, #0                      @ compare (vA, 0)
-    beq .L_op_if_nez_not_taken
+    moveq rINST, #2                     @ rINST<- branch dist for not-taken
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -1929,28 +1813,12 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_op_if_nez_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r2, r0                     @ r2<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    cmp     r2, #0                      @ compare (vA, 0)
-    moveq rINST, #2              @ rINST<- inst branch dist for not-taken
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r1, rINST, rINST            @ convert to bytes & set flags
+    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
 
 
 /* ------------------------------ */
@@ -1966,13 +1834,14 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
     mov     r0, rINST, lsr #8           @ r0<- AA
     GET_VREG r2, r0                     @ r2<- vAA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     cmp     r2, #0                      @ compare (vA, 0)
-    bge .L_op_if_ltz_not_taken
+    movge rINST, #2                     @ rINST<- branch dist for not-taken
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -1980,28 +1849,12 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_op_if_ltz_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r2, r0                     @ r2<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    cmp     r2, #0                      @ compare (vA, 0)
-    movge rINST, #2              @ rINST<- inst branch dist for not-taken
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r1, rINST, rINST            @ convert to bytes & set flags
+    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
 
 
 /* ------------------------------ */
@@ -2017,13 +1870,14 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
     mov     r0, rINST, lsr #8           @ r0<- AA
     GET_VREG r2, r0                     @ r2<- vAA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     cmp     r2, #0                      @ compare (vA, 0)
-    blt .L_op_if_gez_not_taken
+    movlt rINST, #2                     @ rINST<- branch dist for not-taken
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -2031,28 +1885,12 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_op_if_gez_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r2, r0                     @ r2<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    cmp     r2, #0                      @ compare (vA, 0)
-    movlt rINST, #2              @ rINST<- inst branch dist for not-taken
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r1, rINST, rINST            @ convert to bytes & set flags
+    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
 
 
 /* ------------------------------ */
@@ -2068,13 +1906,14 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
     mov     r0, rINST, lsr #8           @ r0<- AA
     GET_VREG r2, r0                     @ r2<- vAA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     cmp     r2, #0                      @ compare (vA, 0)
-    ble .L_op_if_gtz_not_taken
+    movle rINST, #2                     @ rINST<- branch dist for not-taken
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -2082,28 +1921,12 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_op_if_gtz_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r2, r0                     @ r2<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    cmp     r2, #0                      @ compare (vA, 0)
-    movle rINST, #2              @ rINST<- inst branch dist for not-taken
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r1, rINST, rINST            @ convert to bytes & set flags
+    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
 
 
 /* ------------------------------ */
@@ -2119,13 +1942,14 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
     mov     r0, rINST, lsr #8           @ r0<- AA
     GET_VREG r2, r0                     @ r2<- vAA
     FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     cmp     r2, #0                      @ compare (vA, 0)
-    bgt .L_op_if_lez_not_taken
+    movgt rINST, #2                     @ rINST<- branch dist for not-taken
+#if MTERP_PROFILE_BRANCHES
+    @ TUNING: once measurements are complete, remove #if and hand-schedule.
     EXPORT_PC
     mov     r0, rSELF
     add     r1, rFP, #OFF_FP_SHADOWFRAME
@@ -2133,28 +1957,12 @@
     bl      MterpProfileBranch          @ (self, shadow_frame, offset)
     cmp     r0, #0
     bne     MterpOnStackReplacement     @ Note: offset must be in rINST
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-.L_op_if_lez_not_taken:
-    FETCH_ADVANCE_INST 2                @ update rPC, load rINST
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
-#else
-    mov     r0, rINST, lsr #8           @ r0<- AA
-    GET_VREG r2, r0                     @ r2<- vAA
-    FETCH_S rINST, 1                    @ rINST<- branch offset, in code units
-    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
-    cmp     r2, #0                      @ compare (vA, 0)
-    movgt rINST, #2              @ rINST<- inst branch dist for not-taken
-    adds    r1, rINST, rINST            @ convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
-    bmi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  @ extract opcode from rINST
-    GOTO_OPCODE ip                      @ jump to next instruction
 #endif
+    adds    r1, rINST, rINST            @ convert to bytes & set flags
+    FETCH_ADVANCE_INST_RB r1            @ update rPC, load rINST
+    bmi     MterpCheckSuspendAndContinue
+    GET_INST_OPCODE ip                  @ extract opcode from rINST
+    GOTO_OPCODE ip                      @ jump to next instruction
 
 
 /* ------------------------------ */
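The rewrite applied to every if-cmp handler above follows one pattern: FETCH_S leaves the taken branch offset in rINST, a conditional move overwrites it with 2 (the width of if-cmp in code units) when the test fails, and a single fall-through sequence then advances rPC either way, with bmi catching backward branches for the suspend check. Condensed from the op_if_eq handler:

    FETCH_S rINST, 1                    @ rINST<- taken offset, in code units
    cmp     r2, r3                      @ test the if-cmp condition
    movne   rINST, #2                   @ not taken: offset of next instruction
    adds    r2, rINST, rINST            @ code units -> bytes; N flag on backward
    FETCH_ADVANCE_INST_RB r2            @ rPC+= offset, load rINST
    bmi     MterpCheckSuspendAndContinue @ backward branch => suspend check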
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index e4825f0..c7c0fb5 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -1372,31 +1372,6 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.eq .L_op_if_eq_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_op_if_eq_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w1, wINST, #12              // w1<- B
     ubfx    w0, wINST, #8, #4           // w0<- A
     GET_VREG w3, w1                     // w3<- vB
@@ -1405,13 +1380,21 @@
     mov     w0, #2                      // Offset if branch not taken
     cmp     w2, w3                      // compare (vA, vB)
     csel    wINST, w1, w0, eq // Branch if true, stashing result in callee save reg.
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes, check sign
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi     MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
 
 
 /* ------------------------------ */
@@ -1427,31 +1410,6 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.ne .L_op_if_ne_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_op_if_ne_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w1, wINST, #12              // w1<- B
     ubfx    w0, wINST, #8, #4           // w0<- A
     GET_VREG w3, w1                     // w3<- vB
@@ -1460,13 +1418,21 @@
     mov     w0, #2                      // Offset if branch not taken
     cmp     w2, w3                      // compare (vA, vB)
     csel    wINST, w1, w0, ne // Branch if true, stashing result in callee save reg.
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes, check sign
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi     MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
 
 
 /* ------------------------------ */
@@ -1482,31 +1448,6 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.lt .L_op_if_lt_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_op_if_lt_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w1, wINST, #12              // w1<- B
     ubfx    w0, wINST, #8, #4           // w0<- A
     GET_VREG w3, w1                     // w3<- vB
@@ -1515,13 +1456,21 @@
     mov     w0, #2                      // Offset if branch not taken
     cmp     w2, w3                      // compare (vA, vB)
     csel    wINST, w1, w0, lt // Branch if true, stashing result in callee save reg.
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes, check sign
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi     MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
 
 
 /* ------------------------------ */
@@ -1537,31 +1486,6 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.ge .L_op_if_ge_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_op_if_ge_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w1, wINST, #12              // w1<- B
     ubfx    w0, wINST, #8, #4           // w0<- A
     GET_VREG w3, w1                     // w3<- vB
@@ -1570,13 +1494,21 @@
     mov     w0, #2                      // Offset if branch not taken
     cmp     w2, w3                      // compare (vA, vB)
     csel    wINST, w1, w0, ge // Branch if true, stashing result in callee save reg.
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes, check sign
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi     MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
 
 
 /* ------------------------------ */
@@ -1592,31 +1524,6 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.gt .L_op_if_gt_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_op_if_gt_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w1, wINST, #12              // w1<- B
     ubfx    w0, wINST, #8, #4           // w0<- A
     GET_VREG w3, w1                     // w3<- vB
@@ -1625,13 +1532,21 @@
     mov     w0, #2                      // Offset if branch not taken
     cmp     w2, w3                      // compare (vA, vB)
     csel    wINST, w1, w0, gt // Branch if true, stashing result in callee save reg.
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes, check sign
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi     MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
 
 
 /* ------------------------------ */
@@ -1647,31 +1562,6 @@
      * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
      */
     /* if-cmp vA, vB, +CCCC */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w1, wINST, #12              // w1<- B
-    ubfx    w0, wINST, #8, #4           // w0<- A
-    GET_VREG w3, w1                     // w3<- vB
-    GET_VREG w2, w0                     // w2<- vA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
-    cmp     w2, w3                      // compare (vA, vB)
-    b.le .L_op_if_le_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_op_if_le_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes, check sign
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi     MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w1, wINST, #12              // w1<- B
     ubfx    w0, wINST, #8, #4           // w0<- A
     GET_VREG w3, w1                     // w3<- vB
@@ -1680,13 +1570,21 @@
     mov     w0, #2                      // Offset if branch not taken
     cmp     w2, w3                      // compare (vA, vB)
     csel    wINST, w1, w0, le // Branch if true, stashing result in callee save reg.
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31            // Sign extend branch offset
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in xINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes, check sign
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi     MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
 
 
 /* ------------------------------ */
@@ -1702,42 +1600,27 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    cmp     w2, #0                      // compare (vA, 0)
-    b.eq .L_op_if_eqz_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_op_if_eqz_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi    MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w0, wINST, #8               // w0<- AA
     GET_VREG w2, w0                     // w2<- vAA
     FETCH_S w1, 1                       // w1<- branch offset, in code units
     mov     w0, #2                      // Branch offset if not taken
     cmp     w2, #0                      // compare (vA, 0)
     csel    wINST, w1, w0, eq // Branch if true, stashing result in callee save reg
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes & set flags
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi    MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
 
 
 /* ------------------------------ */
@@ -1753,42 +1636,27 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    cmp     w2, #0                      // compare (vA, 0)
-    b.ne .L_op_if_nez_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_op_if_nez_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi    MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w0, wINST, #8               // w0<- AA
     GET_VREG w2, w0                     // w2<- vAA
     FETCH_S w1, 1                       // w1<- branch offset, in code units
     mov     w0, #2                      // Branch offset if not taken
     cmp     w2, #0                      // compare (vA, 0)
     csel    wINST, w1, w0, ne // Branch if true, stashing result in callee save reg
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes & set flags
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi    MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
 
 
 /* ------------------------------ */
@@ -1804,42 +1672,27 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    cmp     w2, #0                      // compare (vA, 0)
-    b.lt .L_op_if_ltz_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_op_if_ltz_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi    MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w0, wINST, #8               // w0<- AA
     GET_VREG w2, w0                     // w2<- vAA
     FETCH_S w1, 1                       // w1<- branch offset, in code units
     mov     w0, #2                      // Branch offset if not taken
     cmp     w2, #0                      // compare (vA, 0)
     csel    wINST, w1, w0, lt // Branch if true, stashing result in callee save reg
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes & set flags
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi    MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
 
 
 /* ------------------------------ */
@@ -1855,42 +1708,27 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    cmp     w2, #0                      // compare (vA, 0)
-    b.ge .L_op_if_gez_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_op_if_gez_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi    MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w0, wINST, #8               // w0<- AA
     GET_VREG w2, w0                     // w2<- vAA
     FETCH_S w1, 1                       // w1<- branch offset, in code units
     mov     w0, #2                      // Branch offset if not taken
     cmp     w2, #0                      // compare (vA, 0)
     csel    wINST, w1, w0, ge // Branch if true, stashing result in callee save reg
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes & set flags
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi    MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
 
 
 /* ------------------------------ */
@@ -1906,42 +1744,27 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    cmp     w2, #0                      // compare (vA, 0)
-    b.gt .L_op_if_gtz_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_op_if_gtz_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi    MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w0, wINST, #8               // w0<- AA
     GET_VREG w2, w0                     // w2<- vAA
     FETCH_S w1, 1                       // w1<- branch offset, in code units
     mov     w0, #2                      // Branch offset if not taken
     cmp     w2, #0                      // compare (vA, 0)
     csel    wINST, w1, w0, gt // Branch if true, stashing result in callee save reg
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes & set flags
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi    MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
 
 
 /* ------------------------------ */
@@ -1957,42 +1780,27 @@
      * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
      */
     /* if-cmp vAA, +BBBB */
-#if MTERP_PROFILE_BRANCHES
-    lsr     w0, wINST, #8               // w0<- AA
-    GET_VREG w2, w0                     // w2<- vAA
-    FETCH_S wINST, 1                    // w1<- branch offset, in code units
-    cmp     w2, #0                      // compare (vA, 0)
-    b.le .L_op_if_lez_taken
-    FETCH_ADVANCE_INST 2                // update rPC, load wINST
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-.L_op_if_lez_taken:
-    EXPORT_PC
-    mov     x0, xSELF
-    add     x1, xFP, #OFF_FP_SHADOWFRAME
-    sbfm    x2, xINST, 0, 31
-    bl      MterpProfileBranch          // (self, shadow_frame, offset)
-    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
-    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
-    adds    w2, wINST, wINST            // convert to bytes & set flags
-    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
-    b.mi    MterpCheckSuspendAndContinue
-    GET_INST_OPCODE ip                  // extract opcode from wINST
-    GOTO_OPCODE ip                      // jump to next instruction
-#else
     lsr     w0, wINST, #8               // w0<- AA
     GET_VREG w2, w0                     // w2<- vAA
     FETCH_S w1, 1                       // w1<- branch offset, in code units
     mov     w0, #2                      // Branch offset if not taken
     cmp     w2, #0                      // compare (vA, 0)
     csel    wINST, w1, w0, le // Branch if true, stashing result in callee save reg
+#if MTERP_PROFILE_BRANCHES
+    // TUNING: once measurements are complete, remove #if and hand-schedule.
+    EXPORT_PC
+    mov     x0, xSELF
+    add     x1, xFP, #OFF_FP_SHADOWFRAME
+    sbfm    x2, xINST, 0, 31
+    bl      MterpProfileBranch          // (self, shadow_frame, offset)
+    cbnz    w0, MterpOnStackReplacement // Note: offset must be in wINST
+#endif
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     adds    w2, wINST, wINST            // convert to bytes & set flags
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     b.mi    MterpCheckSuspendAndContinue
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
-#endif
 
 
 /* ------------------------------ */
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
new file mode 100644
index 0000000..7ae1ab1
--- /dev/null
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -0,0 +1,13157 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'mips'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: mips/header.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+  Art assembly interpreter notes:
+
+  First validate assembly code by implementing an ExecuteXXXImpl() style body (doesn't
+  handle invoke; allows higher-level code to create the frame & shadow frame).
+
+  Once that's working, support direct entry code & eliminate the shadow frame (and
+  excess locals allocation).
+
+  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
+  base of the vreg array within the shadow frame.  Access the other fields
+  (dex_pc_, method_, and number_of_vregs_) via negative offsets.  For now, we'll
+  continue the shadow frame mechanism of double-storing object references - via
+  rFP & number_of_vregs_.
+
+ */
+
+#include "asm_support.h"
+
+#if (__mips==32) && (__mips_isa_rev>=2)
+#define MIPS32REVGE2    /* mips32r2 and greater */
+#if (__mips==32) && (__mips_isa_rev>=5)
+#define FPU64           /* 64 bit FPU */
+#if (__mips==32) && (__mips_isa_rev>=6)
+#define MIPS32REVGE6    /* mips32r6 and greater */
+#endif
+#endif
+#endif
+
+/* MIPS definitions and declarations
+
+   reg  nick      purpose
+   s0   rPC       interpreted program counter, used for fetching instructions
+   s1   rFP       interpreted frame pointer, used for accessing locals and args
+   s2   rSELF     self (Thread) pointer
+   s3   rIBASE    interpreted instruction base pointer, used for computed goto
+   s4   rINST     first 16-bit code unit of current instruction
+   s6   rREFS     base of object references in shadow frame (ideally, we'll get rid of this later).
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC s0
+#define rFP s1
+#define rSELF s2
+#define rIBASE s3
+#define rINST s4
+#define rOBJ s5
+#define rREFS s6
+#define rTEMP s7
+
+#define rARG0 a0
+#define rARG1 a1
+#define rARG2 a2
+#define rARG3 a3
+#define rRESULT0 v0
+#define rRESULT1 v1
+
+/* GP register definitions */
+#define zero    $0      /* always zero */
+#define AT      $at     /* assembler temp */
+#define v0      $2      /* return value */
+#define v1      $3
+#define a0      $4      /* argument registers */
+#define a1      $5
+#define a2      $6
+#define a3      $7
+#define t0      $8      /* temp registers (not saved across subroutine calls) */
+#define t1      $9
+#define t2      $10
+#define t3      $11
+#define t4      $12
+#define t5      $13
+#define t6      $14
+#define t7      $15
+#define ta0     $12     /* alias */
+#define ta1     $13
+#define ta2     $14
+#define ta3     $15
+#define s0      $16     /* saved across subroutine calls (callee saved) */
+#define s1      $17
+#define s2      $18
+#define s3      $19
+#define s4      $20
+#define s5      $21
+#define s6      $22
+#define s7      $23
+#define t8      $24     /* two more temp registers */
+#define t9      $25
+#define k0      $26     /* kernel temporary */
+#define k1      $27
+#define gp      $28     /* global pointer */
+#define sp      $29     /* stack pointer */
+#define s8      $30     /* one more callee saved */
+#define ra      $31     /* return address */
+
+/* FP register definitions */
+#define fv0    $f0
+#define fv0f   $f1
+#define fv1    $f2
+#define fv1f   $f3
+#define fa0    $f12
+#define fa0f   $f13
+#define fa1    $f14
+#define fa1f   $f15
+#define ft0    $f4
+#define ft0f   $f5
+#define ft1    $f6
+#define ft1f   $f7
+#define ft2    $f8
+#define ft2f   $f9
+#define ft3    $f10
+#define ft3f   $f11
+#define ft4    $f16
+#define ft4f   $f17
+#define ft5    $f18
+#define ft5f   $f19
+#define fs0    $f20
+#define fs0f   $f21
+#define fs1    $f22
+#define fs1f   $f23
+#define fs2    $f24
+#define fs2f   $f25
+#define fs3    $f26
+#define fs3f   $f27
+#define fs4    $f28
+#define fs4f   $f29
+#define fs5    $f30
+#define fs5f   $f31
+
+#ifndef MIPS32REVGE6
+#define fcc0   $fcc0
+#define fcc1   $fcc1
+#endif
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
+ * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
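+
+/*
+ * A minimal sketch (illustration only, not part of the generated interpreter):
+ * because rFP aliases &shadow_frame->vregs_[0], OFF_FP() yields negative
+ * offsets that reach back into the ShadowFrame header.  In C terms,
+ * "lw a0, OFF_FP_METHOD(rFP)" loads roughly:
+ *
+ *   method = *(void**)((char*)fp - SHADOWFRAME_VREGS_OFFSET
+ *                                + SHADOWFRAME_METHOD_OFFSET);
+ */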
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+#define EXPORT_PC() \
+    sw        rPC, OFF_FP_DEX_PC_PTR(rFP)
+
+#define EXPORT_DEX_PC(tmp) \
+    lw   tmp, OFF_FP_CODE_ITEM(rFP); \
+    sw   rPC, OFF_FP_DEX_PC_PTR(rFP); \
+    addu tmp, CODEITEM_INSNS_OFFSET; \
+    subu tmp, rPC, tmp; \
+    sra  tmp, tmp, 1; \
+    sw   tmp, OFF_FP_DEX_PC(rFP)
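+
+/*
+ * A sketch of the conversion EXPORT_DEX_PC performs, in C (illustration only;
+ * the function name is made up for exposition):
+ *
+ *   uint32_t DexPcOffset(const uint8_t* code_item, const uint8_t* pc) {
+ *       const uint8_t* insns = code_item + CODEITEM_INSNS_OFFSET;
+ *       return (uint32_t)(pc - insns) >> 1;   // byte delta -> 16-bit code units
+ *   }
+ */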
+
+/*
+ * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
+ */
+#define FETCH_INST() lhu rINST, (rPC)
+
+/*
+ * Fetch the next instruction from the specified offset.  Advances rPC
+ * to point to the next instruction.  "_count" is in 16-bit code units.
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss.  (This also implies that it must come after
+ * EXPORT_PC().)
+ */
+#define FETCH_ADVANCE_INST(_count) lhu rINST, ((_count)*2)(rPC); \
+    addu      rPC, rPC, ((_count) * 2)
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to rPC and rINST).
+ */
+#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
+    lhu       _dreg, ((_count)*2)(_sreg) ;            \
+    addu      _sreg, _sreg, (_count)*2
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
+ * rINST ahead of possible exception point.  Be sure to manually advance rPC
+ * later.
+ */
+#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
+
+/* Advance rPC by some number of code units. */
+#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
+
+/*
+ * Fetch the next instruction from an offset specified by rd.  Updates
+ * rPC to point to the next instruction.  "rd" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ */
+#define FETCH_ADVANCE_INST_RB(rd) addu rPC, rPC, rd; \
+    lhu       rINST, (rPC)
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC.  The
+ * "_count" value is in 16-bit code units.  Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
+#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
+
+/*
+ * Fetch one byte from an offset past the current PC.  Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
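+
+/*
+ * The FETCH family in C, assuming pc is a uint16_t* over the dex code units
+ * (a sketch for exposition, not ART code):
+ *
+ *   uint16_t Fetch(const uint16_t* pc, int count)  { return pc[count]; }
+ *   int16_t  FetchS(const uint16_t* pc, int count) { return (int16_t)pc[count]; }
+ *   uint8_t  FetchB(const uint16_t* pc, int count, int byte) {
+ *       return ((const uint8_t*)pc)[count * 2 + byte];
+ *   }
+ */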
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+#define GET_PREFETCHED_OPCODE(dreg, sreg)   andi     dreg, sreg, 255
+
+/*
+ * Begin executing the opcode in rd.
+ */
+#define GOTO_OPCODE(rd) sll rd, rd, 7; \
+    addu      rd, rIBASE, rd; \
+    jalr      zero, rd
+
+#define GOTO_OPCODE_BASE(_base, rd)  sll rd, rd, 7; \
+    addu      rd, _base, rd; \
+    jalr      zero, rd
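+
+/*
+ * Dispatch sketch (illustration only): every handler below is padded to 128
+ * bytes via .balign 128, so GOTO_OPCODE reduces to
+ *
+ *   handler = ibase + (opcode << 7);   // sll rd, rd, 7; addu rd, rIBASE, rd
+ *   goto *handler;                     // jalr zero, rd
+ */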
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
+
+#define GET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
+    .set noat; l.s rd, (AT); .set at
+
+#define SET_VREG(rd, rix) .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8)
+
+#define SET_VREG64(rlo, rhi, rix) .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rlo, 0(t8); \
+    sw        rhi, 4(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8)
+
+#ifdef FPU64
+#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rREFS, AT; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8); \
+    addu      t8, rFP, AT; \
+    mfhc1     AT, rlo; \
+    sw        AT, 4(t8); \
+    .set at; \
+    s.s       rlo, 0(t8)
+#else
+#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    s.s       rlo, 0(t8); \
+    s.s       rhi, 4(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8)
+#endif
+
+#define SET_VREG_OBJECT(rd, rix) .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        rd, 0(t8)
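+
+/*
+ * Double-store sketch (illustration only): every vreg write updates both the
+ * primary array (rFP) and the reference array (rREFS); non-object writes
+ * clear the reference slot so the GC will not treat the value as a pointer.
+ *
+ *   void SetVReg(uint32_t* fp, uint32_t* refs, int i, uint32_t v) {
+ *       fp[i] = v;  refs[i] = 0;
+ *   }
+ *   void SetVRegObject(uint32_t* fp, uint32_t* refs, int i, uint32_t v) {
+ *       fp[i] = v;  refs[i] = v;
+ *   }
+ */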
+
+/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
+#define SET_VREG_GOTO(rd, rix, dst) .set noreorder; \
+    sll       dst, dst, 7; \
+    addu      dst, rIBASE, dst; \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    jalr      zero, dst; \
+    sw        zero, 0(t8); \
+    .set reorder
+
+/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) .set noreorder; \
+    sll       dst, dst, 7; \
+    addu      dst, rIBASE, dst; \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rlo, 0(t8); \
+    sw        rhi, 4(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8); \
+    jalr      zero, dst; \
+    sw        zero, 4(t8); \
+    .set reorder
+
+#define SET_VREG_F(rd, rix) .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    s.s       rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8)
+
+#define GET_OPA(rd) srl rd, rINST, 8
+#ifdef MIPS32REVGE2
+#define GET_OPA4(rd) ext rd, rINST, 8, 4
+#else
+#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
+#endif
+#define GET_OPB(rd) srl rd, rINST, 12
+
+/*
+ * Form an Effective Address rd = rbase + roff<<n;
+ * Uses reg AT
+ */
+#define EASN(rd, rbase, roff, rshift) .set noat; \
+    sll       AT, roff, rshift; \
+    addu      rd, rbase, AT; \
+    .set at
+
+#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
+#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
+#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
+#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
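+
+/*
+ * Example (illustration only): EAS2(a3, rFP, a3) forms &fp[B] for 4-byte vreg
+ * slots, i.e. in C: a3 = (uint32_t*)((char*)rFP + (B << 2)).
+ */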
+
+/*
+ * Form an Effective Shift Right rd = rbase + roff>>n;
+ * Uses reg AT
+ */
+#define ESRN(rd, rbase, roff, rshift) .set noat; \
+    srl       AT, roff, rshift; \
+    addu      rd, rbase, AT; \
+    .set at
+
+#define LOAD_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+    .set noat; lw rd, 0(AT); .set at
+
+#define STORE_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+    .set noat; sw rd, 0(AT); .set at
+
+#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
+#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
+
+#define STORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+    sw        rhi, (off+4)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+    lw        rhi, (off+4)(rbase)
+
+#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
+#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
+
+#ifdef FPU64
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+    .set noat; \
+    mfhc1     AT, rlo; \
+    sw        AT, (off+4)(rbase); \
+    .set at
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+    .set noat; \
+    lw        AT, (off+4)(rbase); \
+    mthc1     AT, rlo; \
+    .set at
+#else
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+    s.s       rhi, (off+4)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+    l.s       rhi, (off+4)(rbase)
+#endif
+
+#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
+#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
+
+
+#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
+
+#define STACK_STORE(rd, off) sw rd, off(sp)
+#define STACK_LOAD(rd, off) lw rd, off(sp)
+#define CREATE_STACK(n) subu sp, sp, n
+#define DELETE_STACK(n) addu sp, sp, n
+
+#define LOAD_ADDR(dest, addr) la dest, addr
+#define LOAD_IMM(dest, imm) li dest, imm
+#define MOVE_REG(dest, src) move dest, src
+#define STACK_SIZE 128
+
+#define STACK_OFFSET_ARG04 16
+#define STACK_OFFSET_ARG05 20
+#define STACK_OFFSET_ARG06 24
+#define STACK_OFFSET_ARG07 28
+#define STACK_OFFSET_GP    84
+
+#define JAL(n) jal n
+#define BAL(n) bal n
+
+/*
+ * FP register usage restrictions:
+ * 1) We don't use the callee save FP registers so we don't have to save them.
+ * 2) We don't use the odd FP registers so we can share code with mips32r6.
+ */
+#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
+    STACK_STORE(ra, 124); \
+    STACK_STORE(s8, 120); \
+    STACK_STORE(s0, 116); \
+    STACK_STORE(s1, 112); \
+    STACK_STORE(s2, 108); \
+    STACK_STORE(s3, 104); \
+    STACK_STORE(s4, 100); \
+    STACK_STORE(s5, 96); \
+    STACK_STORE(s6, 92); \
+    STACK_STORE(s7, 88);
+
+#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
+    STACK_LOAD(s7, 88); \
+    STACK_LOAD(s6, 92); \
+    STACK_LOAD(s5, 96); \
+    STACK_LOAD(s4, 100); \
+    STACK_LOAD(s3, 104); \
+    STACK_LOAD(s2, 108); \
+    STACK_LOAD(s1, 112); \
+    STACK_LOAD(s0, 116); \
+    STACK_LOAD(s8, 120); \
+    STACK_LOAD(ra, 124); \
+    DELETE_STACK(STACK_SIZE)
+
+/* File: mips/entry.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+    .text
+    .align 2
+    .global ExecuteMterpImpl
+    .ent    ExecuteMterpImpl
+    .frame sp, STACK_SIZE, ra
+/*
+ * On entry:
+ *  a0  Thread* self
+ *  a1  code_item
+ *  a2  ShadowFrame
+ *  a3  JValue* result_register
+ *
+ */
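+
+/*
+ * The equivalent C declaration is roughly (a sketch of the convention above;
+ * the authoritative prototype lives in the runtime's mterp glue):
+ *
+ *   extern "C" bool ExecuteMterpImpl(Thread* self,
+ *                                    const DexFile::CodeItem* code_item,
+ *                                    ShadowFrame* shadow_frame,
+ *                                    JValue* result_register);
+ */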
+
+ExecuteMterpImpl:
+    .set noreorder
+    .cpload t9
+    .set reorder
+/* Save to the stack. Frame size = STACK_SIZE */
+    STACK_STORE_FULL()
+/* This directive makes sure all subsequent jal calls restore gp at a known offset */
+    .cprestore STACK_OFFSET_GP
+
+    /* Remember the return register */
+    sw      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
+
+    /* Remember the code_item */
+    sw      a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
+
+    /* set up "named" registers */
+    move    rSELF, a0
+    lw      a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
+    addu    rFP, a2, SHADOWFRAME_VREGS_OFFSET     # point to vregs[] (i.e. - the Dalvik registers).
+    EAS2(rREFS, rFP, a0)                          # point to reference array in shadow frame
+    lw      a0, SHADOWFRAME_DEX_PC_OFFSET(a2)     # Get starting dex_pc
+    addu    rPC, a1, CODEITEM_INSNS_OFFSET        # Point to base of insns[]
+    EAS1(rPC, rPC, a0)                            # Create direct pointer to 1st dex opcode
+
+    EXPORT_PC()
+
+    /* Starting ibase */
+    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+
+    /* start executing the instruction at rPC */
+    FETCH_INST()                           # load rINST from rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+    /* NOTE: no fallthrough */
+
+
+    .global artMterpAsmInstructionStart
+    .type   artMterpAsmInstructionStart, %function
+artMterpAsmInstructionStart = .L_op_nop
+    .text
+
+/* ------------------------------ */
+    .balign 128
+.L_op_nop: /* 0x00 */
+/* File: mips/op_nop.S */
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move: /* 0x01 */
+/* File: mips/op_move.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    GET_OPB(a1)                            #  a1 <- B from 15:12
+    GET_OPA4(a0)                           #  a0 <- A from 11:8
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[B]
+    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
+    .if 0
+    SET_VREG_OBJECT(a2, a0)                #  fp[A] <- a2
+    .else
+    SET_VREG(a2, a0)                       #  fp[A] <- a2
+    .endif
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_from16: /* 0x02 */
+/* File: mips/op_move_from16.S */
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    GET_OPA(a0)                            #  a0 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    .if 0
+    SET_VREG_OBJECT(a2, a0)                #  fp[AA] <- a2
+    .else
+    SET_VREG(a2, a0)                       #  fp[AA] <- a2
+    .endif
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_16: /* 0x03 */
+/* File: mips/op_move_16.S */
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    FETCH(a1, 2)                           #  a1 <- BBBB
+    FETCH(a0, 1)                           #  a0 <- AAAA
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    .if 0
+    SET_VREG_OBJECT(a2, a0)                #  fp[AAAA] <- a2
+    .else
+    SET_VREG(a2, a0)                       #  fp[AAAA] <- a2
+    .endif
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_wide: /* 0x04 */
+/* File: mips/op_move_wide.S */
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[B]
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    SET_VREG64(a0, a1, a2)                 #  fp[A] <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_wide_from16: /* 0x05 */
+/* File: mips/op_move_wide_from16.S */
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    FETCH(a3, 1)                           #  a3 <- BBBB
+    GET_OPA(a2)                            #  a2 <- AA
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    SET_VREG64(a0, a1, a2)                 #  fp[AA] <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_wide_16: /* 0x06 */
+/* File: mips/op_move_wide_16.S */
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    FETCH(a3, 2)                           #  a3 <- BBBB
+    FETCH(a2, 1)                           #  a2 <- AAAA
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    SET_VREG64(a0, a1, a2)                 #  fp[AAAA] <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_object: /* 0x07 */
+/* File: mips/op_move_object.S */
+/* File: mips/op_move.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    GET_OPB(a1)                            #  a1 <- B from 15:12
+    GET_OPA4(a0)                           #  a0 <- A from 11:8
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[B]
+    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
+    .if 1
+    SET_VREG_OBJECT(a2, a0)                #  fp[A] <- a2
+    .else
+    SET_VREG(a2, a0)                       #  fp[A] <- a2
+    .endif
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_object_from16: /* 0x08 */
+/* File: mips/op_move_object_from16.S */
+/* File: mips/op_move_from16.S */
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    GET_OPA(a0)                            #  a0 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    .if 1
+    SET_VREG_OBJECT(a2, a0)                #  fp[AA] <- a2
+    .else
+    SET_VREG(a2, a0)                       #  fp[AA] <- a2
+    .endif
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_object_16: /* 0x09 */
+/* File: mips/op_move_object_16.S */
+/* File: mips/op_move_16.S */
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    FETCH(a1, 2)                           #  a1 <- BBBB
+    FETCH(a0, 1)                           #  a0 <- AAAA
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    .if 1
+    SET_VREG_OBJECT(a2, a0)                #  fp[AAAA] <- a2
+    .else
+    SET_VREG(a2, a0)                       #  fp[AAAA] <- a2
+    .endif
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_result: /* 0x0a */
+/* File: mips/op_move_result.S */
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    lw    a0, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
+    lw    a0, 0(a0)                        #  a0 <- result.i
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    .if 0
+    SET_VREG_OBJECT(a0, a2)                #  fp[AA] <- a0
+    .else
+    SET_VREG(a0, a2)                       #  fp[AA] <- a0
+    .endif
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_result_wide: /* 0x0b */
+/* File: mips/op_move_result_wide.S */
+    /* move-result-wide vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    lw    a3, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- retval.j
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    SET_VREG64(a0, a1, a2)                 #  fp[AA] <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_result_object: /* 0x0c */
+/* File: mips/op_move_result_object.S */
+/* File: mips/op_move_result.S */
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    lw    a0, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
+    lw    a0, 0(a0)                        #  a0 <- result.i
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    .if 1
+    SET_VREG_OBJECT(a0, a2)                #  fp[AA] <- a0
+    .else
+    SET_VREG(a0, a2)                       #  fp[AA] <- a0
+    .endif
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_exception: /* 0x0d */
+/* File: mips/op_move_exception.S */
+    /* move-exception vAA */
+    GET_OPA(a2)                                 #  a2 <- AA
+    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)    #  get exception obj
+    FETCH_ADVANCE_INST(1)                       #  advance rPC, load rINST
+    SET_VREG_OBJECT(a3, a2)                     #  fp[AA] <- exception obj
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    sw    zero, THREAD_EXCEPTION_OFFSET(rSELF)  #  clear exception
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return_void: /* 0x0e */
+/* File: mips/op_return_void.S */
+    .extern MterpThreadFenceForConstructor
+    JAL(MterpThreadFenceForConstructor)
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    move      a0, rSELF
+    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqz      ra, 1f
+    JAL(MterpSuspendCheck)                 # (self)
+1:
+    move      v0, zero
+    move      v1, zero
+    b         MterpReturn
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return: /* 0x0f */
+/* File: mips/op_return.S */
+    /*
+     * Return a 32-bit value.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    JAL(MterpThreadFenceForConstructor)
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    move      a0, rSELF
+    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqz      ra, 1f
+    JAL(MterpSuspendCheck)                 # (self)
+1:
+    GET_OPA(a2)                            #  a2 <- AA
+    GET_VREG(v0, a2)                       #  v0 <- vAA
+    move      v1, zero
+    b         MterpReturn
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return_wide: /* 0x10 */
+/* File: mips/op_return_wide.S */
+    /*
+     * Return a 64-bit value.
+     */
+    /* return-wide vAA */
+    .extern MterpThreadFenceForConstructor
+    JAL(MterpThreadFenceForConstructor)
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    move      a0, rSELF
+    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqz      ra, 1f
+    JAL(MterpSuspendCheck)                 # (self)
+1:
+    GET_OPA(a2)                            #  a2 <- AA
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AA]
+    LOAD64(v0, v1, a2)                     #  v0/v1 <- vAA/vAA+1
+    b         MterpReturn
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return_object: /* 0x11 */
+/* File: mips/op_return_object.S */
+/* File: mips/op_return.S */
+    /*
+     * Return a 32-bit value.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    JAL(MterpThreadFenceForConstructor)
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    move      a0, rSELF
+    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqz      ra, 1f
+    JAL(MterpSuspendCheck)                 # (self)
+1:
+    GET_OPA(a2)                            #  a2 <- AA
+    GET_VREG(v0, a2)                       #  v0 <- vAA
+    move      v1, zero
+    b         MterpReturn
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_4: /* 0x12 */
+/* File: mips/op_const_4.S */
+    # const/4 vA,                          /* +B */
+    sll       a1, rINST, 16                #  a1 <- Bxxx0000
+    GET_OPA(a0)                            #  a0 <- A+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    sra       a1, a1, 28                   #  a1 <- sssssssB (sign-extended)
+    and       a0, a0, 15
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a0, t0)              #  fp[A] <- a1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_16: /* 0x13 */
+/* File: mips/op_const_16.S */
+    # const/16 vAA,                        /* +BBBB */
+    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const: /* 0x14 */
+/* File: mips/op_const.S */
+    # const vAA,                           /* +BBBBbbbb */
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH(a0, 1)                           #  a0 <- bbbb (low)
+    FETCH(a1, 2)                           #  a1 <- BBBB (high)
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    sll       a1, a1, 16
+    or        a0, a1, a0                   #  a0 <- BBBBbbbb
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_high16: /* 0x15 */
+/* File: mips/op_const_high16.S */
+    # const/high16 vAA,                    /* +BBBB0000 */
+    FETCH(a0, 1)                           #  a0 <- 0000BBBB (zero-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       a0, a0, 16                   #  a0 <- BBBB0000
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_wide_16: /* 0x16 */
+/* File: mips/op_const_wide_16.S */
+    # const-wide/16 vAA,                   /* +BBBB */
+    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    sra       a1, a0, 31                   #  a1 <- ssssssss
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, a3)                 #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_wide_32: /* 0x17 */
+/* File: mips/op_const_wide_32.S */
+    # const-wide/32 vAA,                   /* +BBBBbbbb */
+    FETCH(a0, 1)                           #  a0 <- 0000bbbb (low)
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH_S(a2, 2)                         #  a2 <- ssssBBBB (high)
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    sll       a2, a2, 16
+    or        a0, a0, a2                   #  a0 <- BBBBbbbb
+    sra       a1, a0, 31                   #  a1 <- ssssssss
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, a3)                 #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_wide: /* 0x18 */
+/* File: mips/op_const_wide.S */
+    # const-wide vAA,                      /* +HHHHhhhhBBBBbbbb */
+    FETCH(a0, 1)                           #  a0 <- bbbb (low)
+    FETCH(a1, 2)                           #  a1 <- BBBB (low middle)
+    FETCH(a2, 3)                           #  a2 <- hhhh (high middle)
+    sll       a1, 16                       #  a1 <- BBBB0000
+    or        a0, a1                       #  a0 <- BBBBbbbb (low word)
+    FETCH(a3, 4)                           #  a3 <- HHHH (high)
+    GET_OPA(t1)                            #  t1 <- AA
+    sll       a3, 16
+    or        a1, a3, a2                   #  a1 <- HHHHhhhh (high word)
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, t1)                 #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_wide_high16: /* 0x19 */
+/* File: mips/op_const_wide_high16.S */
+    # const-wide/high16 vAA,               /* +BBBB000000000000 */
+    FETCH(a1, 1)                           #  a1 <- 0000BBBB (zero-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    li        a0, 0                        #  a0 <- 00000000
+    sll       a1, 16                       #  a1 <- BBBB0000
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, a3)                 #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_string: /* 0x1a */
+/* File: mips/op_const_string.S */
+    # const/string vAA, String             /* BBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                        # a0 <- BBBB
+    GET_OPA(a1)                         # a1 <- AA
+    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
+    move   a3, rSELF
+    JAL(MterpConstString)               # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST(2)                    # load rINST
+    bnez   v0, MterpPossibleException
+    ADVANCE(2)                          # advance rPC
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_string_jumbo: /* 0x1b */
+/* File: mips/op_const_string_jumbo.S */
+    # const/string vAA, String          /* BBBBBBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                        # a0 <- bbbb (low)
+    FETCH(a2, 2)                        # a2 <- BBBB (high)
+    GET_OPA(a1)                         # a1 <- AA
+    sll    a2, a2, 16
+    or     a0, a0, a2                   # a0 <- BBBBbbbb
+    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
+    move   a3, rSELF
+    JAL(MterpConstString)               # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST(3)                    # load rINST
+    bnez   v0, MterpPossibleException
+    ADVANCE(3)                          # advance rPC
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_class: /* 0x1c */
+/* File: mips/op_const_class.S */
+    # const/class vAA, Class               /* BBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                        # a0 <- BBBB
+    GET_OPA(a1)                         # a1 <- AA
+    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
+    move   a3, rSELF
+    JAL(MterpConstClass)
+    PREFETCH_INST(2)                    # load rINST
+    bnez   v0, MterpPossibleException
+    ADVANCE(2)                          # advance rPC
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_monitor_enter: /* 0x1d */
+/* File: mips/op_monitor_enter.S */
+    /*
+     * Synchronize on an object.
+     */
+    /* monitor-enter vAA */
+    EXPORT_PC()
+    GET_OPA(a2)                            # a2 <- AA
+    GET_VREG(a0, a2)                       # a0 <- vAA (object)
+    move   a1, rSELF                       # a1 <- self
+    JAL(artLockObjectFromCode)             # v0 <- artLockObject(obj, self)
+    bnez v0, MterpException
+    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_monitor_exit: /* 0x1e */
+/* File: mips/op_monitor_exit.S */
+    /*
+     * Unlock an object.
+     *
+     * Exceptions that occur when unlocking a monitor need to appear as
+     * if they happened at the following instruction.  See the Dalvik
+     * instruction spec.
+     */
+    /* monitor-exit vAA */
+    EXPORT_PC()
+    GET_OPA(a2)                            # a2 <- AA
+    GET_VREG(a0, a2)                       # a0 <- vAA (object)
+    move   a1, rSELF                       # a1 <- self
+    JAL(artUnlockObjectFromCode)           # v0 <- artUnlockObject(obj, self)
+    bnez v0, MterpException
+    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_check_cast: /* 0x1f */
+/* File: mips/op_check_cast.S */
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    # check-cast vAA, class                /* BBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                           #  a0 <- BBBB
+    GET_OPA(a1)                            #  a1 <- AA
+    EAS2(a1, rFP, a1)                      #  a1 <- &object
+    lw     a2, OFF_FP_METHOD(rFP)          #  a2 <- method
+    move   a3, rSELF                       #  a3 <- self
+    JAL(MterpCheckCast)                    #  v0 <- CheckCast(index, &obj, method, self)
+    PREFETCH_INST(2)
+    bnez   v0, MterpPossibleException
+    ADVANCE(2)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_instance_of: /* 0x20 */
+/* File: mips/op_instance_of.S */
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     */
+    # instance-of vA, vB, class            /* CCCC */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- CCCC
+    GET_OPB(a1)                            # a1 <- B
+    EAS2(a1, rFP, a1)                      # a1 <- &object
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- method
+    move  a3, rSELF                        # a3 <- self
+    GET_OPA4(rOBJ)                         # rOBJ <- A+
+    JAL(MterpInstanceOf)                   # v0 <- Mterp(index, &obj, method, self)
+    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
+    PREFETCH_INST(2)                       # load rINST
+    bnez a1, MterpException
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t0)            # vA <- v0
+
+/* ------------------------------ */
+    .balign 128
+.L_op_array_length: /* 0x21 */
+/* File: mips/op_array_length.S */
+    /*
+     * Return the length of an array.
+     */
+    GET_OPB(a1)                            #  a1 <- B
+    GET_OPA4(a2)                           #  a2 <- A+
+    GET_VREG(a0, a1)                       #  a0 <- vB (object ref)
+    # is object null?
+    beqz      a0, common_errNullObject     #  yup, fail
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- array length
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a3, a2, t0)              #  vA <- length
+
+/* ------------------------------ */
+    .balign 128
+.L_op_new_instance: /* 0x22 */
+/* File: mips/op_new_instance.S */
+    /*
+     * Create a new instance of a class.
+     */
+    # new-instance vAA, class              /* BBBB */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rSELF
+    move   a2, rINST
+    JAL(MterpNewInstance)
+    beqz   v0, MterpPossibleException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_new_array: /* 0x23 */
+/* File: mips/op_new_array.S */
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array vA, vB, class@CCCC */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rPC
+    move   a2, rINST
+    move   a3, rSELF
+    JAL(MterpNewArray)
+    beqz   v0, MterpPossibleException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_filled_new_array: /* 0x24 */
+/* File: mips/op_filled_new_array.S */
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, type       /* BBBB */
+    .extern MterpFilledNewArray
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME     # a0 <- shadow frame
+    move   a1, rPC
+    move   a2, rSELF
+    JAL(MterpFilledNewArray)               #  v0 <- helper(shadow_frame, pc, self)
+    beqz      v0,  MterpPossibleException  #  has exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_filled_new_array_range: /* 0x25 */
+/* File: mips/op_filled_new_array_range.S */
+/* File: mips/op_filled_new_array.S */
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, type       /* BBBB */
+    .extern MterpFilledNewArrayRange
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME     # a0 <- shadow frame
+    move   a1, rPC
+    move   a2, rSELF
+    JAL(MterpFilledNewArrayRange)          #  v0 <- helper(shadow_frame, pc, self)
+    beqz      v0,  MterpPossibleException  #  has exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_fill_array_data: /* 0x26 */
+/* File: mips/op_fill_array_data.S */
+    /* fill-array-data vAA, +BBBBBBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       a1, a1, 16                   #  a1 <- BBBBbbbb
+    or        a1, a0, a1                   #  a1 <- BBBBbbbb
+    GET_VREG(a0, a3)                       #  a0 <- vAA (array object)
+    EAS1(a1, rPC, a1)                      #  a1 <- PC + BBBBbbbb*2 (array data off.)
+    JAL(MterpFillArrayData)                #  v0 <- Mterp(obj, payload)
+    beqz      v0,  MterpPossibleException  #  has exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_throw: /* 0x27 */
+/* File: mips/op_throw.S */
+    /*
+     * Throw an exception object in the current thread.
+     */
+    /* throw vAA */
+    EXPORT_PC()                              #  exception handler can throw
+    GET_OPA(a2)                              #  a2 <- AA
+    GET_VREG(a1, a2)                         #  a1 <- vAA (exception object)
+    # null object?
+    beqz  a1, common_errNullObject           #  yes, throw an NPE instead
+    sw    a1, THREAD_EXCEPTION_OFFSET(rSELF) #  thread->exception <- obj
+    b         MterpException
+
+/* ------------------------------ */
+    .balign 128
+.L_op_goto: /* 0x28 */
+/* File: mips/op_goto.S */
+    /*
+     * Unconditional branch, 8-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto +AA */
+#if MTERP_PROFILE_BRANCHES
+    sll       a0, rINST, 16                #  a0 <- AAxx0000
+    sra       rINST, a0, 24                #  rINST <- ssssssAA (sign-extended)
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+    addu      a2, rINST, rINST             #  a2 <- byte offset
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+    /* If backwards branch refresh rIBASE */
+    bgez      a2, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#else
+    sll       a0, rINST, 16                #  a0 <- AAxx0000
+    sra       rINST, a0, 24                #  rINST <- ssssssAA (sign-extended)
+    addu      a2, rINST, rINST             #  a2 <- byte offset
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+    /* If backwards branch refresh rIBASE */
+    bgez      a2, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
+
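+/*
+ * Illustration (not generated code): for goto with AA = 0xFD, the
+ * sll/sra pair sign-extends rINST to -3 code units; doubling gives a
+ * byte offset of -6, so FETCH_ADVANCE_INST_RB moves rPC back three
+ * 16-bit code units and the backward branch takes the suspend-check path.
+ */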
+/* ------------------------------ */
+    .balign 128
+.L_op_goto_16: /* 0x29 */
+/* File: mips/op_goto_16.S */
+    /*
+     * Unconditional branch, 16-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto/16 +AAAA */
+#if MTERP_PROFILE_BRANCHES
+    FETCH_S(rINST, 1)                      #  rINST <- ssssAAAA (sign-extended)
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgez      a1, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#else
+    FETCH_S(rINST, 1)                      #  rINST <- ssssAAAA (sign-extended)
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgez      a1, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
+
+/* ------------------------------ */
+    .balign 128
+.L_op_goto_32: /* 0x2a */
+/* File: mips/op_goto_32.S */
+    /*
+     * Unconditional branch, 32-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     *
+     * Unlike most opcodes, this one is allowed to branch to itself, so
+     * our "backward branch" test must be "<=0" instead of "<0".
+     */
+    /* goto/32 +AAAAAAAA */
+#if MTERP_PROFILE_BRANCHES
+    FETCH(a0, 1)                           #  a0 <- aaaa (lo)
+    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
+    sll       a1, a1, 16
+    or        rINST, a0, a1                #  rINST <- AAAAaaaa
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgtz      a1, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#else
+    FETCH(a0, 1)                           #  a0 <- aaaa (lo)
+    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
+    sll       a1, a1, 16
+    or        rINST, a0, a1                #  rINST <- AAAAaaaa
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgtz      a1, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
+
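+/*
+ * Illustration (not generated code): goto/32 +0 branches to itself, so
+ * the doubled offset in a1 is 0; using bgtz instead of bgez classifies
+ * that self-branch as backward and routes it through
+ * MterpCheckSuspendAndContinue, keeping an empty loop interruptible.
+ */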
+/* ------------------------------ */
+    .balign 128
+.L_op_packed_switch: /* 0x2b */
+/* File: mips/op_packed_switch.S */
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBB */
+#if MTERP_PROFILE_BRANCHES
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       t0, a1, 16
+    or        a0, a0, t0                   #  a0 <- BBBBbbbb
+    GET_VREG(a1, a3)                       #  a1 <- vAA
+    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
+    JAL(MterpDoPackedSwitch)               #  v0 <- code-unit branch offset
+    move      rINST, v0
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgtz      a1, .Lop_packed_switch_finish
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+#else
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       t0, a1, 16
+    or        a0, a0, t0                   #  a0 <- BBBBbbbb
+    GET_VREG(a1, a3)                       #  a1 <- vAA
+    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
+    JAL(MterpDoPackedSwitch)               #  v0 <- code-unit branch offset
+    move      rINST, v0
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgtz      a1, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
+
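+/*
+ * For reference (Dex format, not generated code): the payload at
+ * PC + BBBBbbbb*2 is a packed-switch-payload: ushort ident (0x0100),
+ * ushort size, int first_key, then size 4-byte relative branch targets;
+ * MterpDoPackedSwitch indexes it with vAA - first_key and returns a
+ * code-unit offset that the handler doubles into a byte offset.
+ */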
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sparse_switch: /* 0x2c */
+/* File: mips/op_sparse_switch.S */
+/* File: mips/op_packed_switch.S */
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBB */
+#if MTERP_PROFILE_BRANCHES
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       t0, a1, 16
+    or        a0, a0, t0                   #  a0 <- BBBBbbbb
+    GET_VREG(a1, a3)                       #  a1 <- vAA
+    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
+    JAL(MterpDoSparseSwitch)               #  v0 <- code-unit branch offset
+    move      rINST, v0
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgtz      a1, .Lop_sparse_switch_finish
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+#else
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       t0, a1, 16
+    or        a0, a0, t0                   #  a0 <- BBBBbbbb
+    GET_VREG(a1, a3)                       #  a1 <- vAA
+    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
+    JAL(MterpDoSparseSwitch)               #  v0 <- code-unit branch offset
+    move      rINST, v0
+    addu      a1, rINST, rINST             #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgtz      a1, 1f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
+
+
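+/*
+ * For reference (Dex format, not generated code): a sparse-switch-payload
+ * differs only in layout: ushort ident (0x0200), ushort size, then size
+ * sorted int keys followed by size int targets; MterpDoSparseSwitch
+ * searches the sorted keys for vAA instead of indexing directly.
+ */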
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmpl_float: /* 0x2d */
+/* File: mips/op_cmpl_float.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register rTEMP based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1 or 1};  // one or both operands was NaN
+     *
+     * for: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+
+    /* "clasic" form */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8
+    GET_VREG_F(ft0, a2)
+    GET_VREG_F(ft1, a3)
+#ifdef MIPS32REVGE6
+    cmp.lt.s  ft2, ft0, ft1                # Is ft0 < ft1 (ordered)
+    li        rTEMP, -1
+    bc1nez    ft2, .Lop_cmpl_float_finish
+    cmp.lt.s  ft2, ft1, ft0
+    li        rTEMP, 1
+    bc1nez    ft2, .Lop_cmpl_float_finish
+    cmp.eq.s  ft2, ft0, ft1
+    li        rTEMP, 0
+    bc1nez    ft2, .Lop_cmpl_float_finish
+    b         .Lop_cmpl_float_nan
+#else
+    c.olt.s   fcc0, ft0, ft1               # Is ft0 < ft1
+    li        rTEMP, -1
+    bc1t      fcc0, .Lop_cmpl_float_finish
+    c.olt.s   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, .Lop_cmpl_float_finish
+    c.eq.s    fcc0, ft0, ft1
+    li        rTEMP, 0
+    bc1t      fcc0, .Lop_cmpl_float_finish
+    b         .Lop_cmpl_float_nan
+#endif
+
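+/*
+ * Note (Dalvik semantics): cmpl-float and cmpg-float share this template
+ * and differ only at the _nan label reached when all ordered compares
+ * fail: cmpl yields -1 on NaN while cmpg yields 1, matching the Java
+ * "<" and ">" comparison idioms respectively.
+ */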
+/* ------------------------------ */
+    .balign 128
+.L_op_cmpg_float: /* 0x2e */
+/* File: mips/op_cmpg_float.S */
+/* File: mips/op_cmpl_float.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register rTEMP based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1 or 1};  // one or both operands was NaN
+     *
+     * for: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+
+    /* "clasic" form */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8
+    GET_VREG_F(ft0, a2)
+    GET_VREG_F(ft1, a3)
+#ifdef MIPS32REVGE6
+    cmp.lt.s  ft2, ft0, ft1                # Is ft0 < ft1 (ordered)
+    li        rTEMP, -1
+    bc1nez    ft2, .Lop_cmpg_float_finish
+    cmp.lt.s  ft2, ft1, ft0
+    li        rTEMP, 1
+    bc1nez    ft2, .Lop_cmpg_float_finish
+    cmp.eq.s  ft2, ft0, ft1
+    li        rTEMP, 0
+    bc1nez    ft2, .Lop_cmpg_float_finish
+    b         .Lop_cmpg_float_nan
+#else
+    c.olt.s   fcc0, ft0, ft1               # Is ft0 < ft1
+    li        rTEMP, -1
+    bc1t      fcc0, .Lop_cmpg_float_finish
+    c.olt.s   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, .Lop_cmpg_float_finish
+    c.eq.s    fcc0, ft0, ft1
+    li        rTEMP, 0
+    bc1t      fcc0, .Lop_cmpg_float_finish
+    b         .Lop_cmpg_float_nan
+#endif
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmpl_double: /* 0x2f */
+/* File: mips/op_cmpl_double.S */
+    /*
+     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+     * into the destination register (rTEMP) based on the comparison results.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * See op_cmpl_float for more details.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       rOBJ, a0, 255                #  s5 <- BB
+    srl       t0, a0, 8                    #  t0 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  s5 <- &fp[BB]
+    EAS2(t0, rFP, t0)                      #  t0 <- &fp[CC]
+    LOAD64_F(ft0, ft0f, rOBJ)
+    LOAD64_F(ft1, ft1f, t0)
+#ifdef MIPS32REVGE6
+    cmp.lt.d  ft2, ft0, ft1                # Is ft0 < ft1 (ordered)
+    li        rTEMP, -1
+    bc1nez    ft2, .Lop_cmpl_double_finish
+    cmp.lt.d  ft2, ft1, ft0
+    li        rTEMP, 1
+    bc1nez    ft2, .Lop_cmpl_double_finish
+    cmp.eq.d  ft2, ft0, ft1
+    li        rTEMP, 0
+    bc1nez    ft2, .Lop_cmpl_double_finish
+    b         .Lop_cmpl_double_nan
+#else
+    c.olt.d   fcc0, ft0, ft1
+    li        rTEMP, -1
+    bc1t      fcc0, .Lop_cmpl_double_finish
+    c.olt.d   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, .Lop_cmpl_double_finish
+    c.eq.d    fcc0, ft0, ft1
+    li        rTEMP, 0
+    bc1t      fcc0, .Lop_cmpl_double_finish
+    b         .Lop_cmpl_double_nan
+#endif
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmpg_double: /* 0x30 */
+/* File: mips/op_cmpg_double.S */
+/* File: mips/op_cmpl_double.S */
+    /*
+     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+     * into the destination register (rTEMP) based on the comparison results.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * See op_cmpl_float for more details.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       rOBJ, a0, 255                #  s5 <- BB
+    srl       t0, a0, 8                    #  t0 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  s5 <- &fp[BB]
+    EAS2(t0, rFP, t0)                      #  t0 <- &fp[CC]
+    LOAD64_F(ft0, ft0f, rOBJ)
+    LOAD64_F(ft1, ft1f, t0)
+#ifdef MIPS32REVGE6
+    cmp.lt.d  ft2, ft0, ft1                # Is ft0 < ft1 (ordered)
+    li        rTEMP, -1
+    bc1nez    ft2, .Lop_cmpg_double_finish
+    cmp.lt.d  ft2, ft1, ft0
+    li        rTEMP, 1
+    bc1nez    ft2, .Lop_cmpg_double_finish
+    cmp.eq.d  ft2, ft0, ft1
+    li        rTEMP, 0
+    bc1nez    ft2, .Lop_cmpg_double_finish
+    b         .Lop_cmpg_double_nan
+#else
+    c.olt.d   fcc0, ft0, ft1
+    li        rTEMP, -1
+    bc1t      fcc0, .Lop_cmpg_double_finish
+    c.olt.d   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, .Lop_cmpg_double_finish
+    c.eq.d    fcc0, ft0, ft1
+    li        rTEMP, 0
+    bc1t      fcc0, .Lop_cmpg_double_finish
+    b         .Lop_cmpg_double_nan
+#endif
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmp_long: /* 0x31 */
+/* File: mips/op_cmp_long.S */
+    /*
+     * Compare two 64-bit values
+     *    x = y     return  0
+     *    x < y     return -1
+     *    x > y     return  1
+     *
+     * I think I can improve on the ARM code by the following observation:
+     *    slt   t0,  x.hi, y.hi;        # (x.hi < y.hi) ? 1:0
+     *    sgt   t1,  x.hi, y.hi;        # (x.hi > y.hi) ? 1:0
+     *    subu  v0, t1, t0              # v0 <- -1:1:0 for [ < > = ]
+     */
+    /* cmp-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[CC]
+    LOAD64(a0, a1, a2)                     #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, a3)                     #  a2/a3 <- vCC/vCC+1
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    slt       t0, a1, a3                   #  compare hi
+    sgt       t1, a1, a3
+    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0)
+    bnez      v0, .Lop_cmp_long_finish
+    # at this point x.hi==y.hi
+    sltu      t0, a0, a2                   #  compare lo
+    sgtu      t1, a0, a2
+    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0) for [< > =]
+
+.Lop_cmp_long_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t0)            #  vAA <- v0
+
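+/*
+ * Worked example (illustration only): for vBB = 0x0000000100000000 and
+ * vCC = 0x00000000FFFFFFFF, the signed high-word compare gives t0 = 0
+ * and t1 = 1, so v0 = t1 - t0 = 1 (vBB > vCC) with no low-word compare;
+ * the unsigned sltu/sgtu pair runs only when the high words match.
+ */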
+/* ------------------------------ */
+    .balign 128
+.L_op_if_eq: /* 0x32 */
+/* File: mips/op_if_eq.S */
+/* File: mips/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    bne a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- branch dist (2 code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a2, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+    bgez      a2, .L_op_if_eq_finish
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+
+
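+/*
+ * Note (template expansion): bincmp.S is instantiated with the *reverse*
+ * condition, so if-eq assembles the bne above; the branch to local label
+ * 1 fires exactly when vA != vB and loads the fixed not-taken distance,
+ * while the taken path keeps the signed CCCC offset fetched into rINST.
+ */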
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_ne: /* 0x33 */
+/* File: mips/op_if_ne.S */
+/* File: mips/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    beq a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- branch dist (2 code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a2, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+    bgez      a2, .L_op_if_ne_finish
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_lt: /* 0x34 */
+/* File: mips/op_if_lt.S */
+/* File: mips/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    bge a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- branch dist (2 code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a2, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+    bgez      a2, .L_op_if_lt_finish
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_ge: /* 0x35 */
+/* File: mips/op_if_ge.S */
+/* File: mips/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    blt a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- branch dist (2 code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a2, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+    bgez      a2, .L_op_if_ge_finish
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_gt: /* 0x36 */
+/* File: mips/op_if_gt.S */
+/* File: mips/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    ble a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- branch dist (2 code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a2, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+    bgez      a2, .L_op_if_gt_finish
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_le: /* 0x37 */
+/* File: mips/op_if_le.S */
+/* File: mips/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    bgt a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- branch dist (2 code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a2, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+    bgez      a2, .L_op_if_le_finish
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_eqz: /* 0x38 */
+/* File: mips/op_if_eqz.S */
+/* File: mips/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
+    bne a2, zero, 1f                #  branch to 1 if comparison failed
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- branch dist (2 code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a1, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgez      a1, 3f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+3:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_nez: /* 0x39 */
+/* File: mips/op_if_nez.S */
+/* File: mips/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
+    beq a2, zero, 1f                #  branch to 1 if comparison failed
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- branch dist (2 code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a1, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgez      a1, 3f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+3:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_ltz: /* 0x3a */
+/* File: mips/op_if_ltz.S */
+/* File: mips/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
+    bge a2, zero, 1f                #  branch to 1 if comparison failed
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- branch dist (2 code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a1, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgez      a1, 3f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+3:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_gez: /* 0x3b */
+/* File: mips/op_if_gez.S */
+/* File: mips/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
+    blt a2, zero, 1f                #  branch to 1 if comparison failed
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- branch dist (2 code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a1, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgez      a1, 3f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+3:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_gtz: /* 0x3c */
+/* File: mips/op_if_gtz.S */
+/* File: mips/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
+    ble a2, zero, 1f                #  branch to 1 if comparison failed
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- branch dist (2 code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a1, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgez      a1, 3f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+3:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_lez: /* 0x3d */
+/* File: mips/op_if_lez.S */
+/* File: mips/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
+    bgt a2, zero, 1f                #  branch to 1 if comparison failed
+    b 2f
+1:
+    li        rINST, 2                     #  rINST <- branch dist (2 code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC()
+    move      a0, rSELF
+    addu      a1, rFP, OFF_FP_SHADOWFRAME
+    move      a2, rINST
+    JAL(MterpProfileBranch)                #  (self, shadow_frame, offset)
+    bnez      v0, MterpOnStackReplacement  #  Note: offset must be in rINST
+#endif
+    addu      a1, rINST, rINST             #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgez      a1, 3f
+    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
+    b         MterpCheckSuspendAndContinue
+3:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_3e: /* 0x3e */
+/* File: mips/op_unused_3e.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_3f: /* 0x3f */
+/* File: mips/op_unused_3f.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
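+/*
+ * For reference (Dex format, not generated code): the payload at
+ * PC + BBBBbbbb*2 is a fill-array-data-payload: ushort ident (0x0300),
+ * ushort element_width, uint size, then size*element_width data bytes
+ * that MterpFillArrayData copies into the array object.
+ */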
+    .balign 128
+.L_op_unused_40: /* 0x40 */
+/* File: mips/op_unused_40.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_41: /* 0x41 */
+/* File: mips/op_unused_41.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_42: /* 0x42 */
+/* File: mips/op_unused_42.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_43: /* 0x43 */
+/* File: mips/op_unused_43.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget: /* 0x44 */
+/* File: mips/op_aget.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    .if 2
+    EASN(a0, a0, a1, 2)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    lw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0)             #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
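+/*
+ * Note (template expansion): the ".if 2 / EASN(a0, a0, a1, 2)" sequence
+ * is the shared op_aget template specialized with log2(element width);
+ * the byte-sized variants below (aget-boolean, aget-byte) are generated
+ * with shift 0 and therefore assemble the plain addu arm instead.
+ */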
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_wide: /* 0x45 */
+/* File: mips/op_aget_wide.S */
+    /*
+     * Array get, 64 bits.  vAA <- vBB[vCC].
+     *
+     * Arrays of long/double are 64-bit aligned.
+     */
+    /* aget-wide vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a2, a3, rOBJ, t0)      #  vAA/vAA+1 <- a2/a3
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_object: /* 0x46 */
+/* File: mips/op_aget_object.S */
+    /*
+     * Array object get.  vAA <- vBB[vCC].
+     *
+     * for: aget-object
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    EXPORT_PC()
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    JAL(artAGetObjectFromMterp)            #  v0 <- GetObj(array, index)
+    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
+    PREFETCH_INST(2)                       #  load rINST
+    bnez a1, MterpException
+    SET_VREG_OBJECT(v0, rOBJ)              #  vAA <- v0
+    ADVANCE(2)                             #  advance rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_boolean: /* 0x47 */
+/* File: mips/op_aget_boolean.S */
+/* File: mips/op_aget.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    .if 0
+    EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    lbu a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0)             #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_byte: /* 0x48 */
+/* File: mips/op_aget_byte.S */
+/* File: mips/op_aget.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    .if 0
+    EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    lb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0)             #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_char: /* 0x49 */
+/* File: mips/op_aget_char.S */
+/* File: mips/op_aget.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    .if 1
+    EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    lhu a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0)             #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_short: /* 0x4a */
+/* File: mips/op_aget_short.S */
+/* File: mips/op_aget.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    .if 1
+    EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    lh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0)             #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput: /* 0x4b */
+/* File: mips/op_aput.S */
+
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
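+    /*
+     * Illustrative C sketch (not part of the generated handler; names are
+     * hypothetical): same null and unsigned bounds checks as aget, then a
+     * store of the element width:
+     *
+     *   if (array == NULL) goto throw_null_pointer;
+     *   if ((uint32_t)index >= (uint32_t)array->length)
+     *       goto throw_index_out_of_bounds;
+     *   array->data[index] = vregs[AA];
+     */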
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    .if 2
+    EASN(a0, a0, a1, 2)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_wide: /* 0x4c */
+/* File: mips/op_aput_wide.S */
+    /*
+     * Array put, 64 bits.  vBB[vCC] <- vAA.
+     *
+     * Arrays of long/double are 64-bit aligned, so it's okay to use a single
+     * 64-bit store (STORE64).
+     */
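+    /*
+     * Illustrative C sketch (hypothetical names): the value is read from the
+     * register pair vAA/vAA+1 and written with one aligned 64-bit access:
+     *
+     *   uint64_t value = *(uint64_t *)&vregs[AA];      // vAA/vAA+1
+     *   *(uint64_t *)&array->data[index] = value;      // 8-byte aligned element
+     */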
+    /* aput-wide vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t0)                            #  t0 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
+    EAS2(rOBJ, rFP, t0)                    #  rOBJ <- &fp[AA]
+    # compare unsigned index, length
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64(a2, a3, rOBJ)                   #  a2/a3 <- vAA/vAA+1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) #  vBB[vCC] <- a2/a3
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_object: /* 0x4d */
+/* File: mips/op_aput_object.S */
+    /*
+     * Store an object into an array.  vBB[vCC] <- vAA.
+     *
+     */
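+    /*
+     * Note: unlike the primitive aput handlers, this defers to a runtime
+     * helper, which must also perform the array store type check and any GC
+     * write barrier. Rough call shape, with the argument meanings inferred
+     * from the register setup below:
+     *
+     *   if (!MterpAputObject(shadow_frame, dex_pc_ptr, inst_data))
+     *       goto possible_exception;
+     */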
+    /* op vAA, vBB, vCC */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rPC
+    move   a2, rINST
+    JAL(MterpAputObject)
+    beqz   v0, MterpPossibleException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_boolean: /* 0x4e */
+/* File: mips/op_aput_boolean.S */
+/* File: mips/op_aput.S */
+
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    .if 0
+    EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sb a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_byte: /* 0x4f */
+/* File: mips/op_aput_byte.S */
+/* File: mips/op_aput.S */
+
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    .if 0
+    EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_char: /* 0x50 */
+/* File: mips/op_aput_char.S */
+/* File: mips/op_aput.S */
+
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    .if 1
+    EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sh a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_short: /* 0x51 */
+/* File: mips/op_aput_short.S */
+/* File: mips/op_aput.S */
+
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1, 0)                      #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_B(a3, 1, 1)                      #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
+    .if 1
+    EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget: /* 0x52 */
+/* File: mips/op_iget.S */
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
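+    /*
+     * Illustrative C sketch (argument order inferred from the register setup
+     * below; names are approximate):
+     *
+     *   int32_t value = artGet32InstanceFromCode(field_ref, obj, referrer, self);
+     *   if (self->exception != NULL) goto possible_exception;
+     *   vregs[A] = value;                  // SET_VREG_OBJECT for iget-object
+     */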
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- referrer
+    move  a3, rSELF                        # a3 <- self
+    JAL(artGet32InstanceFromCode)
+    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA4(a2)                           # a2<- A+
+    PREFETCH_INST(2)                       # load rINST
+    bnez  a3, MterpPossibleException        # bail out
+    .if 0
+    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
+    .else
+    SET_VREG(v0, a2)                       # fp[A] <- v0
+    .endif
+    ADVANCE(2)                             #  advance rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_wide: /* 0x53 */
+/* File: mips/op_iget_wide.S */
+    /*
+     * 64-bit instance field get.
+     *
+     * for: iget-wide
+     */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- referrer
+    move  a3, rSELF                        # a3 <- self
+    JAL(artGet64InstanceFromCode)
+    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA4(a2)                           # a2<- A+
+    PREFETCH_INST(2)                       # load rINST
+    bnez a3, MterpException                # bail out
+    SET_VREG64(v0, v1, a2)                 # fp[A] <- v0/v1
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_object: /* 0x54 */
+/* File: mips/op_iget_object.S */
+/* File: mips/op_iget.S */
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- referrer
+    move  a3, rSELF                        # a3 <- self
+    JAL(artGetObjInstanceFromCode)
+    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA4(a2)                           # a2<- A+
+    PREFETCH_INST(2)                       # load rINST
+    bnez  a3, MterpPossibleException        # bail out
+    .if 1
+    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
+    .else
+    SET_VREG(v0, a2)                       # fp[A] <- v0
+    .endif
+    ADVANCE(2)                             #  advance rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_boolean: /* 0x55 */
+/* File: mips/op_iget_boolean.S */
+/* File: mips/op_iget.S */
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- referrer
+    move  a3, rSELF                        # a3 <- self
+    JAL(artGetBooleanInstanceFromCode)
+    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA4(a2)                           # a2<- A+
+    PREFETCH_INST(2)                       # load rINST
+    bnez  a3, MterpPossibleException        # bail out
+    .if 0
+    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
+    .else
+    SET_VREG(v0, a2)                       # fp[A] <- v0
+    .endif
+    ADVANCE(2)                             #  advance rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_byte: /* 0x56 */
+/* File: mips/op_iget_byte.S */
+/* File: mips/op_iget.S */
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- referrer
+    move  a3, rSELF                        # a3 <- self
+    JAL(artGetByteInstanceFromCode)
+    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA4(a2)                           # a2<- A+
+    PREFETCH_INST(2)                       # load rINST
+    bnez  a3, MterpPossibleException        # bail out
+    .if 0
+    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
+    .else
+    SET_VREG(v0, a2)                       # fp[A] <- v0
+    .endif
+    ADVANCE(2)                             #  advance rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_char: /* 0x57 */
+/* File: mips/op_iget_char.S */
+/* File: mips/op_iget.S */
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- referrer
+    move  a3, rSELF                        # a3 <- self
+    JAL(artGetCharInstanceFromCode)
+    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA4(a2)                           # a2<- A+
+    PREFETCH_INST(2)                       # load rINST
+    bnez  a3, MterpPossibleException        # bail out
+    .if 0
+    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
+    .else
+    SET_VREG(v0, a2)                       # fp[A] <- v0
+    .endif
+    ADVANCE(2)                             #  advance rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_short: /* 0x58 */
+/* File: mips/op_iget_short.S */
+/* File: mips/op_iget.S */
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- referrer
+    move  a3, rSELF                        # a3 <- self
+    JAL(artGetShortInstanceFromCode)
+    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA4(a2)                           # a2<- A+
+    PREFETCH_INST(2)                       # load rINST
+    bnez  a3, MterpPossibleException        # bail out
+    .if 0
+    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
+    .else
+    SET_VREG(v0, a2)                       # fp[A] <- v0
+    .endif
+    ADVANCE(2)                             #  advance rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput: /* 0x59 */
+/* File: mips/op_iput.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+     */
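+    /*
+     * Illustrative C sketch (argument order inferred from the register setup
+     * below; names are approximate):
+     *
+     *   if (artSet32InstanceFromMterp(field_ref, obj, vregs[A], referrer) != 0)
+     *       goto possible_exception;       // non-zero return reports failure
+     */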
+    # op vA, vB, field                     /* CCCC */
+    .extern artSet32InstanceFromMterp
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    GET_OPA4(a2)                           # a2 <- A+
+    GET_VREG(a2, a2)                       # a2 <- fp[A]
+    lw    a3, OFF_FP_METHOD(rFP)           # a3 <- referrer
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet32InstanceFromMterp)
+    bnez  v0, MterpPossibleException       # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_wide: /* 0x5a */
+/* File: mips/op_iput_wide.S */
+    # iput-wide vA, vB, field              /* CCCC */
+    .extern artSet64InstanceFromMterp
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    GET_OPA4(a2)                           # a2 <- A+
+    EAS2(a2, rFP, a2)                      # a2 <- &fp[A]
+    lw    a3, OFF_FP_METHOD(rFP)           # a3 <- referrer
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet64InstanceFromMterp)
+    bnez  v0, MterpPossibleException       # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_object: /* 0x5b */
+/* File: mips/op_iput_object.S */
+    /*
+     * 32-bit instance field put.
+     *
+     * for: iput-object, iput-object-volatile
+     */
+    # op vA, vB, field                     /* CCCC */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rPC
+    move   a2, rINST
+    move   a3, rSELF
+    JAL(MterpIputObject)
+    beqz   v0, MterpException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_boolean: /* 0x5c */
+/* File: mips/op_iput_boolean.S */
+/* File: mips/op_iput.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    .extern artSet8InstanceFromMterp
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    GET_OPA4(a2)                           # a2 <- A+
+    GET_VREG(a2, a2)                       # a2 <- fp[A]
+    lw    a3, OFF_FP_METHOD(rFP)           # a3 <- referrer
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet8InstanceFromMterp)
+    bnez  v0, MterpPossibleException       # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_byte: /* 0x5d */
+/* File: mips/op_iput_byte.S */
+/* File: mips/op_iput.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    .extern artSet8InstanceFromMterp
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    GET_OPA4(a2)                           # a2 <- A+
+    GET_VREG(a2, a2)                       # a2 <- fp[A]
+    lw    a3, OFF_FP_METHOD(rFP)           # a3 <- referrer
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet8InstanceFromMterp)
+    bnez  v0, MterpPossibleException       # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_char: /* 0x5e */
+/* File: mips/op_iput_char.S */
+/* File: mips/op_iput.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    .extern artSet16InstanceFromMterp
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    GET_OPA4(a2)                           # a2 <- A+
+    GET_VREG(a2, a2)                       # a2 <- fp[A]
+    lw    a3, OFF_FP_METHOD(rFP)           # a3 <- referrer
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet16InstanceFromMterp)
+    bnez  v0, MterpPossibleException       # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_short: /* 0x5f */
+/* File: mips/op_iput_short.S */
+/* File: mips/op_iput.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    .extern artSet16InstanceFromMterp
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref CCCC
+    GET_OPB(a1)                            # a1 <- B
+    GET_VREG(a1, a1)                       # a1 <- fp[B], the object pointer
+    GET_OPA4(a2)                           # a2 <- A+
+    GET_VREG(a2, a2)                       # a2 <- fp[A]
+    lw    a3, OFF_FP_METHOD(rFP)           # a3 <- referrer
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet16InstanceFromMterp)
+    bnez  v0, MterpPossibleException       # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget: /* 0x60 */
+/* File: mips/op_sget.S */
+    /*
+     * General SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
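+    /*
+     * Illustrative C sketch (argument order inferred from the register setup
+     * below; names are approximate):
+     *
+     *   int32_t value = artGet32StaticFromCode(field_ref, method, self);
+     *   if (self->exception != NULL) goto exception;
+     *   vregs[AA] = value;                 // SET_VREG_OBJECT for sget-object
+     */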
+    # op vAA, field                        /* BBBB */
+    .extern artGet32StaticFromCode
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    lw    a1, OFF_FP_METHOD(rFP)           # a1 <- method
+    move  a2, rSELF                        # a2 <- self
+    JAL(artGet32StaticFromCode)
+    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA(a2)                            # a2 <- AA
+    PREFETCH_INST(2)
+    bnez  a3, MterpException               # bail out
+.if 0
+    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
+.else
+    SET_VREG(v0, a2)                       # fp[AA] <- v0
+.endif
+    ADVANCE(2)
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_wide: /* 0x61 */
+/* File: mips/op_sget_wide.S */
+    /*
+     * 64-bit SGET handler.
+     */
+    # sget-wide vAA, field                 /* BBBB */
+    .extern artGet64StaticFromCode
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    lw    a1, OFF_FP_METHOD(rFP)           # a1 <- method
+    move  a2, rSELF                        # a2 <- self
+    JAL(artGet64StaticFromCode)
+    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    bnez  a3, MterpException
+    GET_OPA(a1)                            # a1 <- AA
+    FETCH_ADVANCE_INST(2)                  # advance rPC, load rINST
+    SET_VREG64(v0, v1, a1)                 # vAA/vAA+1 <- v0/v1
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_object: /* 0x62 */
+/* File: mips/op_sget_object.S */
+/* File: mips/op_sget.S */
+    /*
+     * General SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    .extern artGetObjStaticFromCode
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    lw    a1, OFF_FP_METHOD(rFP)           # a1 <- method
+    move  a2, rSELF                        # a2 <- self
+    JAL(artGetObjStaticFromCode)
+    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA(a2)                            # a2 <- AA
+    PREFETCH_INST(2)
+    bnez  a3, MterpException               # bail out
+.if 1
+    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
+.else
+    SET_VREG(v0, a2)                       # fp[AA] <- v0
+.endif
+    ADVANCE(2)
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_boolean: /* 0x63 */
+/* File: mips/op_sget_boolean.S */
+/* File: mips/op_sget.S */
+    /*
+     * General SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    .extern artGetBooleanStaticFromCode
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    lw    a1, OFF_FP_METHOD(rFP)           # a1 <- method
+    move  a2, rSELF                        # a2 <- self
+    JAL(artGetBooleanStaticFromCode)
+    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA(a2)                            # a2 <- AA
+    PREFETCH_INST(2)
+    bnez  a3, MterpException               # bail out
+.if 0
+    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
+.else
+    SET_VREG(v0, a2)                       # fp[AA] <- v0
+.endif
+    ADVANCE(2)
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_byte: /* 0x64 */
+/* File: mips/op_sget_byte.S */
+/* File: mips/op_sget.S */
+    /*
+     * General SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    .extern artGetByteStaticFromCode
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    lw    a1, OFF_FP_METHOD(rFP)           # a1 <- method
+    move  a2, rSELF                        # a2 <- self
+    JAL(artGetByteStaticFromCode)
+    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA(a2)                            # a2 <- AA
+    PREFETCH_INST(2)
+    bnez  a3, MterpException               # bail out
+.if 0
+    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
+.else
+    SET_VREG(v0, a2)                       # fp[AA] <- v0
+.endif
+    ADVANCE(2)
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_char: /* 0x65 */
+/* File: mips/op_sget_char.S */
+/* File: mips/op_sget.S */
+    /*
+     * General SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    .extern artGetCharStaticFromCode
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    lw    a1, OFF_FP_METHOD(rFP)           # a1 <- method
+    move  a2, rSELF                        # a2 <- self
+    JAL(artGetCharStaticFromCode)
+    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA(a2)                            # a2 <- AA
+    PREFETCH_INST(2)
+    bnez  a3, MterpException               # bail out
+.if 0
+    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
+.else
+    SET_VREG(v0, a2)                       # fp[AA] <- v0
+.endif
+    ADVANCE(2)
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_short: /* 0x66 */
+/* File: mips/op_sget_short.S */
+/* File: mips/op_sget.S */
+    /*
+     * General SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    .extern artGetShortStaticFromCode
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    lw    a1, OFF_FP_METHOD(rFP)           # a1 <- method
+    move  a2, rSELF                        # a2 <- self
+    JAL(artGetShortStaticFromCode)
+    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA(a2)                            # a2 <- AA
+    PREFETCH_INST(2)
+    bnez  a3, MterpException               # bail out
+.if 0
+    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
+.else
+    SET_VREG(v0, a2)                       # fp[AA] <- v0
+.endif
+    ADVANCE(2)
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput: /* 0x67 */
+/* File: mips/op_sput.S */
+    /*
+     * General SPUT handler.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
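+    /*
+     * Illustrative C sketch (argument order inferred from the register setup
+     * below; names are approximate):
+     *
+     *   if (artSet32StaticFromCode(field_ref, vregs[AA], method, self) != 0)
+     *       goto exception;
+     */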
+    # op vAA, field                        /* BBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    GET_OPA(a3)                            # a3 <- AA
+    GET_VREG(a1, a3)                       # a1 <- fp[AA], the new value
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- method
+    move  a3, rSELF                        # a3 <- self
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet32StaticFromCode)
+    bnez  v0, MterpException               # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_wide: /* 0x68 */
+/* File: mips/op_sput_wide.S */
+    /*
+     * 64-bit SPUT handler.
+     */
+    # sput-wide vAA, field                 /* BBBB */
+    .extern artSet64IndirectStaticFromMterp
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    lw    a1, OFF_FP_METHOD(rFP)           # a1 <- method
+    GET_OPA(a2)                            # a2 <- AA
+    EAS2(a2, rFP, a2)                      # a2 <- &fp[AA]
+    move  a3, rSELF                        # a3 <- self
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet64IndirectStaticFromMterp)
+    bnez  v0, MterpException               # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_object: /* 0x69 */
+/* File: mips/op_sput_object.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput-object
+     */
+    /* op vAA, field@BBBB */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rPC
+    move   a2, rINST
+    move   a3, rSELF
+    JAL(MterpSputObject)
+    beqz   v0, MterpException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_boolean: /* 0x6a */
+/* File: mips/op_sput_boolean.S */
+/* File: mips/op_sput.S */
+    /*
+     * General SPUT handler.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field                        /* BBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    GET_OPA(a3)                            # a3 <- AA
+    GET_VREG(a1, a3)                       # a1 <- fp[AA], the new value
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- method
+    move  a3, rSELF                        # a3 <- self
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet8StaticFromCode)
+    bnez  v0, MterpException               # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_byte: /* 0x6b */
+/* File: mips/op_sput_byte.S */
+/* File: mips/op_sput.S */
+    /*
+     * General SPUT handler.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field                        /* BBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    GET_OPA(a3)                            # a3 <- AA
+    GET_VREG(a1, a3)                       # a1 <- fp[AA], the new value
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- method
+    move  a3, rSELF                        # a3 <- self
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet8StaticFromCode)
+    bnez  v0, MterpException               # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_char: /* 0x6c */
+/* File: mips/op_sput_char.S */
+/* File: mips/op_sput.S */
+    /*
+     * General SPUT handler.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field                        /* BBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    GET_OPA(a3)                            # a3 <- AA
+    GET_VREG(a1, a3)                       # a1 <- fp[AA], the new value
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- method
+    move  a3, rSELF                        # a3 <- self
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet16StaticFromCode)
+    bnez  v0, MterpException               # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_short: /* 0x6d */
+/* File: mips/op_sput_short.S */
+/* File: mips/op_sput.S */
+    /*
+     * General SPUT handler.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field                        /* BBBB */
+    EXPORT_PC()
+    FETCH(a0, 1)                           # a0 <- field ref BBBB
+    GET_OPA(a3)                            # a3 <- AA
+    GET_VREG(a1, a3)                       # a1 <- fp[AA], the new value
+    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- method
+    move  a3, rSELF                        # a3 <- self
+    PREFETCH_INST(2)                       # load rINST
+    JAL(artSet16StaticFromCode)
+    bnez  v0, MterpException               # bail out
+    ADVANCE(2)                             # advance rPC
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_virtual: /* 0x6e */
+/* File: mips/op_invoke_virtual.S */
+/* File: mips/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
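+    /*
+     * Illustrative C sketch (the helper name varies per invoke opcode;
+     * argument order inferred from the register setup below):
+     *
+     *   if (!MterpInvokeVirtual(self, shadow_frame, dex_pc_ptr, inst_data))
+     *       goto exception;
+     *   advance(3);                        // invoke is 3 code units wide
+     *   if (MterpShouldSwitchInterpreters())
+     *       goto fallback;                 // e.g. instrumentation was enabled
+     */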
+    .extern MterpInvokeVirtual
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL(MterpInvokeVirtual)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(3)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_super: /* 0x6f */
+/* File: mips/op_invoke_super.S */
+/* File: mips/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    .extern MterpInvokeSuper
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL(MterpInvokeSuper)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(3)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_direct: /* 0x70 */
+/* File: mips/op_invoke_direct.S */
+/* File: mips/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    .extern MterpInvokeDirect
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL(MterpInvokeDirect)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(3)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_static: /* 0x71 */
+/* File: mips/op_invoke_static.S */
+/* File: mips/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    .extern MterpInvokeStatic
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL(MterpInvokeStatic)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(3)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_interface: /* 0x72 */
+/* File: mips/op_invoke_interface.S */
+/* File: mips/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    .extern MterpInvokeInterface
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL(MterpInvokeInterface)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(3)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return_void_no_barrier: /* 0x73 */
+/* File: mips/op_return_void_no_barrier.S */
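+    /*
+     * Note: before returning, the handler polls the thread's flag word and
+     * runs a suspend check if a suspend or checkpoint request is pending;
+     * v0/v1 are zeroed because return-void has no result.
+     */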
+    lw     ra, THREAD_FLAGS_OFFSET(rSELF)
+    move   a0, rSELF
+    and    ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqz   ra, 1f
+    JAL(MterpSuspendCheck)                 # (self)
+1:
+    move   v0, zero
+    move   v1, zero
+    b      MterpReturn
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_virtual_range: /* 0x74 */
+/* File: mips/op_invoke_virtual_range.S */
+/* File: mips/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    .extern MterpInvokeVirtualRange
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL(MterpInvokeVirtualRange)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(3)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_super_range: /* 0x75 */
+/* File: mips/op_invoke_super_range.S */
+/* File: mips/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    .extern MterpInvokeSuperRange
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL(MterpInvokeSuperRange)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(3)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_direct_range: /* 0x76 */
+/* File: mips/op_invoke_direct_range.S */
+/* File: mips/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    .extern MterpInvokeDirectRange
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL(MterpInvokeDirectRange)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(3)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_static_range: /* 0x77 */
+/* File: mips/op_invoke_static_range.S */
+/* File: mips/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    .extern MterpInvokeStaticRange
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL(MterpInvokeStaticRange)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(3)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_interface_range: /* 0x78 */
+/* File: mips/op_invoke_interface_range.S */
+/* File: mips/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    .extern MterpInvokeInterfaceRange
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL(MterpInvokeInterfaceRange)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(3)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_79: /* 0x79 */
+/* File: mips/op_unused_79.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_7a: /* 0x7a */
+/* File: mips/op_unused_7a.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_neg_int: /* 0x7b */
+/* File: mips/op_neg_int.S */
+/* File: mips/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
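+    /*
+     * Illustrative C sketch: one template, many opcodes; only the "instr"
+     * line differs:
+     *
+     *   int32_t result = op(vregs[B]);     // e.g. -x (neg-int), ~x (not-int)
+     *   vregs[A] = result;
+     */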
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    negu a0, a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_not_int: /* 0x7c */
+/* File: mips/op_not_int.S */
+/* File: mips/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    not a0, a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_neg_long: /* 0x7d */
+/* File: mips/op_neg_long.S */
+/* File: mips/unopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double,
+     */
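+    /*
+     * Illustrative C sketch: for neg-long the sltu/subu pair below
+     * propagates the borrow of a 64-bit two's-complement negate:
+     *
+     *   lo_out = -lo;
+     *   hi_out = -hi - (lo != 0);
+     */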
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vAA
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    negu v0, a0                              #  optional op
+    negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0    #  v0/v1 <- -(a0/a1), borrow into high word
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(v0, v1, rOBJ)               #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_not_long: /* 0x7e */
+/* File: mips/op_not_long.S */
+/* File: mips/unopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double,
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vAA
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    not a0, a0                              #  optional op
+    not a1, a1                                 #  a0/a1 <- op, a2-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, rOBJ)   #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_neg_float: /* 0x7f */
+/* File: mips/op_neg_float.S */
+/* File: mips/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    addu a0, a0, 0x80000000                #  a0 <- vB with IEEE sign bit flipped (addu acts as xor here)
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_neg_double: /* 0x80 */
+/* File: mips/op_neg_double.S */
+/* File: mips/unopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double,
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vAA
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    addu a1, a1, 0x80000000                #  a1 <- high word with IEEE sign bit flipped
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, rOBJ)   #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_long: /* 0x81 */
+/* File: mips/op_int_to_long.S */
+/* File: mips/unopWider.S */
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0", where
+     * "result" is a 64-bit quantity in a0/a1.
+     *
+     * For: int-to-long
+     */
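+    /*
+     * Illustrative C sketch: int-to-long is just sign extension into a
+     * register pair; the high word replicates the sign bit:
+     *
+     *   int32_t lo = vregs[B];
+     *   int32_t hi = lo >> 31;             // the sra below
+     */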
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    sra a1, a0, 31                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, rOBJ)   #  vA/vA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-11 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_float: /* 0x82 */
+/* File: mips/op_int_to_float.S */
+/* File: mips/funop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: int-to-float, float-to-int
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_VREG_F(fa0, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    cvt.s.w fv0, fa0
+
+.Lop_int_to_float_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    GOTO_OPCODE(t1)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_double: /* 0x83 */
+/* File: mips/op_int_to_double.S */
+/* File: mips/funopWider.S */
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0", where
+     * "result" is a 64-bit quantity in a0/a1.
+     *
+     * For: int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    cvt.d.w fv0, fa0
+
+.Lop_int_to_double_set_vreg:
+    SET_VREG64_F(fv0, fv0f, rOBJ)                             #  vA/vA+1 <- fv0/fv0f
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_long_to_int: /* 0x84 */
+/* File: mips/op_long_to_int.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: mips/op_move.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    GET_OPB(a1)                            #  a1 <- B from 15:12
+    GET_OPA4(a0)                           #  a0 <- A from 11:8
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[B]
+    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
+    .if 0
+    SET_VREG_OBJECT(a2, a0)                #  fp[A] <- a2
+    .else
+    SET_VREG(a2, a0)                       #  fp[A] <- a2
+    .endif
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_long_to_float: /* 0x85 */
+/* File: mips/op_long_to_float.S */
+/* File: mips/unopNarrower.S */
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0/a1", where
+     * "result" is a 32-bit quantity in a0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     * If hard floating point support is available, use fa0 as the parameter,
+     * except for long-to-float opcode.
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64(rARG0, rARG1, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    JAL(__floatdisf)
+
+.Lop_long_to_float_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)                  #  vA <- result0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
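+
+    /*
+     * Sketch: long-to-float cannot take its source in fa0 because the
+     * 64-bit operand arrives in the integer pair rARG0/rARG1, so the
+     * conversion is delegated to the compiler support routine
+     *   float __floatdisf(long long);           // result comes back in fv0
+     * (op_long_to_double below uses __floatdidf the same way).
+     */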
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_long_to_double: /* 0x86 */
+/* File: mips/op_long_to_double.S */
+/* File: mips/funopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * For: long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64(rARG0, rARG1, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    JAL(__floatdidf)                                 #  a0/a1 <- op, a2-a3 changed
+
+.Lop_long_to_double_set_vreg:
+    SET_VREG64_F(fv0, fv0f, rOBJ)                             #  vA/vA+1 <- fv0/fv0f
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_float_to_int: /* 0x87 */
+/* File: mips/op_float_to_int.S */
+/* File: mips/funop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: int-to-float, float-to-int
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_VREG_F(fa0, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    b f2i_doconv
+
+.Lop_float_to_int_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    GOTO_OPCODE(t1)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_float_to_long: /* 0x88 */
+/* File: mips/op_float_to_long.S */
+/* File: mips/funopWider.S */
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0", where
+     * "result" is a 64-bit quantity in a0/a1.
+     *
+     * For: int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    b f2l_doconv
+
+.Lop_float_to_long_set_vreg:
+    SET_VREG64(rRESULT0, rRESULT1, rOBJ)                             #  vA/vA+1 <- rRESULT0/rRESULT1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_float_to_double: /* 0x89 */
+/* File: mips/op_float_to_double.S */
+/* File: mips/funopWider.S */
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0", where
+     * "result" is a 64-bit quantity in a0/a1.
+     *
+     * For: int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    cvt.d.s fv0, fa0
+
+.Lop_float_to_double_set_vreg:
+    SET_VREG64_F(fv0, fv0f, rOBJ)                             #  vA/vA+1 <- fv0/fv0f
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_double_to_int: /* 0x8a */
+/* File: mips/op_double_to_int.S */
+/* File: mips/unopNarrower.S */
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0/a1", where
+     * "result" is a 32-bit quantity in a0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     * If hard floating point support is available, use fa0 as the parameter,
+     * except for long-to-float opcode.
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64_F(fa0, fa0f, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    b d2i_doconv
+
+.Lop_double_to_int_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)                  #  vA <- result0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/*
+ * Convert the double in a0/a1 to an int in a0.
+ *
+ * We have to clip values to int min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer.  The EABI conversion function doesn't do this for us.
+ */
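+
+/*
+ * Clamping sketch (C): d2i_doconv (not shown here) is expected to implement
+ * the Java d2i rules, roughly:
+ *
+ *   int32_t d2i(double d) {
+ *       if (d >= (double)INT32_MAX) return INT32_MAX;
+ *       if (d <= (double)INT32_MIN) return INT32_MIN;
+ *       if (d != d)                 return 0;            // NaN
+ *       return (int32_t)d;
+ *   }
+ */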
+
+/* ------------------------------ */
+    .balign 128
+.L_op_double_to_long: /* 0x8b */
+/* File: mips/op_double_to_long.S */
+/* File: mips/funopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * For: long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64_F(fa0, fa0f, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    b d2l_doconv                                 #  a0/a1 <- op, a2-a3 changed
+
+.Lop_double_to_long_set_vreg:
+    SET_VREG64(rRESULT0, rRESULT1, rOBJ)                             #  vA/vA+1 <- rRESULT0/rRESULT1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_double_to_float: /* 0x8c */
+/* File: mips/op_double_to_float.S */
+/* File: mips/unopNarrower.S */
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0/a1", where
+     * "result" is a 32-bit quantity in a0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     * If hard floating point support is available, use fa0 as the parameter,
+     * except for long-to-float opcode.
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64_F(fa0, fa0f, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    cvt.s.d fv0, fa0
+
+.Lop_double_to_float_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)                  #  vA <- result0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_byte: /* 0x8d */
+/* File: mips/op_int_to_byte.S */
+/* File: mips/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    sll a0, a0, 24                              #  optional op
+    sra a0, a0, 24                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result
+    /* 9-10 instructions */
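+
+    /*
+     * Sketch: the sll/sra pair above is the usual sign-extension idiom,
+     *   int8_t result = (int8_t)vB;             // (vB << 24) >> 24
+     * int-to-char and int-to-short below zero-extend with "and 0xffff" and
+     * use a 16-bit shift pair, respectively.
+     */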
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_char: /* 0x8e */
+/* File: mips/op_int_to_char.S */
+/* File: mips/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    and a0, 0xffff                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_short: /* 0x8f */
+/* File: mips/op_int_to_short.S */
+/* File: mips/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    sll a0, 16                              #  optional op
+    sra a0, 16                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_int: /* 0x90 */
+/* File: mips/op_add_int.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying divide
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
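+
+    /*
+     * Decoding sketch (C): FETCH(a0, 1) reads the 16-bit code unit that
+     * packs the two source register indices as CC|BB; the srl/and pair
+     * above splits it ("insns" is an illustrative name):
+     *
+     *   uint16_t ccbb = insns[1];
+     *   uint32_t bb   = ccbb & 0xff;            // index of vBB
+     *   uint32_t cc   = ccbb >> 8;              // index of vCC
+     */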
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_int: /* 0x91 */
+/* File: mips/op_sub_int.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying divide
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    subu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_int: /* 0x92 */
+/* File: mips/op_mul_int.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying divide
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_int: /* 0x93 */
+/* File: mips/op_div_int.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying divide
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    div a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+#else
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying divide
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    div zero, a0, a1                              #  optional op
+    mflo a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+#endif
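+
+/*
+ * Sketch: MIPS32R6 has three-operand div/mod instructions; before R6 the
+ * divide targets the HI/LO register pair instead:
+ *
+ *   div  zero, a0, a1        # LO <- a0 / a1, HI <- a0 % a1
+ *   mflo a0                  # quotient (op_rem_int below reads mfhi instead)
+ */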
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_int: /* 0x94 */
+/* File: mips/op_rem_int.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying divide
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    mod a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+#else
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying divide
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    div zero, a0, a1                              #  optional op
+    mfhi a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+#endif
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_int: /* 0x95 */
+/* File: mips/op_and_int.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying divide
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_int: /* 0x96 */
+/* File: mips/op_or_int.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying divide
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_int: /* 0x97 */
+/* File: mips/op_xor_int.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying divide
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_int: /* 0x98 */
+/* File: mips/op_shl_int.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying divide
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    sll a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_int: /* 0x99 */
+/* File: mips/op_shr_int.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying divide
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    sra a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_int: /* 0x9a */
+/* File: mips/op_ushr_int.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying divide
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    srl a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_long: /* 0x9b */
+/* File: mips/op_add_long.S */
+/*
+ *  The compiler generates the following sequence for
+ *  [v1 v0] =  [a1 a0] + [a3 a2];
+ *    addu v0,a2,a0
+ *    addu a1,a3,a1
+ *    sltu v1,v0,a2
+ *    addu v1,v1,a1
+ */
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    addu v0, a2, a0                              #  optional op
+    addu a1, a3, a1                        #  a1 <- ahi + bhi
+    sltu v1, v0, a2                        #  v1 <- carry out of the low word
+    addu v1, v1, a1                        #  v1 <- high sum + carry
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vAA/vAA+1 <- v0/v1
+    /* 14-17 instructions */
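+
+    /*
+     * Carry sketch (C): with no add-with-carry instruction, the carry out
+     * of the low word is recovered by an unsigned compare (alo/ahi and
+     * blo/bhi are illustrative names for the two operands):
+     *
+     *   uint32_t lo    = blo + alo;             // addu v0, a2, a0
+     *   uint32_t carry = lo < blo;              // sltu v1, v0, a2
+     *   uint32_t hi    = bhi + ahi + carry;
+     */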
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_long: /* 0x9c */
+/* File: mips/op_sub_long.S */
+/*
+ * For little endian the code sequence looks as follows:
+ *    subu    v0,a0,a2
+ *    subu    v1,a1,a3
+ *    sltu    a0,a0,v0
+ *    subu    v1,v1,a0
+ */
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    subu v0, a0, a2                              #  optional op
+    subu v1, a1, a3                        #  v1 <- ahi - bhi
+    sltu a0, a0, v0                        #  a0 <- borrow from the low word
+    subu v1, v1, a0                        #  v1 <- high difference - borrow
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vAA/vAA+1 <- v0/v1
+    /* 14-17 instructions */
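+
+    /*
+     * Borrow sketch (C), mirroring the carry trick in op_add_long above:
+     *
+     *   uint32_t lo     = alo - blo;            // subu v0, a0, a2
+     *   uint32_t borrow = alo < lo;             // sltu a0, a0, v0
+     *   uint32_t hi     = ahi - bhi - borrow;
+     */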
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_long: /* 0x9d */
+/* File: mips/op_mul_long.S */
+    /*
+     * Signed 64-bit integer multiply.
+     *         a1   a0
+     *   x     a3   a2
+     *   -------------
+     *       a2a1 a2a0
+     *       a3a0
+     *  a3a1 (<= unused)
+     *  ---------------
+     *         v1   v0
+     */
+    /* mul-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       t0, a0, 255                  #  t0 <- BB
+    srl       t1, a0, 8                    #  t1 <- CC
+    EAS2(t0, rFP, t0)                      #  t0 <- &fp[BB]
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vBB/vBB+1
+
+    EAS2(t1, rFP, t1)                      #  t1 <- &fp[CC]
+    LOAD64(a2, a3, t1)                     #  a2/a3 <- vCC/vCC+1
+
+    mul       v1, a3, a0                   #  v1= a3a0
+#ifdef MIPS32REVGE6
+    mulu      v0, a2, a0                   #  v0= a2a0
+    muhu      t1, a2, a0
+#else
+    multu     a2, a0
+    mfhi      t1
+    mflo      v0                           #  v0= a2a0
+#endif
+    mul       t0, a2, a1                   #  t0= a2a1
+    addu      v1, v1, t1                   #  v1+= hi(a2a0)
+    addu      v1, v1, t0                   #  v1= a3a0 + a2a1;
+
+    GET_OPA(a0)                            #  a0 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    b         .Lop_mul_long_finish
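+
+    /*
+     * Partial-product sketch (C, a0..a3 naming the register values): the
+     * a3*a1 term only affects bits 64..127 and is dropped:
+     *
+     *   uint64_t p  = (uint64_t)a2 * a0;        // a2a0
+     *   uint32_t v0 = (uint32_t)p;
+     *   uint32_t v1 = (uint32_t)(p >> 32) + a3 * a0 + a2 * a1;
+     */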
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_long: /* 0x9e */
+/* File: mips/op_div_long.S */
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 1
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    JAL(__divdi3)                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vAA/vAA+1 <- v0/v1
+    /* 14-17 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_long: /* 0x9f */
+/* File: mips/op_rem_long.S */
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 1
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    JAL(__moddi3)                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vAA/vAA+1 <- v0/v1
+    /* 14-17 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_long: /* 0xa0 */
+/* File: mips/op_and_long.S */
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    and a0, a0, a2                              #  optional op
+    and a1, a1, a3                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vAA/vAA+1 <- a0/a1
+    /* 14-17 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_long: /* 0xa1 */
+/* File: mips/op_or_long.S */
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    or a0, a0, a2                              #  optional op
+    or a1, a1, a3                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vAA/vAA+1 <- a0/a1
+    /* 14-17 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_long: /* 0xa2 */
+/* File: mips/op_xor_long.S */
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    xor a0, a0, a2                              #  optional op
+    xor a1, a1, a3                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vAA/vAA+1 <- a0/a1
+    /* 14-17 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_long: /* 0xa3 */
+/* File: mips/op_shl_long.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* shl-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t2)                            #  t2 <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi    v1, a2, 0x20                   #  v1 <- shift & 0x20
+    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
+    bnez    v1, .Lop_shl_long_finish
+    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
+    srl     a0, 1
+    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
+    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
+    or      v1, a0                         #  rhi<- rhi | alo
+    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- v0/v1
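+
+    /*
+     * Sketch (C): for shift distance s (only the low 6 bits matter), the
+     * in-register path above computes
+     *   lo = alo << s;  hi = (ahi << s) | (alo >> (32 - s));
+     * and .Lop_shl_long_finish (not shown here) is expected to handle
+     * s >= 32 as  hi = alo << (s & 31); lo = 0;
+     */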
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_long: /* 0xa4 */
+/* File: mips/op_shr_long.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* shr-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t3)                            #  t3 <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi    v0, a2, 0x20                   #  shift & 0x20
+    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
+    bnez    v0, .Lop_shr_long_finish
+    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
+    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
+    sll     a1, 1
+    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
+    or      v0, a1                         #  rlo<- rlo | ahi
+    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/vAA+1 <- v0/v1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_long: /* 0xa5 */
+/* File: mips/op_ushr_long.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* ushr-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi      v0, a2, 0x20                 #  shift & 0x20
+    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
+    bnez      v0, .Lop_ushr_long_finish
+    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
+    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
+    sll       a1, 1
+    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
+    or        v0, a1                       #  rlo<- rlo | ahi
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vAA/vAA+1 <- v0/v1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_float: /* 0xa6 */
+/* File: mips/op_add_float.S */
+/* File: mips/fbinop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+     */
+
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    add.s fv0, fa0, fa1                                 #  f0 = result
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_float: /* 0xa7 */
+/* File: mips/op_sub_float.S */
+/* File: mips/fbinop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+     */
+
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    sub.s fv0, fa0, fa1                                 #  f0 = result
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_float: /* 0xa8 */
+/* File: mips/op_mul_float.S */
+/* File: mips/fbinop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+     */
+
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    mul.s fv0, fa0, fa1                                 #  f0 = result
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_float: /* 0xa9 */
+/* File: mips/op_div_float.S */
+/* File: mips/fbinop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+     */
+
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    div.s fv0, fa0, fa1                                 #  f0 = result
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_float: /* 0xaa */
+/* File: mips/op_rem_float.S */
+/* File: mips/fbinop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+     */
+
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
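+    # No FPU remainder instruction exists, so call libc's fmodf: under the
+    # o32 ABI the float operands go in fa0/fa1 ($f12/$f14) and the result
+    # comes back in fv0 ($f0).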
+    JAL(fmodf)                                 #  f0 = result
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_double: /* 0xab */
+/* File: mips/op_add_double.S */
+/* File: mips/fbinopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be an MIPS instruction or a function call.
+     *
+     * for: add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+    LOAD64_F(fa0, fa0f, a2)
+    LOAD64_F(fa1, fa1f, t1)
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    add.d fv0, fa0, fa1
+    SET_VREG64_F(fv0, fv0f, rOBJ)
+    b         .Lop_add_double_finish
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_double: /* 0xac */
+/* File: mips/op_sub_double.S */
+/* File: mips/fbinopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be an MIPS instruction or a function call.
+     *
+     * for: add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+    LOAD64_F(fa0, fa0f, a2)
+    LOAD64_F(fa1, fa1f, t1)
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    sub.d fv0, fa0, fa1
+    SET_VREG64_F(fv0, fv0f, rOBJ)
+    b         .Lop_sub_double_finish
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_double: /* 0xad */
+/* File: mips/op_mul_double.S */
+/* File: mips/fbinopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be an MIPS instruction or a function call.
+     *
+     * for: add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+    LOAD64_F(fa0, fa0f, a2)
+    LOAD64_F(fa1, fa1f, t1)
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    mul.d fv0, fa0, fa1
+    SET_VREG64_F(fv0, fv0f, rOBJ)
+    b         .Lop_mul_double_finish
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_double: /* 0xae */
+/* File: mips/op_div_double.S */
+/* File: mips/fbinopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be an MIPS instruction or a function call.
+     *
+     * for: add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+    LOAD64_F(fa0, fa0f, a2)
+    LOAD64_F(fa1, fa1f, t1)
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    div.d fv0, fa0, fa1
+    SET_VREG64_F(fv0, fv0f, rOBJ)
+    b         .Lop_div_double_finish
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_double: /* 0xaf */
+/* File: mips/op_rem_double.S */
+/* File: mips/fbinopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be an MIPS instruction or a function call.
+     *
+     * for: add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+    LOAD64_F(fa0, fa0f, a2)
+    LOAD64_F(fa1, fa1f, t1)
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
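+    # As for rem-float, delegate to libc's fmod: the double operands travel
+    # in the fa0/fa1 register pairs and the result returns in fv0/fv0f.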
+    JAL(fmod)
+    SET_VREG64_F(fv0, fv0f, rOBJ)
+    b         .Lop_rem_double_finish
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_int_2addr: /* 0xb0 */
+/* File: mips/op_add_int_2addr.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
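+    # The .if 0/.endif pair is the template's "chkzero" guard: the zero-divisor
+    # check is assembled only in the div/rem instantiations, which use .if 1.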
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_int_2addr: /* 0xb1 */
+/* File: mips/op_sub_int_2addr.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    subu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_int_2addr: /* 0xb2 */
+/* File: mips/op_mul_int_2addr.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_int_2addr: /* 0xb3 */
+/* File: mips/op_div_int_2addr.S */
+#ifdef MIPS32REVGE6
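+/* MIPS32R6 provides true three-operand div/mod instructions; the pre-R6 path
+   in the #else branch below must go through the HI/LO register pair instead. */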
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    div a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+#else
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
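+    # Pre-R6 divide: writing to $zero makes the assembler emit the bare
+    # two-operand divide; the quotient lands in LO and the remainder in HI,
+    # so mflo retrieves the quotient.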
+    div zero, a0, a1                              #  optional op
+    mflo a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+#endif
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_int_2addr: /* 0xb4 */
+/* File: mips/op_rem_int_2addr.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    mod a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+#else
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
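+    # Same pre-R6 HI/LO idiom as div-int/2addr, but mfhi fetches the
+    # remainder instead of the quotient.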
+    div zero, a0, a1                              #  optional op
+    mfhi a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+#endif
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_int_2addr: /* 0xb5 */
+/* File: mips/op_and_int_2addr.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_int_2addr: /* 0xb6 */
+/* File: mips/op_or_int_2addr.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_int_2addr: /* 0xb7 */
+/* File: mips/op_xor_int_2addr.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_int_2addr: /* 0xb8 */
+/* File: mips/op_shl_int_2addr.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    sll a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_int_2addr: /* 0xb9 */
+/* File: mips/op_shr_int_2addr.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    sra a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_int_2addr: /* 0xba */
+/* File: mips/op_ushr_int_2addr.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    srl a0, a0, a1                                  #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_long_2addr: /* 0xbb */
+/* File: mips/op_add_long_2addr.S */
+/*
+ * See op_add_long.S for details
+ */
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
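+    # 64-bit add from 32-bit halves: after "addu v0, a2, a0", unsigned wrap
+    # means v0 < a2 exactly when the low-word add carried out, so sltu
+    # materializes the carry bit to fold into the high word.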
+    addu v0, a2, a0                              #  optional op
+    addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(v0, v1, rOBJ)   #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_long_2addr: /* 0xbc */
+/* File: mips/op_sub_long_2addr.S */
+/*
+ * See op_sub_long.S for more details
+ */
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
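+    # 64-bit subtract with borrow: v0 = a0 - a2 wraps past zero exactly when
+    # a2 > a0, making v0 > a0; "sltu a0, a0, v0" therefore yields the borrow
+    # to subtract from the high word.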
+    subu v0, a0, a2                              #  optional op
+    subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(v0, v1, rOBJ)   #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_long_2addr: /* 0xbd */
+/* File: mips/op_mul_long_2addr.S */
+    /*
+     * See op_mul_long.S for more details
+     */
+    /* mul-long/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64(a0, a1, t0)                     #  vAA.low / high
+
+    GET_OPB(t1)                            #  t1 <- B
+    EAS2(t1, rFP, t1)                      #  t1 <- &fp[B]
+    LOAD64(a2, a3, t1)                     #  vBB.low / high
+
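+    # 32x32->64 multiply from 32-bit products: with vA = a1:a0 and vB = a3:a2,
+    # result.lo = lo(a0*a2) and result.hi = hi(a0*a2) + a0*a3 + a1*a2
+    # (all higher partial products overflow out of 64 bits).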
+    mul       v1, a3, a0                   #  v1= a3a0
+#ifdef MIPS32REVGE6
+    mulu      v0, a2, a0                   #  v0= a2a0
+    muhu      t1, a2, a0
+#else
+    multu     a2, a0
+    mfhi      t1
+    mflo      v0                           #  v0= a2a0
+#endif
+    mul       t2, a2, a1                   #  t2= a2a1
+    addu      v1, v1, t1                   #  v1= a3a0 + hi(a2a0)
+    addu      v1, v1, t2                   #  v1= v1 + a2a1;
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    # vAA <- v0 (low)
+    SET_VREG64(v0, v1, rOBJ)               #  vAA+1 <- v1 (high)
+    GOTO_OPCODE(t1)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_long_2addr: /* 0xbe */
+/* File: mips/op_div_long_2addr.S */
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    .if 1
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
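+    # MIPS32 has no 64-bit divide instruction: call libgcc's __divdi3 with
+    # the dividend in a0/a1 and the divisor in a2/a3; the quotient returns
+    # in v0/v1.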
+                                  #  optional op
+    JAL(__divdi3)                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(v0, v1, rOBJ)   #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_long_2addr: /* 0xbf */
+/* File: mips/op_rem_long_2addr.S */
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    .if 1
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
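+    # Likewise for the remainder: libgcc's __moddi3 returns the 64-bit
+    # modulus in v0/v1.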
+                                  #  optional op
+    JAL(__moddi3)                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(v0, v1, rOBJ)   #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_long_2addr: /* 0xc0 */
+/* File: mips/op_and_long_2addr.S */
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    and a0, a0, a2                              #  optional op
+    and a1, a1, a3                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, rOBJ)   #  vAA/vAA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_long_2addr: /* 0xc1 */
+/* File: mips/op_or_long_2addr.S */
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    or a0, a0, a2                              #  optional op
+    or a1, a1, a3                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, rOBJ)   #  vAA/vAA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_long_2addr: /* 0xc2 */
+/* File: mips/op_xor_long_2addr.S */
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    xor a0, a0, a2                              #  optional op
+    xor a1, a1, a3                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, rOBJ)   #  vAA/vAA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_long_2addr: /* 0xc3 */
+/* File: mips/op_shl_long_2addr.S */
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shl-long/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(t2, rFP, rOBJ)                    #  t2 <- &fp[A]
+    LOAD64(a0, a1, t2)                     #  a0/a1 <- vAA/vAA+1
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
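+    # Mirror of the long right-shift trick: shift >= 32 branches to the
+    # finish stub; otherwise rhi = (ahi << n) | (alo >> (32-n)), where the
+    # two srl instructions build the 32-n shift from v1 = 31-n.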
+    andi    v1, a2, 0x20                   #  v1 <- shift & 0x20
+    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
+    bnez    v1, .Lop_shl_long_2addr_finish
+    not     v1, a2                         #  v1 <- 31-shift  (shift is 5b)
+    srl     a0, 1
+    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
+    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
+    or      v1, a0                         #  rhi<- rhi | alo
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vAA/vAA+1 <- v0/v1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_long_2addr: /* 0xc4 */
+/* File: mips/op_shr_long_2addr.S */
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shr-long/2addr vA, vB */
+    GET_OPA4(t2)                           #  t2 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(t0, rFP, t2)                      #  t0 <- &fp[A]
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vAA/vAA+1
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
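+    # Same two-case split as shl-long/2addr, but the high word uses sra so
+    # that the sign bit propagates (arithmetic shift).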
+    andi    v0, a2, 0x20                   #  shift & 0x20
+    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
+    bnez    v0, .Lop_shr_long_2addr_finish
+    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
+    not     a0, a2                         #  a0 <- 31-shift (shift is 5b)
+    sll     a1, 1
+    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
+    or      v0, a1                         #  rlo<- rlo | ahi
+    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- v0/v1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_long_2addr: /* 0xc5 */
+/* File: mips/op_ushr_long_2addr.S */
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* ushr-long/2addr vA, vB */
+    GET_OPA4(t3)                           #  t3 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(t0, rFP, t3)                      #  t0 <- &fp[A]
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vAA/vAA+1
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+
+    andi      v0, a2, 0x20                 #  shift & 0x20
+    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
+    bnez      v0, .Lop_ushr_long_2addr_finish
+    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
+    not       a0, a2                       #  a0 <- 31-n  (shift is 5b)
+    sll       a1, 1
+    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
+    or        v0, a1                       #  rlo<- rlo | ahi
+    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/vAA+1 <- v0/v1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_float_2addr: /* 0xc6 */
+/* File: mips/op_add_float_2addr.S */
+/* File: mips/fbinop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+     * div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, rOBJ)
+    GET_VREG_F(fa1, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    add.s fv0, fa0, fa1
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_float_2addr: /* 0xc7 */
+/* File: mips/op_sub_float_2addr.S */
+/* File: mips/fbinop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+     * div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, rOBJ)
+    GET_VREG_F(fa1, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    sub.s fv0, fa0, fa1
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_float_2addr: /* 0xc8 */
+/* File: mips/op_mul_float_2addr.S */
+/* File: mips/fbinop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+     * div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, rOBJ)
+    GET_VREG_F(fa1, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    mul.s fv0, fa0, fa1
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_float_2addr: /* 0xc9 */
+/* File: mips/op_div_float_2addr.S */
+/* File: mips/fbinop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+     * div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, rOBJ)
+    GET_VREG_F(fa1, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    div.s fv0, fa0, fa1
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_float_2addr: /* 0xca */
+/* File: mips/op_rem_float_2addr.S */
+/* File: mips/fbinop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+     * div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, rOBJ)
+    GET_VREG_F(fa1, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    JAL(fmodf)
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_double_2addr: /* 0xcb */
+/* File: mips/op_add_double_2addr.S */
+/* File: mips/fbinopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *  div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64_F(fa0, fa0f, t0)
+    LOAD64_F(fa1, fa1f, a1)
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    add.d fv0, fa0, fa1
+    SET_VREG64_F(fv0, fv0f, rOBJ)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_double_2addr: /* 0xcc */
+/* File: mips/op_sub_double_2addr.S */
+/* File: mips/fbinopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *  div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64_F(fa0, fa0f, t0)
+    LOAD64_F(fa1, fa1f, a1)
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    sub.d fv0, fa0, fa1
+    SET_VREG64_F(fv0, fv0f, rOBJ)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_double_2addr: /* 0xcd */
+/* File: mips/op_mul_double_2addr.S */
+/* File: mips/fbinopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *  div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64_F(fa0, fa0f, t0)
+    LOAD64_F(fa1, fa1f, a1)
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    mul.d fv0, fa0, fa1
+    SET_VREG64_F(fv0, fv0f, rOBJ)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_double_2addr: /* 0xce */
+/* File: mips/op_div_double_2addr.S */
+/* File: mips/fbinopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *  div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64_F(fa0, fa0f, t0)
+    LOAD64_F(fa1, fa1f, a1)
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    div.d fv0, fa0, fa1
+    SET_VREG64_F(fv0, fv0f, rOBJ)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_double_2addr: /* 0xcf */
+/* File: mips/op_rem_double_2addr.S */
+/* File: mips/fbinopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be an MIPS instruction or a function call.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *  div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
+    LOAD64_F(fa0, fa0f, t0)
+    LOAD64_F(fa1, fa1f, a1)
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    JAL(fmod)
+    SET_VREG64_F(fv0, fv0f, rOBJ)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_int_lit16: /* 0xd0 */
+/* File: mips/op_add_int_lit16.S */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15               #  rOBJ <- A
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rsub_int: /* 0xd1 */
+/* File: mips/op_rsub_int.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    subu a0, a1, a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
+    /* 10-13 instructions */
+
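+/*
+ * Note the reversed operands above ("subu a0, a1, a0"): rsub computes
+ * vA <- +CCCC - vB, the literal minus the register. There is no plain
+ * sub-int/lit16, since add-int/lit16 with a negated literal covers it.
+ */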
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_int_lit16: /* 0xd2 */
+/* File: mips/op_mul_int_lit16.S */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_int_lit16: /* 0xd3 */
+/* File: mips/op_div_int_lit16.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 1
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    div a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
+    /* 10-13 instructions */
+
+#else
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 1
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    div zero, a0, a1                              #  optional op
+    mflo a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
+    /* 10-13 instructions */
+
+#endif
+
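+/*
+ * The two variants above differ only in how the ISA exposes division: on
+ * MIPS32R6, "div" (and "mod") are three-operand instructions that write a
+ * GPR directly, while earlier revisions deposit quotient and remainder in
+ * LO/HI, read back with mflo/mfhi. Either way the handler is, roughly:
+ *
+ *     if (lit == 0) goto common_errDivideByZero;
+ *     set_vreg(A, vreg(B) / lit);              // illustrative C helpers
+ */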
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_int_lit16: /* 0xd4 */
+/* File: mips/op_rem_int_lit16.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 1
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    mod a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
+    /* 10-13 instructions */
+
+#else
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 1
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    div zero, a0, a1                              #  optional op
+    mfhi a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
+    /* 10-13 instructions */
+
+#endif
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_int_lit16: /* 0xd5 */
+/* File: mips/op_and_int_lit16.S */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_int_lit16: /* 0xd6 */
+/* File: mips/op_or_int_lit16.S */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_int_lit16: /* 0xd7 */
+/* File: mips/op_xor_int_lit16.S */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
+    /* 10-13 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_int_lit8: /* 0xd8 */
+/* File: mips/op_add_int_lit8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
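+/*
+ * A rough C sketch of the binopLit8 decode shared by the handlers in
+ * this group (fetch/vreg/set_vreg are illustrative, not real helpers):
+ *
+ *     uint16_t inst = fetch(rPC);              // AA|op
+ *     int AA = inst >> 8;                      // destination register
+ *     int32_t u1 = (int16_t)fetch(rPC + 1);    // ssssCCBB
+ *     int BB = u1 & 0xff;                      // source register
+ *     int32_t CC = u1 >> 8;                    // literal, sign-extended
+ *     set_vreg(AA, vreg(BB) + CC);             // here: add-int/lit8
+ */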
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rsub_int_lit8: /* 0xd9 */
+/* File: mips/op_rsub_int_lit8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    subu a0, a1, a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_int_lit8: /* 0xda */
+/* File: mips/op_mul_int_lit8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_int_lit8: /* 0xdb */
+/* File: mips/op_div_int_lit8.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    div a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+#else
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    div zero, a0, a1                              #  optional op
+    mflo a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+#endif
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_int_lit8: /* 0xdc */
+/* File: mips/op_rem_int_lit8.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    mod a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+#else
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    div zero, a0, a1                              #  optional op
+    mfhi a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+#endif
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_int_lit8: /* 0xdd */
+/* File: mips/op_and_int_lit8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_int_lit8: /* 0xde */
+/* File: mips/op_or_int_lit8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_int_lit8: /* 0xdf */
+/* File: mips/op_xor_int_lit8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_int_lit8: /* 0xe0 */
+/* File: mips/op_shl_int_lit8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    sll a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_int_lit8: /* 0xe1 */
+/* File: mips/op_shr_int_lit8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    sra a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_int_lit8: /* 0xe2 */
+/* File: mips/op_ushr_int_lit8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    srl a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_quick: /* 0xe3 */
+/* File: mips/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1
+    lw     a0, 0(t0)                    #  a0 <- obj.field (32 bits)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
+
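+/*
+ * "Quickened" field gets carry a precomputed byte offset in the CCCC slot
+ * instead of a field index, so the fast path is only a null check plus a
+ * load. In rough C (illustrative names):
+ *
+ *     char* obj = (char*)vreg(B);
+ *     if (obj == NULL) goto common_errNullObject;
+ *     set_vreg(A, *(int32_t*)(obj + CCCC));
+ */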
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_wide_quick: /* 0xe4 */
+/* File: mips/op_iget_wide_quick.S */
+    # iget-wide-quick vA, vB, offset       /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1                   #  t0 <- a3 + a1
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- obj.field (64 bits, aligned)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(a0, a1, a2)                 #  fp[A] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_object_quick: /* 0xe5 */
+/* File: mips/op_iget_object_quick.S */
+    /* For: iget-object-quick */
+    /* op vA, vB, offset@CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    EXPORT_PC()
+    GET_VREG(a0, a2)                       #  a0 <- object we're operating on
+    JAL(artIGetObjectFromMterp)            #  v0 <- GetObj(obj, offset)
+    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    GET_OPA4(a2)                           #  a2<- A+
+    PREFETCH_INST(2)                       #  load rINST
+    bnez a3, MterpPossibleException        #  bail out
+    SET_VREG_OBJECT(v0, a2)                #  fp[A] <- v0
+    ADVANCE(2)                             #  advance rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_quick: /* 0xe6 */
+/* File: mips/op_iput_quick.S */
+    /* For: iput-quick, iput-object-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    beqz      a3, common_errNullObject     #  object was null
+    GET_VREG(a0, a2)                       #  a0 <- fp[A]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      t0, a3, a1
+    sw    a0, 0(t0)                    #  obj.field (32 bits) <- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_wide_quick: /* 0xe7 */
+/* File: mips/op_iput_wide_quick.S */
+    # iput-wide-quick vA, vB, offset       /* CCCC */
+    GET_OPA4(a0)                           #  a0 <- A(+)
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a2, a1)                       #  a2 <- fp[B], the object pointer
+    # check object for null
+    beqz      a2, common_errNullObject     #  object was null
+    EAS2(a3, rFP, a0)                      #  a3 <- &fp[A]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[A]
+    FETCH(a3, 1)                           #  a3 <- field byte offset
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      a2, a2, a3                   #  a2 <- object addr + field byte offset
+    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_object_quick: /* 0xe8 */
+/* File: mips/op_iput_object_quick.S */
+    /* For: iput-object-quick */
+    # op vA, vB, offset                 /* CCCC */
+    EXPORT_PC()
+    addu   a0, rFP, OFF_FP_SHADOWFRAME
+    move   a1, rPC
+    move   a2, rINST
+    JAL(MterpIputObjectQuick)
+    beqz   v0, MterpException
+    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_virtual_quick: /* 0xe9 */
+/* File: mips/op_invoke_virtual_quick.S */
+/* File: mips/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    .extern MterpInvokeVirtualQuick
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL(MterpInvokeVirtualQuick)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(3)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+
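+/*
+ * The invoke handlers all share this wrapper shape: call a C++ helper
+ * with (self, shadow_frame, dex_pc, inst); a zero return means an
+ * exception is pending, so bail to MterpException; otherwise advance
+ * past the 3-unit instruction and let MterpShouldSwitchInterpreters
+ * decide whether to punt to the reference interpreter. Roughly:
+ *
+ *     if (!MterpInvokeVirtualQuick(self, frame, pc, inst))
+ *         goto MterpException;
+ */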
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_virtual_range_quick: /* 0xea */
+/* File: mips/op_invoke_virtual_range_quick.S */
+/* File: mips/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    .extern MterpInvokeVirtualQuickRange
+    EXPORT_PC()
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    JAL(MterpInvokeVirtualQuickRange)
+    beqz    v0, MterpException
+    FETCH_ADVANCE_INST(3)
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_boolean_quick: /* 0xeb */
+/* File: mips/op_iput_boolean_quick.S */
+/* File: mips/op_iput_quick.S */
+    /* For: iput-quick, iput-object-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    beqz      a3, common_errNullObject     #  object was null
+    GET_VREG(a0, a2)                       #  a0 <- fp[A]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      t0, a3, a1
+    sb    a0, 0(t0)                    #  obj.field (8 bits) <- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_byte_quick: /* 0xec */
+/* File: mips/op_iput_byte_quick.S */
+/* File: mips/op_iput_quick.S */
+    /* For: iput-quick, iput-object-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    beqz      a3, common_errNullObject     #  object was null
+    GET_VREG(a0, a2)                       #  a0 <- fp[A]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      t0, a3, a1
+    sb    a0, 0(t0)                    #  obj.field (8 bits) <- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_char_quick: /* 0xed */
+/* File: mips/op_iput_char_quick.S */
+/* File: mips/op_iput_quick.S */
+    /* For: iput-quick, iput-object-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    beqz      a3, common_errNullObject     #  object was null
+    GET_VREG(a0, a2)                       #  a0 <- fp[A]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      t0, a3, a1
+    sh    a0, 0(t0)                    #  obj.field (16 bits) <- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_short_quick: /* 0xee */
+/* File: mips/op_iput_short_quick.S */
+/* File: mips/op_iput_quick.S */
+    /* For: iput-quick, iput-object-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    beqz      a3, common_errNullObject     #  object was null
+    GET_VREG(a0, a2)                       #  a0 <- fp[A]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      t0, a3, a1
+    sh    a0, 0(t0)                    #  obj.field (16 bits) <- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_boolean_quick: /* 0xef */
+/* File: mips/op_iget_boolean_quick.S */
+/* File: mips/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1
+    lbu     a0, 0(t0)                    #  a0 <- obj.field (8 bits, zero-extended)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_byte_quick: /* 0xf0 */
+/* File: mips/op_iget_byte_quick.S */
+/* File: mips/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1
+    lb     a0, 0(t0)                    #  a0 <- obj.field (8 bits, sign-extended)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_char_quick: /* 0xf1 */
+/* File: mips/op_iget_char_quick.S */
+/* File: mips/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1
+    lhu     a0, 0(t0)                    #  a0 <- obj.field (16 bits, zero-extended)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_short_quick: /* 0xf2 */
+/* File: mips/op_iget_short_quick.S */
+/* File: mips/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1
+    lh     a0, 0(t0)                    #  a0 <- obj.field (16 bits, sign-extended)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_lambda: /* 0xf3 */
+/* Transfer stub to alternate interpreter */
+    b    MterpFallback
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_f4: /* 0xf4 */
+/* File: mips/op_unused_f4.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_capture_variable: /* 0xf5 */
+/* Transfer stub to alternate interpreter */
+    b    MterpFallback
+
+/* ------------------------------ */
+    .balign 128
+.L_op_create_lambda: /* 0xf6 */
+/* Transfer stub to alternate interpreter */
+    b    MterpFallback
+
+/* ------------------------------ */
+    .balign 128
+.L_op_liberate_variable: /* 0xf7 */
+/* Transfer stub to alternate interpreter */
+    b    MterpFallback
+
+/* ------------------------------ */
+    .balign 128
+.L_op_box_lambda: /* 0xf8 */
+/* Transfer stub to alternate interpreter */
+    b    MterpFallback
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unbox_lambda: /* 0xf9 */
+/* Transfer stub to alternate interpreter */
+    b    MterpFallback
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fa: /* 0xfa */
+/* File: mips/op_unused_fa.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fb: /* 0xfb */
+/* File: mips/op_unused_fb.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fc: /* 0xfc */
+/* File: mips/op_unused_fc.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fd: /* 0xfd */
+/* File: mips/op_unused_fd.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fe: /* 0xfe */
+/* File: mips/op_unused_fe.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_ff: /* 0xff */
+/* File: mips/op_unused_ff.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+  b MterpFallback
+
+
+    .balign 128
+    .size   artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
+    .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ *  Sister implementations
+ * ===========================================================================
+ */
+    .global artMterpAsmSisterStart
+    .type   artMterpAsmSisterStart, %function
+    .text
+    .balign 4
+artMterpAsmSisterStart:
+
+/* continuation for op_packed_switch */
+
+.Lop_packed_switch_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_sparse_switch */
+
+.Lop_sparse_switch_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_cmpl_float */
+
+.Lop_cmpl_float_nan:
+    li rTEMP, -1
+
+.Lop_cmpl_float_finish:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
+
+/* continuation for op_cmpg_float */
+
+.Lop_cmpg_float_nan:
+    li rTEMP, 1
+
+.Lop_cmpg_float_finish:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
+
+/* continuation for op_cmpl_double */
+
+.Lop_cmpl_double_nan:
+    li rTEMP, -1
+
+.Lop_cmpl_double_finish:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
+
+/* continuation for op_cmpg_double */
+
+.Lop_cmpg_double_nan:
+    li rTEMP, 1
+
+.Lop_cmpg_double_finish:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
+
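+/*
+ * The -1 / +1 defaults above encode the Dalvik NaN bias: cmpl-* yields -1
+ * when either operand is NaN, while cmpg-* yields +1, so the compiler can
+ * pick whichever variant makes the following branch fall the right way.
+ */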
+/* continuation for op_if_eq */
+
+.L_op_if_eq_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_if_ne */
+
+.L_op_if_ne_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_if_lt */
+
+.L_op_if_lt_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_if_ge */
+
+.L_op_if_ge_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_if_gt */
+
+.L_op_if_gt_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_if_le */
+
+.L_op_if_le_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_float_to_int */
+
+/*
+ * Not an entry point, as it is used only once.
+ */
+f2i_doconv:
+#ifdef MIPS32REVGE6
+    l.s       fa1, .LFLOAT_TO_INT_max
+    cmp.ule.s ft2, fa1, fa0
+    l.s       fv0, .LFLOAT_TO_INT_ret_max
+    bc1nez    ft2, .Lop_float_to_int_set_vreg_f
+
+    l.s       fa1, .LFLOAT_TO_INT_min
+    cmp.ule.s ft2, fa0, fa1
+    l.s       fv0, .LFLOAT_TO_INT_ret_min
+    bc1nez    ft2, .Lop_float_to_int_set_vreg_f
+
+    mov.s     fa1, fa0
+    cmp.un.s  ft2, fa0, fa1
+    li.s      fv0, 0
+    bc1nez    ft2, .Lop_float_to_int_set_vreg_f
+#else
+    l.s       fa1, .LFLOAT_TO_INT_max
+    c.ole.s   fcc0, fa1, fa0
+    l.s       fv0, .LFLOAT_TO_INT_ret_max
+    bc1t      .Lop_float_to_int_set_vreg_f
+
+    l.s       fa1, .LFLOAT_TO_INT_min
+    c.ole.s   fcc0, fa0, fa1
+    l.s       fv0, .LFLOAT_TO_INT_ret_min
+    bc1t      .Lop_float_to_int_set_vreg_f
+
+    mov.s     fa1, fa0
+    c.un.s    fcc0, fa0, fa1
+    li.s      fv0, 0
+    bc1t      .Lop_float_to_int_set_vreg_f
+#endif
+
+    trunc.w.s  fv0, fa0
+    b         .Lop_float_to_int_set_vreg_f
+
+.LFLOAT_TO_INT_max:
+    .word 0x4f000000                       #  2^31, as a float
+.LFLOAT_TO_INT_min:
+    .word 0xcf000000                       #  -2^31, as a float
+.LFLOAT_TO_INT_ret_max:
+    .word 0x7fffffff                       #  maxint
+.LFLOAT_TO_INT_ret_min:
+    .word 0x80000000                       #  minint
+
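+/*
+ * The constants above implement Java's float-to-int rules: clamp to
+ * maxint at or above 2^31, clamp to minint at or below -2^31, map NaN
+ * (caught by the unordered self-compare) to 0, and truncate toward zero
+ * otherwise. In rough C:
+ *
+ *     if (f >= 2147483648.0f)  return INT32_MAX;
+ *     if (f <= -2147483648.0f) return INT32_MIN;
+ *     if (f != f)              return 0;       // NaN
+ *     return (int32_t)f;                       // trunc.w.s
+ */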
+/* continuation for op_float_to_long */
+
+f2l_doconv:
+#ifdef MIPS32REVGE6
+    l.s       fa1, .LLONG_TO_max
+    cmp.ule.s ft2, fa1, fa0
+    li        rRESULT0, ~0
+    li        rRESULT1, ~0x80000000
+    bc1nez    ft2, .Lop_float_to_long_set_vreg
+
+    l.s       fa1, .LLONG_TO_min
+    cmp.ule.s ft2, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0x80000000
+    bc1nez    ft2, .Lop_float_to_long_set_vreg
+
+    mov.s     fa1, fa0
+    cmp.un.s  ft2, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1nez    ft2, .Lop_float_to_long_set_vreg
+#else
+    l.s       fa1, .LLONG_TO_max
+    c.ole.s   fcc0, fa1, fa0
+    li        rRESULT0, ~0
+    li        rRESULT1, ~0x80000000
+    bc1t      .Lop_float_to_long_set_vreg
+
+    l.s       fa1, .LLONG_TO_min
+    c.ole.s   fcc0, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0x80000000
+    bc1t      .Lop_float_to_long_set_vreg
+
+    mov.s     fa1, fa0
+    c.un.s    fcc0, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1t      .Lop_float_to_long_set_vreg
+#endif
+
+    JAL(__fixsfdi)
+
+    b         .Lop_float_to_long_set_vreg
+
+.LLONG_TO_max:
+    .word 0x5f000000
+
+.LLONG_TO_min:
+    .word 0xdf000000
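+
+/*
+ * Same saturation pattern as f2i_doconv, but bounded by 2^63 (0x5f000000
+ * as a float) and with the in-range case handed to libgcc's __fixsfdi.
+ * C sketch (F2lSketch is illustrative, not a symbol in this file):
+ *
+ *   #include <stdint.h>
+ *   static int64_t F2lSketch(float f) {
+ *     if (f >= 9223372036854775808.0f)  return INT64_MAX;  // 2^63
+ *     if (f <= -9223372036854775808.0f) return INT64_MIN;  // -2^63
+ *     if (f != f)                       return 0;          // NaN
+ *     return (int64_t) f;                                  // via __fixsfdi
+ *   }
+ */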
+
+/* continuation for op_double_to_int */
+
+d2i_doconv:
+#ifdef MIPS32REVGE6
+    la        t0, .LDOUBLE_TO_INT_max
+    LOAD64_F(fa1, fa1f, t0)
+    cmp.ule.d ft2, fa1, fa0
+    l.s       fv0, .LDOUBLE_TO_INT_maxret
+    bc1nez    ft2, .Lop_double_to_int_set_vreg_f
+
+    la        t0, .LDOUBLE_TO_INT_min
+    LOAD64_F(fa1, fa1f, t0)
+    cmp.ule.d ft2, fa0, fa1
+    l.s       fv0, .LDOUBLE_TO_INT_minret
+    bc1nez    ft2, .Lop_double_to_int_set_vreg_f
+
+    mov.d     fa1, fa0
+    cmp.un.d  ft2, fa0, fa1
+    li.s      fv0, 0
+    bc1nez    ft2, .Lop_double_to_int_set_vreg_f
+#else
+    la        t0, .LDOUBLE_TO_INT_max
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa1, fa0
+    l.s       fv0, .LDOUBLE_TO_INT_maxret
+    bc1t      .Lop_double_to_int_set_vreg_f
+
+    la        t0, .LDOUBLE_TO_INT_min
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa0, fa1
+    l.s       fv0, .LDOUBLE_TO_INT_minret
+    bc1t      .Lop_double_to_int_set_vreg_f
+
+    mov.d     fa1, fa0
+    c.un.d    fcc0, fa0, fa1
+    li.s      fv0, 0
+    bc1t      .Lop_double_to_int_set_vreg_f
+#endif
+
+    trunc.w.d  fv0, fa0
+    b         .Lop_double_to_int_set_vreg_f
+
+.LDOUBLE_TO_INT_max:
+    .dword 0x41dfffffffc00000              #  maxint, as a double
+.LDOUBLE_TO_INT_min:
+    .dword 0xc1e0000000000000              #  minint, as a double
+.LDOUBLE_TO_INT_maxret:
+    .word 0x7fffffff
+.LDOUBLE_TO_INT_minret:
+    .word 0x80000000
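+
+/*
+ * Double-to-int variant of the same idea.  Unlike the float case, maxint
+ * is exactly representable here: 0x41dfffffffc00000 == 2147483647.0.
+ * C sketch (D2iSketch is illustrative, not a symbol in this file):
+ *
+ *   #include <stdint.h>
+ *   static int32_t D2iSketch(double d) {
+ *     if (d >= 2147483647.0)  return INT32_MAX;
+ *     if (d <= -2147483648.0) return INT32_MIN;
+ *     if (d != d)             return 0;            // NaN
+ *     return (int32_t) d;                          // trunc.w.d: round toward zero
+ *   }
+ */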
+
+/* continuation for op_double_to_long */
+
+d2l_doconv:
+#ifdef MIPS32REVGE6
+    la        t0, .LDOUBLE_TO_LONG_max
+    LOAD64_F(fa1, fa1f, t0)
+    cmp.ule.d ft2, fa1, fa0
+    la        t0, .LDOUBLE_TO_LONG_ret_max
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1nez    ft2, .Lop_double_to_long_set_vreg
+
+    la        t0, .LDOUBLE_TO_LONG_min
+    LOAD64_F(fa1, fa1f, t0)
+    cmp.ule.d ft2, fa0, fa1
+    la        t0, .LDOUBLE_TO_LONG_ret_min
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1nez    ft2, .Lop_double_to_long_set_vreg
+
+    mov.d     fa1, fa0
+    cmp.un.d  ft2, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1nez    ft2, .Lop_double_to_long_set_vreg
+#else
+    la        t0, .LDOUBLE_TO_LONG_max
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa1, fa0
+    la        t0, .LDOUBLE_TO_LONG_ret_max
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1t      .Lop_double_to_long_set_vreg
+
+    la        t0, .LDOUBLE_TO_LONG_min
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa0, fa1
+    la        t0, .LDOUBLE_TO_LONG_ret_min
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1t      .Lop_double_to_long_set_vreg
+
+    mov.d     fa1, fa0
+    c.un.d    fcc0, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1t      .Lop_double_to_long_set_vreg
+#endif
+    JAL(__fixdfdi)
+    b         .Lop_double_to_long_set_vreg
+
+.LDOUBLE_TO_LONG_max:
+    .dword 0x43e0000000000000              #  2^63, the maxlong bound, as a double
+.LDOUBLE_TO_LONG_min:
+    .dword 0xc3e0000000000000              #  minlong, as a double
+.LDOUBLE_TO_LONG_ret_max:
+    .dword 0x7fffffffffffffff
+.LDOUBLE_TO_LONG_ret_min:
+    .dword 0x8000000000000000
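+
+/*
+ * Double-to-long saturation against +/-2^63, with the in-range case handed
+ * to libgcc's __fixdfdi.  C sketch (D2lSketch is illustrative, not a
+ * symbol in this file):
+ *
+ *   #include <stdint.h>
+ *   static int64_t D2lSketch(double d) {
+ *     if (d >= 9223372036854775808.0)  return INT64_MAX;   // 2^63
+ *     if (d <= -9223372036854775808.0) return INT64_MIN;   // -2^63
+ *     if (d != d)                      return 0;           // NaN
+ *     return (int64_t) d;                                  // via __fixdfdi
+ *   }
+ */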
+
+/* continuation for op_mul_long */
+
+.Lop_mul_long_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64(v0, v1, a0)                 #  vAA::vAA+1 <- v0(low) :: v1(high)
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_shl_long */
+
+.Lop_shl_long_finish:
+    SET_VREG64_GOTO(zero, v0, t2, t0)      #  vAA/vAA+1 <- rlo/rhi
+
+/* continuation for op_shr_long */
+
+.Lop_shr_long_finish:
+    sra     a3, a1, 31                     #  a3<- sign(ah)
+    SET_VREG64_GOTO(v1, a3, t3, t0)        #  vAA/vAA+1 <- rlo/rhi
+
+/* continuation for op_ushr_long */
+
+.Lop_ushr_long_finish:
+    SET_VREG64_GOTO(v1, zero, rOBJ, t0)    #  vAA/vAA+1 <- rlo/rhi
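+
+/*
+ * These finish paths cover 64-bit shifts whose masked distance n has bit 5
+ * set (n in [32, 63]); the common n < 32 case stays in the main handlers.
+ * With the value split into lo/hi 32-bit halves, they reduce to (C sketch,
+ * illustrative only):
+ *
+ *   shl:  rlo = 0;                         rhi = lo << (n - 32);
+ *   shr:  rlo = (int32_t) hi >> (n - 32);  rhi = (int32_t) hi >> 31;  // sign fill
+ *   ushr: rlo = (uint32_t) hi >> (n - 32); rhi = 0;
+ */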
+
+/* continuation for op_add_double */
+
+.Lop_add_double_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_sub_double */
+
+.Lop_sub_double_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_mul_double */
+
+.Lop_mul_double_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_div_double */
+
+.Lop_div_double_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_rem_double */
+
+.Lop_rem_double_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for op_shl_long_2addr */
+
+.Lop_shl_long_2addr_finish:
+    SET_VREG64_GOTO(zero, v0, rOBJ, t0)    #  vAA/vAA+1 <- rlo/rhi
+
+/* continuation for op_shr_long_2addr */
+
+.Lop_shr_long_2addr_finish:
+    sra     a3, a1, 31                     #  a3<- sign(ah)
+    SET_VREG64_GOTO(v1, a3, t2, t0)        #  vAA/vAA+1 <- rlo/rhi
+
+/* continuation for op_ushr_long_2addr */
+
+.Lop_ushr_long_2addr_finish:
+    SET_VREG64_GOTO(v1, zero, t3, t0)      #  vAA/vAA+1 <- rlo/rhi
+
+    .size   artMterpAsmSisterStart, .-artMterpAsmSisterStart
+    .global artMterpAsmSisterEnd
+artMterpAsmSisterEnd:
+
+
+    .global artMterpAsmAltInstructionStart
+    .type   artMterpAsmAltInstructionStart, %function
+    .text
+
+artMterpAsmAltInstructionStart = .L_ALT_op_nop
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_nop: /* 0x00 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (0 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
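+
+/*
+ * Every .L_ALT_* stub below repeats this 128-byte pattern.  The
+ * "interesting requests" are instrumentation/debugger events: when they
+ * are active, rIBASE is switched to this alternate table, so each
+ * instruction first enters MterpCheckBefore(self, shadow_frame) (a0/a1 as
+ * loaded above).  Since ra is preloaded with artMterpAsmInstructionStart +
+ * opcode * 128, MterpCheckBefore's return lands in the primary handler
+ * for the same opcode.  Rough C view (illustrative only):
+ *
+ *   ra = artMterpAsmInstructionStart + opcode * 128;  // "return" target
+ *   MterpCheckBefore(self, shadow_frame);             // jalr zero = plain jump
+ */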
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move: /* 0x01 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (1 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_from16: /* 0x02 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (2 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_16: /* 0x03 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (3 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_wide: /* 0x04 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (4 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_wide_from16: /* 0x05 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (5 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_wide_16: /* 0x06 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (6 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_object: /* 0x07 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (7 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_object_from16: /* 0x08 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (8 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_object_16: /* 0x09 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (9 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_result: /* 0x0a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (10 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_result_wide: /* 0x0b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (11 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_result_object: /* 0x0c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (12 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_exception: /* 0x0d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (13 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return_void: /* 0x0e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (14 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return: /* 0x0f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (15 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return_wide: /* 0x10 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (16 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return_object: /* 0x11 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (17 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_4: /* 0x12 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (18 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_16: /* 0x13 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (19 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const: /* 0x14 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (20 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_high16: /* 0x15 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (21 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_wide_16: /* 0x16 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (22 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_wide_32: /* 0x17 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (23 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_wide: /* 0x18 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (24 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_wide_high16: /* 0x19 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (25 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_string: /* 0x1a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (26 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_string_jumbo: /* 0x1b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (27 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_class: /* 0x1c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (28 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_monitor_enter: /* 0x1d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (29 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_monitor_exit: /* 0x1e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (30 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_check_cast: /* 0x1f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (31 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_instance_of: /* 0x20 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (32 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_array_length: /* 0x21 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (33 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_new_instance: /* 0x22 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (34 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_new_array: /* 0x23 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (35 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_filled_new_array: /* 0x24 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (36 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_filled_new_array_range: /* 0x25 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (37 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_fill_array_data: /* 0x26 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (38 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_throw: /* 0x27 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (39 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_goto: /* 0x28 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (40 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_goto_16: /* 0x29 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (41 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_goto_32: /* 0x2a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (42 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_packed_switch: /* 0x2b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (43 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sparse_switch: /* 0x2c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (44 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmpl_float: /* 0x2d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (45 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmpg_float: /* 0x2e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (46 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmpl_double: /* 0x2f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (47 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmpg_double: /* 0x30 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (48 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmp_long: /* 0x31 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (49 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_eq: /* 0x32 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (50 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_ne: /* 0x33 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (51 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_lt: /* 0x34 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (52 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_ge: /* 0x35 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (53 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_gt: /* 0x36 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (54 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_le: /* 0x37 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (55 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_eqz: /* 0x38 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (56 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_nez: /* 0x39 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (57 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_ltz: /* 0x3a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (58 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_gez: /* 0x3b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (59 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_gtz: /* 0x3c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (60 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_lez: /* 0x3d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (61 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_3e: /* 0x3e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (62 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_3f: /* 0x3f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (63 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_40: /* 0x40 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (64 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_41: /* 0x41 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (65 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_42: /* 0x42 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (66 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_43: /* 0x43 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (67 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
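+/*
+ * Note that unused opcodes (op_unused_3e through op_unused_43 above) still
+ * get full 128-byte stubs: the table must stay dense so the opcode * 128
+ * indexing holds for every opcode byte value, used or not.
+ */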
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget: /* 0x44 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (68 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_wide: /* 0x45 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (69 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_object: /* 0x46 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (70 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_boolean: /* 0x47 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (71 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_byte: /* 0x48 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (72 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_char: /* 0x49 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (73 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_short: /* 0x4a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (74 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput: /* 0x4b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (75 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_wide: /* 0x4c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (76 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_object: /* 0x4d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (77 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_boolean: /* 0x4e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (78 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_byte: /* 0x4f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (79 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_char: /* 0x50 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (80 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_short: /* 0x51 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (81 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget: /* 0x52 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (82 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_wide: /* 0x53 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (83 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_object: /* 0x54 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (84 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_boolean: /* 0x55 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (85 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_byte: /* 0x56 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (86 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_char: /* 0x57 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (87 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_short: /* 0x58 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (88 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput: /* 0x59 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (89 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_wide: /* 0x5a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (90 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_object: /* 0x5b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (91 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_boolean: /* 0x5c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (92 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_byte: /* 0x5d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (93 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_char: /* 0x5e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (94 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_short: /* 0x5f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (95 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget: /* 0x60 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (96 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_wide: /* 0x61 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (97 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_object: /* 0x62 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (98 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_boolean: /* 0x63 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (99 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_byte: /* 0x64 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (100 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_char: /* 0x65 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (101 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_short: /* 0x66 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (102 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput: /* 0x67 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (103 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_wide: /* 0x68 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (104 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_object: /* 0x69 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (105 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_boolean: /* 0x6a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (106 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_byte: /* 0x6b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (107 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_char: /* 0x6c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (108 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_short: /* 0x6d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (109 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_virtual: /* 0x6e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (110 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
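+/*
+ * Worked example of the offset arithmetic for the stub above: invoke-virtual
+ * is opcode 0x6e = 110, so its primary handler sits at
+ * artMterpAsmInstructionStart + 110 * 128 = base + 14080 (0x3700), i.e.
+ * exactly 110 handler slots past the base thanks to the uniform 128-byte
+ * stride.
+ */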
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_super: /* 0x6f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (111 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_direct: /* 0x70 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (112 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_static: /* 0x71 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (113 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_interface: /* 0x72 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (114 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return_void_no_barrier: /* 0x73 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (115 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_virtual_range: /* 0x74 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (116 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_super_range: /* 0x75 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (117 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_direct_range: /* 0x76 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (118 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_static_range: /* 0x77 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (119 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_interface_range: /* 0x78 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (120 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_79: /* 0x79 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (121 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_7a: /* 0x7a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (122 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_neg_int: /* 0x7b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (123 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_not_int: /* 0x7c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (124 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_neg_long: /* 0x7d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (125 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_not_long: /* 0x7e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (126 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_neg_float: /* 0x7f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (127 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_neg_double: /* 0x80 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (128 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_long: /* 0x81 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (129 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_float: /* 0x82 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (130 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_double: /* 0x83 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (131 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_long_to_int: /* 0x84 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (132 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_long_to_float: /* 0x85 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (133 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_long_to_double: /* 0x86 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (134 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_float_to_int: /* 0x87 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (135 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_float_to_long: /* 0x88 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (136 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_float_to_double: /* 0x89 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (137 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_double_to_int: /* 0x8a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (138 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_double_to_long: /* 0x8b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (139 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_double_to_float: /* 0x8c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (140 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_byte: /* 0x8d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (141 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_char: /* 0x8e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (142 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_short: /* 0x8f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (143 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_int: /* 0x90 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (144 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_int: /* 0x91 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (145 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_int: /* 0x92 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (146 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_int: /* 0x93 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (147 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_int: /* 0x94 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (148 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_int: /* 0x95 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (149 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_int: /* 0x96 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (150 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_int: /* 0x97 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (151 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_int: /* 0x98 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (152 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_int: /* 0x99 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (153 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_int: /* 0x9a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (154 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_long: /* 0x9b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (155 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_long: /* 0x9c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (156 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_long: /* 0x9d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (157 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_long: /* 0x9e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (158 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_long: /* 0x9f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (159 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_long: /* 0xa0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (160 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_long: /* 0xa1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (161 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_long: /* 0xa2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (162 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_long: /* 0xa3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (163 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_long: /* 0xa4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (164 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_long: /* 0xa5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (165 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_float: /* 0xa6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (166 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_float: /* 0xa7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (167 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_float: /* 0xa8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (168 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_float: /* 0xa9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (169 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_float: /* 0xaa */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (170 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_double: /* 0xab */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (171 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_double: /* 0xac */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (172 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_double: /* 0xad */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (173 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_double: /* 0xae */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (174 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_double: /* 0xaf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (175 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_int_2addr: /* 0xb0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (176 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_int_2addr: /* 0xb1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (177 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_int_2addr: /* 0xb2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (178 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_int_2addr: /* 0xb3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (179 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_int_2addr: /* 0xb4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (180 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_int_2addr: /* 0xb5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (181 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_int_2addr: /* 0xb6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (182 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_int_2addr: /* 0xb7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (183 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_int_2addr: /* 0xb8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (184 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_int_2addr: /* 0xb9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (185 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_int_2addr: /* 0xba */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (186 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_long_2addr: /* 0xbb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (187 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_long_2addr: /* 0xbc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (188 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_long_2addr: /* 0xbd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (189 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_long_2addr: /* 0xbe */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (190 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_long_2addr: /* 0xbf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (191 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_long_2addr: /* 0xc0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (192 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_long_2addr: /* 0xc1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (193 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_long_2addr: /* 0xc2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (194 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_long_2addr: /* 0xc3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (195 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_long_2addr: /* 0xc4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (196 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_long_2addr: /* 0xc5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (197 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_float_2addr: /* 0xc6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (198 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_float_2addr: /* 0xc7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (199 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_float_2addr: /* 0xc8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (200 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_float_2addr: /* 0xc9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (201 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_float_2addr: /* 0xca */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (202 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_double_2addr: /* 0xcb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (203 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_double_2addr: /* 0xcc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (204 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_double_2addr: /* 0xcd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (205 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_double_2addr: /* 0xce */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (206 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_double_2addr: /* 0xcf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (207 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_int_lit16: /* 0xd0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (208 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rsub_int: /* 0xd1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (209 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_int_lit16: /* 0xd2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (210 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_int_lit16: /* 0xd3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (211 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_int_lit16: /* 0xd4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (212 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_int_lit16: /* 0xd5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (213 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_int_lit16: /* 0xd6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (214 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_int_lit16: /* 0xd7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (215 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_int_lit8: /* 0xd8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (216 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rsub_int_lit8: /* 0xd9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (217 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_int_lit8: /* 0xda */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (218 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_int_lit8: /* 0xdb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (219 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_int_lit8: /* 0xdc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (220 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_int_lit8: /* 0xdd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (221 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_int_lit8: /* 0xde */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (222 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_int_lit8: /* 0xdf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (223 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_int_lit8: /* 0xe0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (224 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_int_lit8: /* 0xe1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (225 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_int_lit8: /* 0xe2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (226 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_quick: /* 0xe3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (227 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_wide_quick: /* 0xe4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (228 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_object_quick: /* 0xe5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (229 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_quick: /* 0xe6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (230 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_wide_quick: /* 0xe7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (231 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_object_quick: /* 0xe8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (232 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (233 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (234 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_boolean_quick: /* 0xeb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (235 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_byte_quick: /* 0xec */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (236 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_char_quick: /* 0xed */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (237 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_short_quick: /* 0xee */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (238 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_boolean_quick: /* 0xef */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (239 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_byte_quick: /* 0xf0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (240 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_char_quick: /* 0xf1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (241 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_short_quick: /* 0xf2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (242 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_lambda: /* 0xf3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (243 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_f4: /* 0xf4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (244 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_capture_variable: /* 0xf5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (245 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_create_lambda: /* 0xf6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (246 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_liberate_variable: /* 0xf7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (247 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_box_lambda: /* 0xf8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (248 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unbox_lambda: /* 0xf9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (249 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fa: /* 0xfa */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (250 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fb: /* 0xfb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (251 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fc: /* 0xfc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (252 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fd: /* 0xfd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (253 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fe: /* 0xfe */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (254 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_ff: /* 0xff */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC()
+    la     ra, artMterpAsmInstructionStart + (255 * 128)   # Addr of primary handler
+    lw     rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)           # refresh IBASE
+    move   a0, rSELF                    # arg0
+    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
+    la     a2, MterpCheckBefore
+    jalr   zero, a2                     # Tail call to Mterp(self, shadow_frame)
+
+    .balign 128
+    .size   artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
+    .global artMterpAsmAltInstructionEnd
+artMterpAsmAltInstructionEnd:
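+
+/*
+ * Illustrative note on the alt stubs above: each stub loads ra with the
+ * address of the corresponding primary handler (artMterpAsmInstructionStart
+ * + opcode * 128) before tail-jumping to MterpCheckBefore, so that when
+ * MterpCheckBefore returns it "returns" directly into the real handler.
+ */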
+/* File: mips/footer.S */
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogDivideByZeroException)
+#endif
+    b MterpCommonFallback
+
+common_errArrayIndex:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogArrayIndexException)
+#endif
+    b MterpCommonFallback
+
+common_errNegativeArraySize:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogNegativeArraySizeException)
+#endif
+    b MterpCommonFallback
+
+common_errNoSuchMethod:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogNoSuchMethodException)
+#endif
+    b MterpCommonFallback
+
+common_errNullObject:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogNullObjectException)
+#endif
+    b MterpCommonFallback
+
+common_exceptionThrown:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogExceptionThrownException)
+#endif
+    b MterpCommonFallback
+
+MterpSuspendFallback:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    lw    a2, THREAD_FLAGS_OFFSET(rSELF)
+    JAL(MterpLogSuspendFallback)
+#endif
+    b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary.  If there is a pending
+ * exception, handle it.  Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+    lw      a0, THREAD_EXCEPTION_OFFSET(rSELF)
+    beqz    a0, MterpFallback          # If no pending exception, fall back to reference interpreter.
+    /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here, or do we need to bail out to the caller?
+ */
+MterpException:
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpHandleException)                    # (self, shadow_frame)
+    beqz    v0, MterpExceptionReturn             # no local catch, back to caller.
+    lw      a0, OFF_FP_CODE_ITEM(rFP)
+    lw      a1, OFF_FP_DEX_PC(rFP)
+    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+    addu    rPC, a0, CODEITEM_INSNS_OFFSET
+    sll     a1, a1, 1
+    addu    rPC, rPC, a1                         # generate new dex_pc_ptr
+    /* Do we need to switch interpreters? */
+    JAL(MterpShouldSwitchInterpreters)
+    bnez    v0, MterpFallback
+    /* resume execution at catch block */
+    EXPORT_PC()
+    FETCH_INST()
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+    /* NOTE: no fallthrough */
+
+/*
+ * Check for a pending suspend request.  Assumes rINST is already loaded and
+ * rPC has been advanced; we still need to extract the opcode and branch to
+ * it.  The thread flags are in ra.
+ */
+MterpCheckSuspendAndContinue:
+    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)  # refresh rIBASE
+    and     ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    bnez    ra, 1f
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+1:
+    EXPORT_PC()
+    move    a0, rSELF
+    JAL(MterpSuspendCheck)              # (self)
+    bnez    v0, MterpFallback
+    GET_INST_OPCODE(t0)                 # extract opcode from rINST
+    GOTO_OPCODE(t0)                     # jump to next instruction
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    move    a0, rSELF
+    addu    a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rINST
+    JAL(MterpLogOSR)
+#endif
+    li      v0, 1                       # Signal normal return
+    b       MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+    EXPORT_PC()
+#if MTERP_LOGGING
+    move  a0, rSELF
+    addu  a1, rFP, OFF_FP_SHADOWFRAME
+    JAL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+    move    v0, zero                    # signal retry with reference interpreter.
+    b       MterpDone
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and RA.  Here we restore SP, restore the registers, and then return
+ * through RA.
+ *
+ * On entry:
+ *  uint32_t* rFP  (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+    li      v0, 1                       # signal return to caller.
+    b       MterpDone
+MterpReturn:
+    lw      a2, OFF_FP_RESULT_REGISTER(rFP)
+    sw      v0, 0(a2)
+    sw      v1, 4(a2)
+    li      v0, 1                       # signal return to caller.
+MterpDone:
+/* Restore from the stack and return. Frame size = STACK_SIZE */
+    STACK_LOAD_FULL()
+    jalr    zero, ra
+
+    .end ExecuteMterpImpl
+
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
new file mode 100644
index 0000000..7cef823
--- /dev/null
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -0,0 +1,12362 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'mips64'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: mips64/header.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <machine/regdef.h>
+
+/* TODO: add the missing file and use its FP register definitions. */
+/* #include <machine/fpregdef.h> */
+/* FP register definitions */
+#define f0  $f0
+#define f1  $f1
+#define f2  $f2
+#define f3  $f3
+#define f12 $f12
+#define f13 $f13
+
+/*
+ * It looks like the GNU assembler currently does not support the blec and bgtc
+ * idioms, which should translate into bgec and bltc respectively with swapped
+ * left and right register operands.
+ * TODO: remove these macros when the assembler is fixed.
+ */
+.macro blec lreg, rreg, target
+    bgec    \rreg, \lreg, \target
+.endm
+.macro bgtc lreg, rreg, target
+    bltc    \rreg, \lreg, \target
+.endm
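+
+/*
+ * Illustrative expansion of the macros above: "blec a0, a1, 1f" assembles
+ * as "bgec a1, a0, 1f", i.e. the same comparison with operands swapped.
+ */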
+
+/*
+Mterp and MIPS64 notes:
+
+The following registers have fixed assignments:
+
+  reg nick      purpose
+  s0  rPC       interpreted program counter, used for fetching instructions
+  s1  rFP       interpreted frame pointer, used for accessing locals and args
+  s2  rSELF     self (Thread) pointer
+  s3  rINST     first 16-bit code unit of current instruction
+  s4  rIBASE    interpreted instruction base pointer, used for computed goto
+  s5  rREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).
+*/
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rPC     s0
+#define rFP     s1
+#define rSELF   s2
+#define rINST   s3
+#define rIBASE  s4
+#define rREFS   s5
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
+ * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
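+
+/*
+ * Worked example (illustrative): since rFP points at the vregs array inside
+ * the ShadowFrame, every OFF_FP(a) offset is negative.  E.g. the frame base
+ * itself is recovered with
+ *     daddu  a1, rFP, OFF_FP_SHADOWFRAME
+ * which subtracts SHADOWFRAME_VREGS_OFFSET from rFP.
+ */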
+
+/*
+ * The reference interpreter performs explicit suspend checks, which is somewhat wasteful.
+ * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
+ * mterp should do so as well.
+ */
+#define MTERP_SUSPEND 0
+
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For effiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+    sd      rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+    ld      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
+ */
+.macro FETCH_INST
+    lhu     rINST, 0(rPC)
+.endm
+
+/* Advance rPC by some number of code units. */
+.macro ADVANCE count
+    daddu   rPC, rPC, (\count) * 2
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset.  Advances rPC
+ * to point to the next instruction.
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss.  (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+    ADVANCE \count
+    FETCH_INST
+.endm
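+
+/*
+ * Illustrative ordering in a handler that can throw:
+ *     EXPORT_PC               # first, so the dex pc is visible to the thrower
+ *     ...call that may throw...
+ *     FETCH_ADVANCE_INST 2    # only after the possible throw point
+ * so a thrown exception is attributed to the current instruction.
+ */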
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
+ * rINST ahead of possible exception point.  Be sure to manually advance rPC
+ * later.
+ */
+.macro PREFETCH_INST count
+    lhu     rINST, ((\count) * 2)(rPC)
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+    and     \reg, rINST, 255
+.endm
+
+/*
+ * Begin executing the opcode in _reg.
+ */
+.macro GOTO_OPCODE reg
+    .set noat
+    sll     AT, \reg, 7
+    daddu   AT, rIBASE, AT
+    jic     AT, 0
+    .set at
+.endm
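+
+/*
+ * Illustrative dispatch arithmetic: handlers are spaced 128 bytes apart
+ * (.balign 128), so for opcode 0x01 (move) GOTO_OPCODE computes
+ *     AT = rIBASE + (0x01 << 7) = rIBASE + 0x80
+ * and jumps there with "jic AT, 0".
+ */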
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ * Note, GET_VREG does sign extension to 64 bits while
+ * GET_VREG_U does zero extension to 64 bits.
+ * One is useful for arithmetic while the other is
+ * useful for storing the result value as 64-bit.
+ */
+.macro GET_VREG reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lw      \reg, 0(AT)
+    .set at
+.endm
+.macro GET_VREG_U reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lwu     \reg, 0(AT)
+    .set at
+.endm
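+
+/*
+ * Illustrative contrast: if the 32-bit vreg holds 0xFFFFFFFF, GET_VREG (lw)
+ * yields 0xFFFFFFFFFFFFFFFF (-1, sign-extended) while GET_VREG_U (lwu)
+ * yields 0x00000000FFFFFFFF (zero-extended).
+ */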
+.macro GET_VREG_FLOAT reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lwc1    \reg, 0(AT)
+    .set at
+.endm
+.macro SET_VREG reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    sw      \reg, 0(AT)
+    dlsa    AT, \vreg, rREFS, 2
+    sw      zero, 0(AT)
+    .set at
+.endm
+.macro SET_VREG_OBJECT reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    sw      \reg, 0(AT)
+    dlsa    AT, \vreg, rREFS, 2
+    sw      \reg, 0(AT)
+    .set at
+.endm
+.macro SET_VREG_FLOAT reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    swc1    \reg, 0(AT)
+    dlsa    AT, \vreg, rREFS, 2
+    sw      zero, 0(AT)
+    .set at
+.endm
+
+/*
+ * Get/set the 64-bit value from a Dalvik register.
+ * Avoid unaligned memory accesses.
+ * Note, SET_VREG_WIDE clobbers the register containing the value being stored.
+ * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
+ */
+.macro GET_VREG_WIDE reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lw      \reg, 0(AT)
+    lw      AT, 4(AT)
+    dinsu   \reg, AT, 32, 32
+    .set at
+.endm
+.macro GET_VREG_DOUBLE reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    lwc1    \reg, 0(AT)
+    lw      AT, 4(AT)
+    mthc1   AT, \reg
+    .set at
+.endm
+.macro SET_VREG_WIDE reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rFP, 2
+    sw      \reg, 0(AT)
+    drotr32 \reg, \reg, 0
+    sw      \reg, 4(AT)
+    dlsa    AT, \vreg, rREFS, 2
+    sw      zero, 0(AT)
+    sw      zero, 4(AT)
+    .set at
+.endm
+.macro SET_VREG_DOUBLE reg, vreg
+    .set noat
+    dlsa    AT, \vreg, rREFS, 2
+    sw      zero, 0(AT)
+    sw      zero, 4(AT)
+    dlsa    AT, \vreg, rFP, 2
+    swc1    \reg, 0(AT)
+    mfhc1   \vreg, \reg
+    sw      \vreg, 4(AT)
+    .set at
+.endm
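+
+/*
+ * Illustrative layout: the vregs array is only 4-byte aligned, so a wide
+ * value in vreg pair (v4, v5) lives at rFP+16 (low word) and rFP+20 (high
+ * word); GET_VREG_WIDE therefore uses two 32-bit loads plus dinsu instead
+ * of a single, possibly unaligned, ld.
+ */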
+
+/*
+ * On-stack offsets for spilling/unspilling callee-saved registers
+ * and the frame size.
+ */
+#define STACK_OFFSET_RA 0
+#define STACK_OFFSET_GP 8
+#define STACK_OFFSET_S0 16
+#define STACK_OFFSET_S1 24
+#define STACK_OFFSET_S2 32
+#define STACK_OFFSET_S3 40
+#define STACK_OFFSET_S4 48
+#define STACK_OFFSET_S5 56
+#define STACK_SIZE      64
+
+/* Constants for float/double_to_int/long conversions */
+#define INT_MIN             0x80000000
+#define INT_MIN_AS_FLOAT    0xCF000000
+#define INT_MIN_AS_DOUBLE   0xC1E0000000000000
+#define LONG_MIN            0x8000000000000000
+#define LONG_MIN_AS_FLOAT   0xDF000000
+#define LONG_MIN_AS_DOUBLE  0xC3E0000000000000
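+
+/*
+ * Illustrative decoding: 0xCF000000 is the IEEE-754 single-precision bit
+ * pattern of -2^31 (i.e. (float)INT_MIN) and 0xC1E0000000000000 is the
+ * double-precision pattern of the same value; the float/double-to-int/long
+ * conversion handlers presumably compare against these to detect
+ * out-of-range inputs.
+ */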
+
+/* File: mips64/entry.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Interpreter entry point.
+ */
+
+    .set    reorder
+
+    .text
+    .global ExecuteMterpImpl
+    .type   ExecuteMterpImpl, %function
+    .balign 16
+/*
+ * On entry:
+ *  a0  Thread* self
+ *  a1  code_item
+ *  a2  ShadowFrame
+ *  a3  JValue* result_register
+ *
+ */
+ExecuteMterpImpl:
+    .cfi_startproc
+    .cpsetup t9, t8, ExecuteMterpImpl
+
+    .cfi_def_cfa sp, 0
+    daddu   sp, sp, -STACK_SIZE
+    .cfi_adjust_cfa_offset STACK_SIZE
+
+    sd      t8, STACK_OFFSET_GP(sp)
+    .cfi_rel_offset 28, STACK_OFFSET_GP
+    sd      ra, STACK_OFFSET_RA(sp)
+    .cfi_rel_offset 31, STACK_OFFSET_RA
+
+    sd      s0, STACK_OFFSET_S0(sp)
+    .cfi_rel_offset 16, STACK_OFFSET_S0
+    sd      s1, STACK_OFFSET_S1(sp)
+    .cfi_rel_offset 17, STACK_OFFSET_S1
+    sd      s2, STACK_OFFSET_S2(sp)
+    .cfi_rel_offset 18, STACK_OFFSET_S2
+    sd      s3, STACK_OFFSET_S3(sp)
+    .cfi_rel_offset 19, STACK_OFFSET_S3
+    sd      s4, STACK_OFFSET_S4(sp)
+    .cfi_rel_offset 20, STACK_OFFSET_S4
+    sd      s5, STACK_OFFSET_S5(sp)
+    .cfi_rel_offset 21, STACK_OFFSET_S5
+
+    /* Remember the return register */
+    sd      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
+
+    /* Remember the code_item */
+    sd      a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
+
+    /* set up "named" registers */
+    move    rSELF, a0
+    daddu   rFP, a2, SHADOWFRAME_VREGS_OFFSET
+    lw      v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
+    dlsa    rREFS, v0, rFP, 2
+    daddu   rPC, a1, CODEITEM_INSNS_OFFSET
+    lw      v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
+    dlsa    rPC, v0, rPC, 1
+    EXPORT_PC
+
+    /* Starting ibase */
+    REFRESH_IBASE
+
+    /* start executing the instruction at rPC */
+    FETCH_INST
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+    /* NOTE: no fallthrough */
+
+
+    .global artMterpAsmInstructionStart
+    .type   artMterpAsmInstructionStart, %function
+artMterpAsmInstructionStart = .L_op_nop
+    .text
+
+/* ------------------------------ */
+    .balign 128
+.L_op_nop: /* 0x00 */
+/* File: mips64/op_nop.S */
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move: /* 0x01 */
+/* File: mips64/op_move.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_VREG a0, a3                     # a0 <- vB
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if 0
+    SET_VREG_OBJECT a0, a2              # vA <- vB
+    .else
+    SET_VREG a0, a2                     # vA <- vB
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_from16: /* 0x02 */
+/* File: mips64/op_move_from16.S */
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    lhu     a3, 2(rPC)                  # a3 <- BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_VREG a0, a3                     # a0 <- vBBBB
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if 0
+    SET_VREG_OBJECT a0, a2              # vAA <- vBBBB
+    .else
+    SET_VREG a0, a2                     # vAA <- vBBBB
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_16: /* 0x03 */
+/* File: mips64/op_move_16.S */
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    lhu     a3, 4(rPC)                  # a3 <- BBBB
+    lhu     a2, 2(rPC)                  # a2 <- AAAA
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_VREG a0, a3                     # a0 <- vBBBB
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if 0
+    SET_VREG_OBJECT a0, a2              # vAAAA <- vBBBB
+    .else
+    SET_VREG a0, a2                     # vAAAA <- vBBBB
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_wide: /* 0x04 */
+/* File: mips64/op_move_wide.S */
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    ext     a2, rINST, 8, 4             # a2 <- A
+    GET_VREG_WIDE a0, a3                # a0 <- vB
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- vB
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_wide_from16: /* 0x05 */
+/* File: mips64/op_move_wide_from16.S */
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    lhu     a3, 2(rPC)                  # a3 <- BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- vBBBB
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_wide_16: /* 0x06 */
+/* File: mips64/op_move_wide_16.S */
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    lhu     a3, 4(rPC)                  # a3 <- BBBB
+    lhu     a2, 2(rPC)                  # a2 <- AAAA
+    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAAAA <- vBBBB
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_object: /* 0x07 */
+/* File: mips64/op_move_object.S */
+/* File: mips64/op_move.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_VREG a0, a3                     # a0 <- vB
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if 1
+    SET_VREG_OBJECT a0, a2              # vA <- vB
+    .else
+    SET_VREG a0, a2                     # vA <- vB
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
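+    /*
+     * The .if 1 arm selects SET_VREG_OBJECT for object moves. Sketch of
+     * the assumed macro difference (the shadow frame is taken to keep a
+     * parallel reference array for the GC; the macros are not shown here):
+     *     vreg[A] = value; vreg_refs[A] = value;   // SET_VREG_OBJECT
+     *     // plain SET_VREG would clear vreg_refs[A] instead
+     */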
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_object_from16: /* 0x08 */
+/* File: mips64/op_move_object_from16.S */
+/* File: mips64/op_move_from16.S */
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    lhu     a3, 2(rPC)                  # a3 <- BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_VREG a0, a3                     # a0 <- vBBBB
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if 1
+    SET_VREG_OBJECT a0, a2              # vAA <- vBBBB
+    .else
+    SET_VREG a0, a2                     # vAA <- vBBBB
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_object_16: /* 0x09 */
+/* File: mips64/op_move_object_16.S */
+/* File: mips64/op_move_16.S */
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    lhu     a3, 4(rPC)                  # a3 <- BBBB
+    lhu     a2, 2(rPC)                  # a2 <- AAAA
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_VREG a0, a3                     # a0 <- vBBBB
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if 1
+    SET_VREG_OBJECT a0, a2              # vAAAA <- vBBBB
+    .else
+    SET_VREG a0, a2                     # vAAAA <- vBBBB
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_result: /* 0x0a */
+/* File: mips64/op_move_result.S */
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    srl     a2, rINST, 8                # a2 <- AA
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
+    lw      a0, 0(a0)                   # a0 <- result.i
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if 0
+    SET_VREG_OBJECT a0, a2              # vAA <- result
+    .else
+    SET_VREG a0, a2                     # vAA <- result
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
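+    /*
+     * OFF_FP_RESULT_REGISTER holds a pointer to the value the preceding
+     * invoke stored (presumably ART's JValue, per the result.i/result.j
+     * comments). Sketch:
+     *     JValue* result = fp->result_register;   // ld from rFP
+     *     vreg[AA] = result->i;                   // lw: 32-bit view
+     */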
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_result_wide: /* 0x0b */
+/* File: mips64/op_move_result_wide.S */
+    /* for: move-result-wide */
+    /* op vAA */
+    srl     a2, rINST, 8                # a2 <- AA
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
+    ld      a0, 0(a0)                   # a0 <- result.j
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- result
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_result_object: /* 0x0c */
+/* File: mips64/op_move_result_object.S */
+/* File: mips64/op_move_result.S */
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    srl     a2, rINST, 8                # a2 <- AA
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
+    lw      a0, 0(a0)                   # a0 <- result.i
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    .if 1
+    SET_VREG_OBJECT a0, a2              # vAA <- result
+    .else
+    SET_VREG a0, a2                     # vAA <- result
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_exception: /* 0x0d */
+/* File: mips64/op_move_exception.S */
+    /* move-exception vAA */
+    srl     a2, rINST, 8                # a2 <- AA
+    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # load exception obj
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    SET_VREG_OBJECT a0, a2              # vAA <- exception obj
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    sd      zero, THREAD_EXCEPTION_OFFSET(rSELF)  # clear exception
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return_void: /* 0x0e */
+/* File: mips64/op_return_void.S */
+    .extern MterpThreadFenceForConstructor
+    .extern MterpSuspendCheck
+    jal     MterpThreadFenceForConstructor
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    move    a0, rSELF
+    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqzc   ra, 1f
+    jal     MterpSuspendCheck           # (self)
+1:
+    li      a0, 0
+    b       MterpReturn
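+    /*
+     * Return-path sketch: after the constructor fence, the thread flags
+     * are polled and MterpSuspendCheck runs only when a suspend or
+     * checkpoint request is pending:
+     *     if (self->flags & (SUSPEND_REQUEST | CHECKPOINT_REQUEST))
+     *         MterpSuspendCheck(self);
+     *     result = 0;   // void return value
+     */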
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return: /* 0x0f */
+/* File: mips64/op_return.S */
+    /*
+     * Return a 32-bit value.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    .extern MterpSuspendCheck
+    jal     MterpThreadFenceForConstructor
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    move    a0, rSELF
+    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqzc   ra, 1f
+    jal     MterpSuspendCheck           # (self)
+1:
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vAA
+    b       MterpReturn
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return_wide: /* 0x10 */
+/* File: mips64/op_return_wide.S */
+    /*
+     * Return a 64-bit value.
+     */
+    /* return-wide vAA */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    .extern MterpSuspendCheck
+    jal     MterpThreadFenceForConstructor
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    move    a0, rSELF
+    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqzc   ra, 1f
+    jal     MterpSuspendCheck           # (self)
+1:
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_WIDE a0, a2                # a0 <- vAA
+    b       MterpReturn
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return_object: /* 0x11 */
+/* File: mips64/op_return_object.S */
+/* File: mips64/op_return.S */
+    /*
+     * Return a 32-bit value.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    .extern MterpSuspendCheck
+    jal     MterpThreadFenceForConstructor
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    move    a0, rSELF
+    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqzc   ra, 1f
+    jal     MterpSuspendCheck           # (self)
+1:
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vAA
+    b       MterpReturn
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_4: /* 0x12 */
+/* File: mips64/op_const_4.S */
+    /* const/4 vA, #+B */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    seh     a0, rINST                   # sign extend B in rINST
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    sra     a0, a0, 12                  # shift B into its final position
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- +B
+    GOTO_OPCODE v0                      # jump to next instruction
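+    /*
+     * B occupies bits 12-15 of rINST. seh sign-extends the low halfword
+     * and the arithmetic shift drops B, sign-extended, into the low bits:
+     *     int32_t value = (int32_t)(int16_t)inst >> 12;   // +B in [-8, 7]
+     */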
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_16: /* 0x13 */
+/* File: mips64/op_const_16.S */
+    /* const/16 vAA, #+BBBB */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- +BBBB
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const: /* 0x14 */
+/* File: mips64/op_const.S */
+    /* const vAA, #+BBBBbbbb */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
+    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- +BBBBbbbb
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_high16: /* 0x15 */
+/* File: mips64/op_const_high16.S */
+    /* const/high16 vAA, #+BBBB0000 */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- BBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    sll     a0, a0, 16                  # a0 <- BBBB0000
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- +BBBB0000
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_wide_16: /* 0x16 */
+/* File: mips64/op_const_wide_16.S */
+    /* const-wide/16 vAA, #+BBBB */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- +BBBB
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_wide_32: /* 0x17 */
+/* File: mips64/op_const_wide_32.S */
+    /* const-wide/32 vAA, #+BBBBbbbb */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
+    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- +BBBBbbbb
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_wide: /* 0x18 */
+/* File: mips64/op_const_wide.S */
+    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+    srl     a4, rINST, 8                # a4 <- AA
+    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
+    lh      a1, 4(rPC)                  # a1 <- BBBB (low middle)
+    lh      a2, 6(rPC)                  # a2 <- hhhh (high middle)
+    lh      a3, 8(rPC)                  # a3 <- HHHH (high)
+    FETCH_ADVANCE_INST 5                # advance rPC, load rINST
+    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
+    ins     a2, a3, 16, 16              # a2 = HHHHhhhh
+    dinsu   a0, a2, 32, 32              # a0 = HHHHhhhhBBBBbbbb
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- +HHHHhhhhBBBBbbbb
+    GOTO_OPCODE v0                      # jump to next instruction
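+    /*
+     * The 64-bit literal is assembled from four little-endian halfwords:
+     *     lo  = bbbb | (BBBB << 16);         // ins a0, a1, 16, 16
+     *     hi  = hhhh | (HHHH << 16);         // ins a2, a3, 16, 16
+     *     val = lo | ((uint64_t)hi << 32);   // dinsu a0, a2, 32, 32
+     */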
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_wide_high16: /* 0x19 */
+/* File: mips64/op_const_wide_high16.S */
+    /* const-wide/high16 vAA, #+BBBB000000000000 */
+    srl     a2, rINST, 8                # a2 <- AA
+    lh      a0, 2(rPC)                  # a0 <- BBBB
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    dsll32  a0, a0, 16                  # a0 <- BBBB000000000000
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vAA <- +BBBB000000000000
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_string: /* 0x1a */
+/* File: mips64/op_const_string.S */
+    /* const/string vAA, String//BBBB */
+    .extern MterpConstString
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- BBBB
+    srl     a1, rINST, 8                # a1 <- AA
+    daddu   a2, rFP, OFF_FP_SHADOWFRAME
+    move    a3, rSELF
+    jal     MterpConstString            # (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 2                     # load rINST
+    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
+    ADVANCE 2                           # advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
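+    /*
+     * Helper-call pattern: MterpConstString returns nonzero if resolution
+     * threw. The next opcode is prefetched before the test so the common
+     * path is ready, and rPC advances only once no exception is pending:
+     *     if (MterpConstString(BBBB, AA, shadow_frame, self))
+     *         goto MterpPossibleException;   // PC still at const/string
+     *     rPC += 2;                          // ADVANCE 2
+     */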
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_string_jumbo: /* 0x1b */
+/* File: mips64/op_const_string_jumbo.S */
+    /* const/string vAA, String//BBBBBBBB */
+    .extern MterpConstString
+    EXPORT_PC
+    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
+    lh      a4, 4(rPC)                  # a4 <- BBBB (high)
+    srl     a1, rINST, 8                # a1 <- AA
+    ins     a0, a4, 16, 16              # a0 <- BBBBbbbb
+    daddu   a2, rFP, OFF_FP_SHADOWFRAME
+    move    a3, rSELF
+    jal     MterpConstString            # (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 3                     # load rINST
+    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
+    ADVANCE 3                           # advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_class: /* 0x1c */
+/* File: mips64/op_const_class.S */
+    /* const/class vAA, Class//BBBB */
+    .extern MterpConstClass
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- BBBB
+    srl     a1, rINST, 8                # a1 <- AA
+    daddu   a2, rFP, OFF_FP_SHADOWFRAME
+    move    a3, rSELF
+    jal     MterpConstClass             # (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 2                     # load rINST
+    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
+    ADVANCE 2                           # advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_monitor_enter: /* 0x1d */
+/* File: mips64/op_monitor_enter.S */
+    /*
+     * Synchronize on an object.
+     */
+    /* monitor-enter vAA */
+    .extern artLockObjectFromCode
+    EXPORT_PC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vAA (object)
+    move    a1, rSELF                   # a1 <- self
+    jal     artLockObjectFromCode
+    bnezc   v0, MterpException
+    FETCH_ADVANCE_INST 1
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_monitor_exit: /* 0x1e */
+/* File: mips64/op_monitor_exit.S */
+    /*
+     * Unlock an object.
+     *
+     * Exceptions that occur when unlocking a monitor need to appear as
+     * if they happened at the following instruction.  See the Dalvik
+     * instruction spec.
+     */
+    /* monitor-exit vAA */
+    .extern artUnlockObjectFromCode
+    EXPORT_PC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vAA (object)
+    move    a1, rSELF                   # a1 <- self
+    jal     artUnlockObjectFromCode     # v0 <- success for unlock(self, obj)
+    bnezc   v0, MterpException
+    FETCH_ADVANCE_INST 1                # before throw: advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_check_cast: /* 0x1f */
+/* File: mips64/op_check_cast.S */
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast vAA, class//BBBB */
+    .extern MterpCheckCast
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- BBBB
+    srl     a1, rINST, 8                # a1 <- AA
+    dlsa    a1, a1, rFP, 2              # a1 <- &object
+    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
+    move    a3, rSELF                   # a3 <- self
+    jal     MterpCheckCast              # (index, &obj, method, self)
+    PREFETCH_INST 2
+    bnez    v0, MterpPossibleException
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_instance_of: /* 0x20 */
+/* File: mips64/op_instance_of.S */
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     */
+    /* instance-of vA, vB, class//CCCC */
+    .extern MterpInstanceOf
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- CCCC
+    srl     a1, rINST, 12               # a1 <- B
+    dlsa    a1, a1, rFP, 2              # a1 <- &object
+    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
+    move    a3, rSELF                   # a3 <- self
+    jal     MterpInstanceOf             # (index, &obj, method, self)
+    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext     a2, rINST, 8, 4             # a2 <- A
+    PREFETCH_INST 2
+    bnez    a1, MterpException
+    ADVANCE 2                           # advance rPC
+    SET_VREG v0, a2                     # vA <- v0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_array_length: /* 0x21 */
+/* File: mips64/op_array_length.S */
+    /*
+     * Return the length of an array.
+     */
+    srl     a1, rINST, 12               # a1 <- B
+    GET_VREG_U a0, a1                   # a0 <- vB (object ref)
+    ext     a2, rINST, 8, 4             # a2 <- A
+    beqz    a0, common_errNullObject    # yup, fail
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- array length
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a3, a2                     # vA <- length
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_new_instance: /* 0x22 */
+/* File: mips64/op_new_instance.S */
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance vAA, class//BBBB */
+    .extern MterpNewInstance
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rSELF
+    move    a2, rINST
+    jal     MterpNewInstance            # (shadow_frame, self, inst_data)
+    beqzc   v0, MterpPossibleException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_new_array: /* 0x23 */
+/* File: mips64/op_new_array.S */
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array vA, vB, class//CCCC */
+    .extern MterpNewArray
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rINST
+    move    a3, rSELF
+    jal     MterpNewArray
+    beqzc   v0, MterpPossibleException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_filled_new_array: /* 0x24 */
+/* File: mips64/op_filled_new_array.S */
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
+    .extern MterpFilledNewArray
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rSELF
+    jal     MterpFilledNewArray
+    beqzc   v0, MterpPossibleException
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_filled_new_array_range: /* 0x25 */
+/* File: mips64/op_filled_new_array_range.S */
+/* File: mips64/op_filled_new_array.S */
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
+    .extern MterpFilledNewArrayRange
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rSELF
+    jal     MterpFilledNewArrayRange
+    beqzc   v0, MterpPossibleException
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_fill_array_data: /* 0x26 */
+/* File: mips64/op_fill_array_data.S */
+    /* fill-array-data vAA, +BBBBBBBB */
+    .extern MterpFillArrayData
+    EXPORT_PC
+    lh      a1, 2(rPC)                  # a1 <- bbbb (lo)
+    lh      a0, 4(rPC)                  # a0 <- BBBB (hi)
+    srl     a3, rINST, 8                # a3 <- AA
+    ins     a1, a0, 16, 16              # a1 <- BBBBbbbb
+    GET_VREG_U a0, a3                   # a0 <- vAA (array object)
+    dlsa    a1, a1, rPC, 1              # a1 <- PC + BBBBbbbb*2 (array data off.)
+    jal     MterpFillArrayData          # (obj, payload)
+    beqzc   v0, MterpPossibleException  # exception?
+    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_throw: /* 0x27 */
+/* File: mips64/op_throw.S */
+    /*
+     * Throw an exception object in the current thread.
+     */
+    /* throw vAA */
+    EXPORT_PC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vAA (exception object)
+    beqzc   a0, common_errNullObject
+    sd      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # thread->exception <- obj
+    b       MterpException
+
+/* ------------------------------ */
+    .balign 128
+.L_op_goto: /* 0x28 */
+/* File: mips64/op_goto.S */
+    /*
+     * Unconditional branch, 8-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto +AA */
+    srl     a0, rINST, 8
+    seb     a0, a0                      # a0 <- sign-extended AA
+    dlsa    rPC, a0, rPC, 1             # rPC <- rPC + AA * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a0, 1f                      # AA * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+1:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a0, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
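+    /*
+     * Worked example of the doubling: goto +AA with AA = -3 moves rPC by
+     * -3 code units = -6 bytes (dlsa shifts the signed offset left by 1).
+     * Backward branches (AA < 0) take the suspend-check path so tight
+     * loops still poll the thread flags.
+     */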
+
+/* ------------------------------ */
+    .balign 128
+.L_op_goto_16: /* 0x29 */
+/* File: mips64/op_goto_16.S */
+    /*
+     * Unconditional branch, 16-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto/16 +AAAA */
+    lh      a0, 2(rPC)                  # a0 <- sign-extended AAAA
+    dlsa    rPC, a0, rPC, 1             # rPC <- rPC + AAAA * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a0, 1f                      # AAAA * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+1:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a0, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_goto_32: /* 0x2a */
+/* File: mips64/op_goto_32.S */
+    /*
+     * Unconditional branch, 32-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     *
+     * Unlike most opcodes, this one is allowed to branch to itself, so
+     * our "backward branch" test must be "<=0" instead of "<0".
+     */
+    /* goto/32 +AAAAAAAA */
+    lh      a0, 2(rPC)                  # a0 <- aaaa (low)
+    lh      a1, 4(rPC)                  # a1 <- AAAA (high)
+    ins     a0, a1, 16, 16              # a0 = sign-extended AAAAaaaa
+    dlsa    rPC, a0, rPC, 1             # rPC <- rPC + AAAAAAAA * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgtz    a0, 1f                      # AAAAaaaa * 2 > 0 => no suspend check
+    REFRESH_IBASE
+1:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    blez    a0, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_packed_switch: /* 0x2b */
+/* File: mips64/op_packed_switch.S */
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBBBBBB */
+    .extern MterpDoPackedSwitch
+    lh      a0, 2(rPC)                  # a0 <- bbbb (lo)
+    lh      a1, 4(rPC)                  # a1 <- BBBB (hi)
+    srl     a3, rINST, 8                # a3 <- AA
+    ins     a0, a1, 16, 16              # a0 <- BBBBbbbb
+    GET_VREG a1, a3                     # a1 <- vAA
+    dlsa    a0, a0, rPC, 1              # a0 <- PC + BBBBbbbb*2
+    jal     MterpDoPackedSwitch         # v0 <- code-unit branch offset
+    dlsa    rPC, v0, rPC, 1             # rPC <- rPC + offset * 2
+    FETCH_INST                          # load rINST
+#if MTERP_SUSPEND
+    bgtz    v0, 1f                      # offset * 2 > 0 => no suspend check
+    REFRESH_IBASE
+1:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    blez    v0, MterpCheckSuspendAndContinue
+#endif
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
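+    /*
+     * The switch helpers return a branch offset in code units relative to
+     * this instruction, applied like op_goto:
+     *     offset = MterpDoPackedSwitch(payload, vAA);   // (a0, a1)
+     *     rPC += offset * 2;                            // dlsa, shift 1
+     * A key with no match presumably yields the fall-through offset of 3,
+     * this instruction's own length in code units.
+     */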
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sparse_switch: /* 0x2c */
+/* File: mips64/op_sparse_switch.S */
+/* File: mips64/op_packed_switch.S */
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBBBBBB */
+    .extern MterpDoSparseSwitch
+    lh      a0, 2(rPC)                  # a0 <- bbbb (lo)
+    lh      a1, 4(rPC)                  # a1 <- BBBB (hi)
+    srl     a3, rINST, 8                # a3 <- AA
+    ins     a0, a1, 16, 16              # a0 <- BBBBbbbb
+    GET_VREG a1, a3                     # a1 <- vAA
+    dlsa    a0, a0, rPC, 1              # a0 <- PC + BBBBbbbb*2
+    jal     MterpDoSparseSwitch         # v0 <- code-unit branch offset
+    dlsa    rPC, v0, rPC, 1             # rPC <- rPC + offset * 2
+    FETCH_INST                          # load rINST
+#if MTERP_SUSPEND
+    bgtz    v0, 1f                      # offset * 2 > 0 => no suspend check
+    REFRESH_IBASE
+1:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    blez    v0, MterpCheckSuspendAndContinue
+#endif
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmpl_float: /* 0x2d */
+/* File: mips64/op_cmpl_float.S */
+/* File: mips64/fcmp.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * For: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_FLOAT f0, a2               # f0 <- vBB
+    GET_VREG_FLOAT f1, a3               # f1 <- vCC
+    cmp.eq.s f2, f0, f1
+    li      a0, 0
+    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
+    .if 0
+    cmp.lt.s f2, f0, f1
+    li      a0, -1
+    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
+    li      a0, 1                       # vBB > vCC or unordered
+    .else
+    cmp.lt.s f2, f1, f0
+    li      a0, 1
+    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
+    li      a0, -1                      # vBB < vCC or unordered
+    .endif
+1:
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
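+    /*
+     * The .if arms encode the NaN bias: cmpl-float (this arm) falls
+     * through to -1 when the operands are unordered, while cmpg-float
+     * below falls through to +1. Sketch:
+     *     if (b == c)      r = 0;    // ordered equal
+     *     else if (b > c)  r = 1;    // ordered greater
+     *     else             r = -1;   // less, or NaN: the cmpl bias
+     */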
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmpg_float: /* 0x2e */
+/* File: mips64/op_cmpg_float.S */
+/* File: mips64/fcmp.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * For: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_FLOAT f0, a2               # f0 <- vBB
+    GET_VREG_FLOAT f1, a3               # f1 <- vCC
+    cmp.eq.s f2, f0, f1
+    li      a0, 0
+    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
+    .if 1
+    cmp.lt.s f2, f0, f1
+    li      a0, -1
+    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
+    li      a0, 1                       # vBB > vCC or unordered
+    .else
+    cmp.lt.s f2, f1, f0
+    li      a0, 1
+    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
+    li      a0, -1                      # vBB < vCC or unordered
+    .endif
+1:
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmpl_double: /* 0x2f */
+/* File: mips64/op_cmpl_double.S */
+/* File: mips64/fcmpWide.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
+    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
+    cmp.eq.d f2, f0, f1
+    li      a0, 0
+    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
+    .if 0
+    cmp.lt.d f2, f0, f1
+    li      a0, -1
+    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
+    li      a0, 1                       # vBB > vCC or unordered
+    .else
+    cmp.lt.d f2, f1, f0
+    li      a0, 1
+    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
+    li      a0, -1                      # vBB < vCC or unordered
+    .endif
+1:
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmpg_double: /* 0x30 */
+/* File: mips64/op_cmpg_double.S */
+/* File: mips64/fcmpWide.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
+    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
+    cmp.eq.d f2, f0, f1
+    li      a0, 0
+    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
+    .if 1
+    cmp.lt.d f2, f0, f1
+    li      a0, -1
+    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
+    li      a0, 1                       # vBB > vCC or unordered
+    .else
+    cmp.lt.d f2, f1, f0
+    li      a0, 1
+    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
+    li      a0, -1                      # vBB < vCC or unordered
+    .endif
+1:
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmp_long: /* 0x31 */
+/* File: mips64/op_cmp_long.S */
+    /* cmp-long vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    slt     a2, a0, a1
+    slt     a0, a1, a0
+    subu    a0, a0, a2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- result
+    GOTO_OPCODE v0                      # jump to next instruction
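+    /*
+     * Branch-free signum via two set-less-than ops:
+     *     int64_t b = vBB, c = vCC;
+     *     result = (c < b) - (b < c);   // -1, 0, or +1
+     */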
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_eq: /* 0x32 */
+/* File: mips64/op_if_eq.S */
+/* File: mips64/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-le" you would use "le".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+
+    beqc a0, a1, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + CCCC * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # CCCC * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
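+    /*
+     * Taken/not-taken trick shared by all bincmp handlers: a4 is
+     * preloaded with CCCC and overwritten with 2 (this instruction's own
+     * length in code units) when the compare fails, so one dlsa advances
+     * rPC for both outcomes:
+     *     rPC += ((vA == vB) ? CCCC : 2) * 2;   // bytes
+     */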
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_ne: /* 0x33 */
+/* File: mips64/op_if_ne.S */
+/* File: mips64/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-le" you would use "le".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+
+    bnec a0, a1, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + CCCC * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # CCCC * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_lt: /* 0x34 */
+/* File: mips64/op_if_lt.S */
+/* File: mips64/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-le" you would use "le".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+
+    bltc a0, a1, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + CCCC * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # CCCC * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_ge: /* 0x35 */
+/* File: mips64/op_if_ge.S */
+/* File: mips64/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-le" you would use "le".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+
+    bgec a0, a1, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + CCCC * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # CCCC * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_gt: /* 0x36 */
+/* File: mips64/op_if_gt.S */
+/* File: mips64/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-le" you would use "le".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+
+    bgtc a0, a1, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + CCCC * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # CCCC * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_le: /* 0x37 */
+/* File: mips64/op_if_le.S */
+/* File: mips64/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-le" you would use "le".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+
+    blec a0, a1, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + CCCC * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # CCCC * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_eqz: /* 0x38 */
+/* File: mips64/op_if_eqz.S */
+/* File: mips64/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-lez" you would use "le".
+     *
+     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a2                     # a0 <- vAA
+
+    beqzc a0, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + BBBB * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # BBBB * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_nez: /* 0x39 */
+/* File: mips64/op_if_nez.S */
+/* File: mips64/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-lez" you would use "le".
+     *
+     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a2                     # a0 <- vAA
+
+    bnezc a0, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + BBBB * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # BBBB * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_ltz: /* 0x3a */
+/* File: mips64/op_if_ltz.S */
+/* File: mips64/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-lez" you would use "le".
+     *
+     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a2                     # a0 <- vAA
+
+    bltzc a0, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + BBBB * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # BBBB * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_gez: /* 0x3b */
+/* File: mips64/op_if_gez.S */
+/* File: mips64/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-lez" you would use "le".
+     *
+     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a2                     # a0 <- vAA
+
+    bgezc a0, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + BBBB * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # BBBB * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_gtz: /* 0x3c */
+/* File: mips64/op_if_gtz.S */
+/* File: mips64/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-lez" you would use "le".
+     *
+     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a2                     # a0 <- vAA
+
+    bgtzc a0, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + BBBB * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # BBBB * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_lez: /* 0x3d */
+/* File: mips64/op_if_lez.S */
+/* File: mips64/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "condition"
+     * fragment that specifies the comparison to perform, e.g. for
+     * "if-lez" you would use "le".
+     *
+     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    lh      a4, 2(rPC)                  # a4 <- sign-extended BBBB
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a2                     # a0 <- vAA
+
+    blezc a0, 1f
+    li      a4, 2                       # offset if branch not taken
+1:
+
+    dlsa    rPC, a4, rPC, 1             # rPC <- rPC + BBBB * 2
+    FETCH_INST                          # load rINST
+
+#if MTERP_SUSPEND
+    bgez    a4, 2f                      # BBBB * 2 >= 0 => no suspend check
+    REFRESH_IBASE
+2:
+#else
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)  # Preload flags for MterpCheckSuspendAndContinue
+    bltz    a4, MterpCheckSuspendAndContinue
+#endif
+
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_3e: /* 0x3e */
+/* File: mips64/op_unused_3e.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_3f: /* 0x3f */
+/* File: mips64/op_unused_3f.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_40: /* 0x40 */
+/* File: mips64/op_unused_40.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_41: /* 0x41 */
+/* File: mips64/op_unused_41.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_42: /* 0x42 */
+/* File: mips64/op_unused_42.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_43: /* 0x43 */
+/* File: mips64/op_unused_43.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget: /* 0x44 */
+/* File: mips64/op_aget.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if 2
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, 2               # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    lw      a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0)  # a2 <- vBB[vCC]
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a2, a4                     # vAA <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
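+    /*
+     * The bounds check needs only one unsigned compare: a negative index
+     * becomes a huge unsigned value, so bgeu rejects index < 0 and
+     * index >= length at once:
+     *     if ((uint32_t)index >= (uint32_t)length)
+     *         goto common_errArrayIndex;
+     */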
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_wide: /* 0x45 */
+/* File: mips64/op_aget_wide.S */
+    /*
+     * Array get, 64 bits.  vAA <- vBB[vCC].
+     */
+    /* aget-wide vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    lw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
+    lw      a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
+    dinsu   a2, a3, 32, 32              # a2 <- vBB[vCC]
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a2, a4                # vAA <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
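+    /*
+     * The 64-bit element is read as two 32-bit halves and merged,
+     * presumably to avoid assuming 8-byte alignment of the payload:
+     *     lo  = ((uint32_t*)data)[0];
+     *     hi  = ((uint32_t*)data)[1];
+     *     val = lo | ((uint64_t)hi << 32);   // dinsu a2, a3, 32, 32
+     */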
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_object: /* 0x46 */
+/* File: mips64/op_aget_object.S */
+    /*
+     * Array object get.  vAA <- vBB[vCC].
+     *
+     * for: aget-object
+     */
+    /* op vAA, vBB, vCC */
+    .extern artAGetObjectFromMterp
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    EXPORT_PC
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    jal     artAGetObjectFromMterp      # (array, index)
+    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
+    srl     a4, rINST, 8                # a4 <- AA
+    PREFETCH_INST 2
+    bnez    a1, MterpException
+    SET_VREG_OBJECT v0, a4              # vAA <- v0
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_boolean: /* 0x47 */
+/* File: mips64/op_aget_boolean.S */
+/* File: mips64/op_aget.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if 0
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, 0               # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    lbu     a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0)  # a2 <- vBB[vCC]
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a2, a4                     # vAA <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_byte: /* 0x48 */
+/* File: mips64/op_aget_byte.S */
+/* File: mips64/op_aget.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if 0
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, 0               # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    lb      a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0)  # a2 <- vBB[vCC]
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a2, a4                     # vAA <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_char: /* 0x49 */
+/* File: mips64/op_aget_char.S */
+/* File: mips64/op_aget.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if 1
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, 1               # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    lhu     a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0)  # a2 <- vBB[vCC]
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a2, a4                     # vAA <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_short: /* 0x4a */
+/* File: mips64/op_aget_short.S */
+/* File: mips64/op_aget.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+     *
+     * NOTE: assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if 1
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, 1               # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    lh      a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0)  # a2 <- vBB[vCC]
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a2, a4                     # vAA <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput: /* 0x4b */
+/* File: mips64/op_aput.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if 2
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, 2               # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_VREG a2, a4                     # a2 <- vAA
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    sw      a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0)  # vBB[vCC] <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_wide: /* 0x4c */
+/* File: mips64/op_aput_wide.S */
+    /*
+     * Array put, 64 bits.  vBB[vCC] <- vAA.
+     */
+    /* aput-wide vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    GET_VREG_WIDE a2, a4                # a2 <- vAA
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    sw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
+    dsrl32  a2, a2, 0
+    sw      a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)  # vBB[vCC] <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
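+    /*
+     * Editor's note: the mirror image of aget-wide above; "dsrl32 a2,
+     * a2, 0" shifts the value right by 32 bits so the high word can be
+     * stored with the second sw.
+     */
+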
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_object: /* 0x4d */
+/* File: mips64/op_aput_object.S */
+    /*
+     * Store an object into an array.  vBB[vCC] <- vAA.
+     */
+    /* op vAA, vBB, vCC */
+    .extern MterpAputObject
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rINST
+    jal     MterpAputObject
+    beqzc   v0, MterpPossibleException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
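+    /*
+     * Editor's note: a reference store must type-check the value
+     * against the array's component type and record the store for the
+     * GC, so the whole instruction is delegated to the MterpAputObject
+     * helper; v0 == 0 afterwards means an exception may be pending.
+     */
+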
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_boolean: /* 0x4e */
+/* File: mips64/op_aput_boolean.S */
+/* File: mips64/op_aput.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if 0
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, 0               # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_VREG a2, a4                     # a2 <- vAA
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    sb      a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0)  # vBB[vCC] <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_byte: /* 0x4f */
+/* File: mips64/op_aput_byte.S */
+/* File: mips64/op_aput.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if 0
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, 0               # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_VREG a2, a4                     # a2 <- vAA
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    sb      a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0)  # vBB[vCC] <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_char: /* 0x50 */
+/* File: mips64/op_aput_char.S */
+/* File: mips64/op_aput.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if 1
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, 1               # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_VREG a2, a4                     # a2 <- vAA
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    sh      a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0)  # vBB[vCC] <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_short: /* 0x51 */
+/* File: mips64/op_aput_short.S */
+/* File: mips64/op_aput.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     *
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     *
+     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+     * If this changes, specialize.
+     */
+    /* op vAA, vBB, vCC */
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    srl     a4, rINST, 8                # a4 <- AA
+    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
+    GET_VREG a1, a3                     # a1 <- vCC (requested index)
+    beqz    a0, common_errNullObject    # bail if null array object
+    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
+    .if 1
+    # [d]lsa does not support shift count of 0.
+    dlsa    a0, a1, a0, 1               # a0 <- arrayObj + index*width
+    .else
+    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
+    .endif
+    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_VREG a2, a4                     # a2 <- vAA
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    sh      a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0)  # vBB[vCC] <- a2
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget: /* 0x52 */
+/* File: mips64/op_iget.S */
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    .extern artGet32InstanceFromCode
+    EXPORT_PC
+    lhu      a0, 2(rPC)                 # a0 <- field ref CCCC
+    srl      a1, rINST, 12              # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ld       a2, OFF_FP_METHOD(rFP)     # a2 <- referrer
+    move     a3, rSELF                  # a3 <- self
+    jal      artGet32InstanceFromCode
+    ld       a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext      a2, rINST, 8, 4            # a2 <- A
+    PREFETCH_INST 2
+    bnez     a3, MterpPossibleException # bail out
+    .if 0
+    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[A] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
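+    /*
+     * Editor's note: one template serves every iget flavor; only the
+     * helper name and the ".if" object flag differ.  The helper takes
+     * (field_idx, obj, referrer, self), the exception slot is checked
+     * on return, and ".if 0/.if 1" selects SET_VREG vs. SET_VREG_OBJECT
+     * for the result.  PREFETCH_INST and ADVANCE are split so rPC only
+     * moves once the call is known not to have thrown.
+     */
+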
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_wide: /* 0x53 */
+/* File: mips64/op_iget_wide.S */
+    /*
+     * 64-bit instance field get.
+     *
+     * for: iget-wide
+     */
+    .extern artGet64InstanceFromCode
+    EXPORT_PC
+    lhu      a0, 2(rPC)                 # a0 <- field ref CCCC
+    srl      a1, rINST, 12              # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ld       a2, OFF_FP_METHOD(rFP)     # a2 <- referrer
+    move     a3, rSELF                  # a3 <- self
+    jal      artGet64InstanceFromCode
+    ld       a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext      a2, rINST, 8, 4            # a2 <- A
+    PREFETCH_INST 2
+    bnez     a3, MterpPossibleException # bail out
+    SET_VREG_WIDE v0, a2                # fp[A] <- v0
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_object: /* 0x54 */
+/* File: mips64/op_iget_object.S */
+/* File: mips64/op_iget.S */
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    .extern artGetObjInstanceFromCode
+    EXPORT_PC
+    lhu      a0, 2(rPC)                 # a0 <- field ref CCCC
+    srl      a1, rINST, 12              # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ld       a2, OFF_FP_METHOD(rFP)     # a2 <- referrer
+    move     a3, rSELF                  # a3 <- self
+    jal      artGetObjInstanceFromCode
+    ld       a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext      a2, rINST, 8, 4            # a2 <- A
+    PREFETCH_INST 2
+    bnez     a3, MterpPossibleException # bail out
+    .if 1
+    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[A] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_boolean: /* 0x55 */
+/* File: mips64/op_iget_boolean.S */
+/* File: mips64/op_iget.S */
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    .extern artGetBooleanInstanceFromCode
+    EXPORT_PC
+    lhu      a0, 2(rPC)                 # a0 <- field ref CCCC
+    srl      a1, rINST, 12              # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ld       a2, OFF_FP_METHOD(rFP)     # a2 <- referrer
+    move     a3, rSELF                  # a3 <- self
+    jal      artGetBooleanInstanceFromCode
+    ld       a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext      a2, rINST, 8, 4            # a2 <- A
+    PREFETCH_INST 2
+    bnez     a3, MterpPossibleException # bail out
+    .if 0
+    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[A] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_byte: /* 0x56 */
+/* File: mips64/op_iget_byte.S */
+/* File: mips64/op_iget.S */
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    .extern artGetByteInstanceFromCode
+    EXPORT_PC
+    lhu      a0, 2(rPC)                 # a0 <- field ref CCCC
+    srl      a1, rINST, 12              # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ld       a2, OFF_FP_METHOD(rFP)     # a2 <- referrer
+    move     a3, rSELF                  # a3 <- self
+    jal      artGetByteInstanceFromCode
+    ld       a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext      a2, rINST, 8, 4            # a2 <- A
+    PREFETCH_INST 2
+    bnez     a3, MterpPossibleException # bail out
+    .if 0
+    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[A] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_char: /* 0x57 */
+/* File: mips64/op_iget_char.S */
+/* File: mips64/op_iget.S */
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    .extern artGetCharInstanceFromCode
+    EXPORT_PC
+    lhu      a0, 2(rPC)                 # a0 <- field ref CCCC
+    srl      a1, rINST, 12              # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ld       a2, OFF_FP_METHOD(rFP)     # a2 <- referrer
+    move     a3, rSELF                  # a3 <- self
+    jal      artGetCharInstanceFromCode
+    ld       a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext      a2, rINST, 8, 4            # a2 <- A
+    PREFETCH_INST 2
+    bnez     a3, MterpPossibleException # bail out
+    .if 0
+    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[A] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_short: /* 0x58 */
+/* File: mips64/op_iget_short.S */
+/* File: mips64/op_iget.S */
+    /*
+     * General instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    .extern artGetShortInstanceFromCode
+    EXPORT_PC
+    lhu      a0, 2(rPC)                 # a0 <- field ref CCCC
+    srl      a1, rINST, 12              # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ld       a2, OFF_FP_METHOD(rFP)     # a2 <- referrer
+    move     a3, rSELF                  # a3 <- self
+    jal      artGetShortInstanceFromCode
+    ld       a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    ext      a2, rINST, 8, 4            # a2 <- A
+    PREFETCH_INST 2
+    bnez     a3, MterpPossibleException # bail out
+    .if 0
+    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[A] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput: /* 0x59 */
+/* File: mips64/op_iput.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field//CCCC */
+    .extern artSet32InstanceFromMterp
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref CCCC
+    srl     a1, rINST, 12               # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ext     a2, rINST, 8, 4             # a2 <- A
+    GET_VREG a2, a2                     # a2 <- fp[A]
+    ld      a3, OFF_FP_METHOD(rFP)      # a3 <- referrer
+    PREFETCH_INST 2
+    jal     artSet32InstanceFromMterp
+    bnez    v0, MterpPossibleException  # bail out
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_wide: /* 0x5a */
+/* File: mips64/op_iput_wide.S */
+    /* iput-wide vA, vB, field//CCCC */
+    .extern artSet64InstanceFromMterp
+    EXPORT_PC
+    lhu      a0, 2(rPC)                 # a0 <- field ref CCCC
+    srl      a1, rINST, 12              # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ext      a2, rINST, 8, 4            # a2 <- A
+    dlsa     a2, a2, rFP, 2             # a2 <- &fp[A]
+    ld       a3, OFF_FP_METHOD(rFP)     # a3 <- referrer
+    PREFETCH_INST 2
+    jal      artSet64InstanceFromMterp
+    bnez     v0, MterpPossibleException # bail out
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
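+    /*
+     * Editor's note: unlike the 32-bit put, the wide put passes a
+     * pointer to the source register pair: "dlsa a2, a2, rFP, 2"
+     * computes &fp[A] (vreg slots are 4 bytes, hence the shift of 2),
+     * and the helper reads the 64-bit value from there.
+     */
+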
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_object: /* 0x5b */
+/* File: mips64/op_iput_object.S */
+    .extern MterpIputObject
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rINST
+    move    a3, rSELF
+    jal     MterpIputObject
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_boolean: /* 0x5c */
+/* File: mips64/op_iput_boolean.S */
+/* File: mips64/op_iput.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field//CCCC */
+    .extern artSet8InstanceFromMterp
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref CCCC
+    srl     a1, rINST, 12               # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ext     a2, rINST, 8, 4             # a2 <- A
+    GET_VREG a2, a2                     # a2 <- fp[A]
+    ld      a3, OFF_FP_METHOD(rFP)      # a3 <- referrer
+    PREFETCH_INST 2
+    jal     artSet8InstanceFromMterp
+    bnez    v0, MterpPossibleException  # bail out
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_byte: /* 0x5d */
+/* File: mips64/op_iput_byte.S */
+/* File: mips64/op_iput.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field//CCCC */
+    .extern artSet8InstanceFromMterp
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref CCCC
+    srl     a1, rINST, 12               # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ext     a2, rINST, 8, 4             # a2 <- A
+    GET_VREG a2, a2                     # a2 <- fp[A]
+    ld      a3, OFF_FP_METHOD(rFP)      # a3 <- referrer
+    PREFETCH_INST 2
+    jal     artSet8InstanceFromMterp
+    bnez    v0, MterpPossibleException  # bail out
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_char: /* 0x5e */
+/* File: mips64/op_iput_char.S */
+/* File: mips64/op_iput.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field//CCCC */
+    .extern artSet16InstanceFromMterp
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref CCCC
+    srl     a1, rINST, 12               # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ext     a2, rINST, 8, 4             # a2 <- A
+    GET_VREG a2, a2                     # a2 <- fp[A]
+    ld      a3, OFF_FP_METHOD(rFP)      # a3 <- referrer
+    PREFETCH_INST 2
+    jal     artSet16InstanceFromMterp
+    bnez    v0, MterpPossibleException  # bail out
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_short: /* 0x5f */
+/* File: mips64/op_iput_short.S */
+/* File: mips64/op_iput.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    /* op vA, vB, field//CCCC */
+    .extern artSet16InstanceFromMterp
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref CCCC
+    srl     a1, rINST, 12               # a1 <- B
+    GET_VREG_U a1, a1                   # a1 <- fp[B], the object pointer
+    ext     a2, rINST, 8, 4             # a2 <- A
+    GET_VREG a2, a2                     # a2 <- fp[A]
+    ld      a3, OFF_FP_METHOD(rFP)      # a3 <- referrer
+    PREFETCH_INST 2
+    jal     artSet16InstanceFromMterp
+    bnez    v0, MterpPossibleException  # bail out
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget: /* 0x60 */
+/* File: mips64/op_sget.S */
+    /*
+     * General SGET handler wrapper.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field//BBBB */
+    .extern artGet32StaticFromCode
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    ld      a1, OFF_FP_METHOD(rFP)
+    move    a2, rSELF
+    jal     artGet32StaticFromCode
+    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    srl     a2, rINST, 8                # a2 <- AA
+
+    PREFETCH_INST 2
+    bnez    a3, MterpException          # bail out
+    .if 0
+    SET_VREG_OBJECT v0, a2              # fp[AA] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[AA] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0
+
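+    /*
+     * Editor's note: statics have no object operand, so the helper only
+     * needs (field_idx, referrer, self).  The blank line before
+     * PREFETCH_INST is the template's empty "extend" slot; the narrower
+     * variants below fill it with a zero- or sign-extension of the
+     * helper's return value (and/seb/seh).
+     */
+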
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_wide: /* 0x61 */
+/* File: mips64/op_sget_wide.S */
+    /*
+     * SGET_WIDE handler wrapper.
+     */
+    /* sget-wide vAA, field//BBBB */
+    .extern artGet64StaticFromCode
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    ld      a1, OFF_FP_METHOD(rFP)
+    move    a2, rSELF
+    jal     artGet64StaticFromCode
+    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    srl     a4, rINST, 8                # a4 <- AA
+    bnez    a3, MterpException          # bail out
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    SET_VREG_WIDE v0, a4
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_object: /* 0x62 */
+/* File: mips64/op_sget_object.S */
+/* File: mips64/op_sget.S */
+    /*
+     * General SGET handler wrapper.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field//BBBB */
+    .extern artGetObjStaticFromCode
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    ld      a1, OFF_FP_METHOD(rFP)
+    move    a2, rSELF
+    jal     artGetObjStaticFromCode
+    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    srl     a2, rINST, 8                # a2 <- AA
+
+    PREFETCH_INST 2
+    bnez    a3, MterpException          # bail out
+    .if 1
+    SET_VREG_OBJECT v0, a2              # fp[AA] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[AA] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_boolean: /* 0x63 */
+/* File: mips64/op_sget_boolean.S */
+/* File: mips64/op_sget.S */
+    /*
+     * General SGET handler wrapper.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field//BBBB */
+    .extern artGetBooleanStaticFromCode
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    ld      a1, OFF_FP_METHOD(rFP)
+    move    a2, rSELF
+    jal     artGetBooleanStaticFromCode
+    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    srl     a2, rINST, 8                # a2 <- AA
+    and     v0, v0, 0xff                # zero-extend boolean result
+    PREFETCH_INST 2
+    bnez    a3, MterpException          # bail out
+    .if 0
+    SET_VREG_OBJECT v0, a2              # fp[AA] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[AA] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_byte: /* 0x64 */
+/* File: mips64/op_sget_byte.S */
+/* File: mips64/op_sget.S */
+    /*
+     * General SGET handler wrapper.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field//BBBB */
+    .extern artGetByteStaticFromCode
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    ld      a1, OFF_FP_METHOD(rFP)
+    move    a2, rSELF
+    jal     artGetByteStaticFromCode
+    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    srl     a2, rINST, 8                # a2 <- AA
+    seb     v0, v0                      # sign-extend byte result
+    PREFETCH_INST 2
+    bnez    a3, MterpException          # bail out
+    .if 0
+    SET_VREG_OBJECT v0, a2              # fp[AA] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[AA] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_char: /* 0x65 */
+/* File: mips64/op_sget_char.S */
+/* File: mips64/op_sget.S */
+    /*
+     * General SGET handler wrapper.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field//BBBB */
+    .extern artGetCharStaticFromCode
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    ld      a1, OFF_FP_METHOD(rFP)
+    move    a2, rSELF
+    jal     artGetCharStaticFromCode
+    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    srl     a2, rINST, 8                # a2 <- AA
+    and     v0, v0, 0xffff              # zero-extend char result
+    PREFETCH_INST 2
+    bnez    a3, MterpException          # bail out
+    .if 0
+    SET_VREG_OBJECT v0, a2              # fp[AA] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[AA] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_short: /* 0x66 */
+/* File: mips64/op_sget_short.S */
+/* File: mips64/op_sget.S */
+    /*
+     * General SGET handler wrapper.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    /* op vAA, field//BBBB */
+    .extern artGetShortStaticFromCode
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    ld      a1, OFF_FP_METHOD(rFP)
+    move    a2, rSELF
+    jal     artGetShortStaticFromCode
+    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
+    srl     a2, rINST, 8                # a2 <- AA
+    seh     v0, v0                      # sign-extend short result
+    PREFETCH_INST 2
+    bnez    a3, MterpException          # bail out
+    .if 0
+    SET_VREG_OBJECT v0, a2              # fp[AA] <- v0
+    .else
+    SET_VREG v0, a2                     # fp[AA] <- v0
+    .endif
+    ADVANCE 2
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput: /* 0x67 */
+/* File: mips64/op_sput.S */
+    /*
+     * General SPUT handler wrapper.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field//BBBB */
+    .extern artSet32StaticFromCode
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    srl     a3, rINST, 8                # a3 <- AA
+    GET_VREG a1, a3                     # a1 <- fp[AA]
+    ld      a2, OFF_FP_METHOD(rFP)
+    move    a3, rSELF
+    PREFETCH_INST 2                     # Get next inst, but don't advance rPC
+    jal     artSet32StaticFromCode
+    bnezc   v0, MterpException          # 0 on success
+    ADVANCE 2                           # Past exception point - now advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_wide: /* 0x68 */
+/* File: mips64/op_sput_wide.S */
+    /*
+     * SPUT_WIDE handler wrapper.
+     */
+    /* sput-wide vAA, field//BBBB */
+    .extern artSet64IndirectStaticFromMterp
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    ld      a1, OFF_FP_METHOD(rFP)
+    srl     a2, rINST, 8                # a2 <- AA
+    dlsa    a2, a2, rFP, 2
+    move    a3, rSELF
+    PREFETCH_INST 2                     # Get next inst, but don't advance rPC
+    jal     artSet64IndirectStaticFromMterp
+    bnezc   v0, MterpException          # 0 on success, -1 on failure
+    ADVANCE 2                           # Past exception point - now advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
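+    /*
+     * Editor's note: as with iput-wide, the 64-bit source is passed by
+     * address ("dlsa a2, a2, rFP, 2" is &fp[AA]); the "Indirect" helper
+     * resolves the field through the referrer and returns 0 on success.
+     */
+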
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_object: /* 0x69 */
+/* File: mips64/op_sput_object.S */
+    .extern MterpSputObject
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rINST
+    move    a3, rSELF
+    jal     MterpSputObject
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_boolean: /* 0x6a */
+/* File: mips64/op_sput_boolean.S */
+/* File: mips64/op_sput.S */
+    /*
+     * General SPUT handler wrapper.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field//BBBB */
+    .extern artSet8StaticFromCode
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    srl     a3, rINST, 8                # a3 <- AA
+    GET_VREG a1, a3                     # a1 <- fp[AA]
+    ld      a2, OFF_FP_METHOD(rFP)
+    move    a3, rSELF
+    PREFETCH_INST 2                     # Get next inst, but don't advance rPC
+    jal     artSet8StaticFromCode
+    bnezc   v0, MterpException          # 0 on success
+    ADVANCE 2                           # Past exception point - now advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_byte: /* 0x6b */
+/* File: mips64/op_sput_byte.S */
+/* File: mips64/op_sput.S */
+    /*
+     * General SPUT handler wrapper.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field//BBBB */
+    .extern artSet8StaticFromCode
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    srl     a3, rINST, 8                # a3 <- AA
+    GET_VREG a1, a3                     # a1 <- fp[AA]
+    ld      a2, OFF_FP_METHOD(rFP)
+    move    a3, rSELF
+    PREFETCH_INST 2                     # Get next inst, but don't advance rPC
+    jal     artSet8StaticFromCode
+    bnezc   v0, MterpException          # 0 on success
+    ADVANCE 2                           # Past exception point - now advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_char: /* 0x6c */
+/* File: mips64/op_sput_char.S */
+/* File: mips64/op_sput.S */
+    /*
+     * General SPUT handler wrapper.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field//BBBB */
+    .extern artSet16StaticFromCode
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    srl     a3, rINST, 8                # a3 <- AA
+    GET_VREG a1, a3                     # a1 <- fp[AA]
+    ld      a2, OFF_FP_METHOD(rFP)
+    move    a3, rSELF
+    PREFETCH_INST 2                     # Get next inst, but don't advance rPC
+    jal     artSet16StaticFromCode
+    bnezc   v0, MterpException          # 0 on success
+    ADVANCE 2                           # Past exception point - now advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_short: /* 0x6d */
+/* File: mips64/op_sput_short.S */
+/* File: mips64/op_sput.S */
+    /*
+     * General SPUT handler wrapper.
+     *
+     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    /* op vAA, field//BBBB */
+    .extern artSet16StaticFromCode
+    EXPORT_PC
+    lhu     a0, 2(rPC)                  # a0 <- field ref BBBB
+    srl     a3, rINST, 8                # a3 <- AA
+    GET_VREG a1, a3                     # a1 <- fp[AA]
+    ld      a2, OFF_FP_METHOD(rFP)
+    move    a3, rSELF
+    PREFETCH_INST 2                     # Get next inst, but don't advance rPC
+    jal     artSet16StaticFromCode
+    bnezc   v0, MterpException          # 0 on success
+    ADVANCE 2                           # Past exception point - now advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_virtual: /* 0x6e */
+/* File: mips64/op_invoke_virtual.S */
+/* File: mips64/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeVirtual
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     MterpInvokeVirtual
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 3
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+    /*
+     * Handle a virtual method call.
+     *
+     * for: invoke-virtual, invoke-virtual/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
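+    /*
+     * Editor's note: all invoke flavors share this wrapper, handing
+     * (self, shadow_frame, pc, inst) to a MterpInvoke* helper that does
+     * the resolution, argument copying and call; FETCH_ADVANCE_INST 3
+     * then skips the 3-code-unit invoke instruction.  The comment block
+     * above is carried over verbatim from the per-opcode source file
+     * and has no effect.
+     */
+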
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_super: /* 0x6f */
+/* File: mips64/op_invoke_super.S */
+/* File: mips64/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeSuper
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     MterpInvokeSuper
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 3
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+    /*
+     * Handle a "super" method call.
+     *
+     * for: invoke-super, invoke-super/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_direct: /* 0x70 */
+/* File: mips64/op_invoke_direct.S */
+/* File: mips64/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeDirect
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     MterpInvokeDirect
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 3
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_static: /* 0x71 */
+/* File: mips64/op_invoke_static.S */
+/* File: mips64/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeStatic
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     MterpInvokeStatic
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 3
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_interface: /* 0x72 */
+/* File: mips64/op_invoke_interface.S */
+/* File: mips64/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeInterface
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     MterpInvokeInterface
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 3
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+    /*
+     * Handle an interface method call.
+     *
+     * for: invoke-interface, invoke-interface/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return_void_no_barrier: /* 0x73 */
+/* File: mips64/op_return_void_no_barrier.S */
+    .extern MterpSuspendCheck
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    move    a0, rSELF
+    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqzc   ra, 1f
+    jal     MterpSuspendCheck           # (self)
+1:
+    li      a0, 0
+    b       MterpReturn
+
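+    /*
+     * Editor's note: even the "no barrier" return polls the suspend and
+     * checkpoint flags so a pending GC or debugger request is honored
+     * at method exit; a0 is zeroed because MterpReturn expects the
+     * return value in a0 and return-void has none.
+     */
+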
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_virtual_range: /* 0x74 */
+/* File: mips64/op_invoke_virtual_range.S */
+/* File: mips64/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeVirtualRange
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     MterpInvokeVirtualRange
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 3
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_super_range: /* 0x75 */
+/* File: mips64/op_invoke_super_range.S */
+/* File: mips64/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeSuperRange
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     MterpInvokeSuperRange
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 3
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_direct_range: /* 0x76 */
+/* File: mips64/op_invoke_direct_range.S */
+/* File: mips64/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeDirectRange
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     MterpInvokeDirectRange
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 3
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_static_range: /* 0x77 */
+/* File: mips64/op_invoke_static_range.S */
+/* File: mips64/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeStaticRange
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     MterpInvokeStaticRange
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 3
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_interface_range: /* 0x78 */
+/* File: mips64/op_invoke_interface_range.S */
+/* File: mips64/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeInterfaceRange
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     MterpInvokeInterfaceRange
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 3
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_79: /* 0x79 */
+/* File: mips64/op_unused_79.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_7a: /* 0x7a */
+/* File: mips64/op_unused_7a.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_neg_int: /* 0x7b */
+/* File: mips64/op_neg_int.S */
+/* File: mips64/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "a0 = op a0".
+     *
+     * for: int-to-byte, int-to-char, int-to-short,
+     *      not-int, neg-int
+     */
+    /* unop vA, vB */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    ext     a2, rINST, 8, 4             # a2 <- A
+                                        # optional op (empty slot)
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    subu    a0, zero, a0                # a0 <- -a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
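+    /*
+     * Editor's note: the unop template leaves an empty "optional op"
+     * slot plus one "instr" slot; neg-int fills the latter with
+     * "subu a0, zero, a0", and not-int (next) uses "nor a0, zero, a0",
+     * since nor against zero is bitwise NOT.
+     */
+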
+/* ------------------------------ */
+    .balign 128
+.L_op_not_int: /* 0x7c */
+/* File: mips64/op_not_int.S */
+/* File: mips64/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "a0 = op a0".
+     *
+     * for: int-to-byte, int-to-char, int-to-short,
+     *      not-int, neg-int
+     */
+    /* unop vA, vB */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    ext     a2, rINST, 8, 4             # a2 <- A
+                                        # optional op (empty slot)
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    nor     a0, zero, a0                # a0 <- ~a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_neg_long: /* 0x7d */
+/* File: mips64/op_neg_long.S */
+/* File: mips64/unopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "a0 = op a0".
+     *
+     * For: not-long, neg-long
+     */
+    /* unop vA, vB */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a3                # a0 <- vB
+    ext     a2, rINST, 8, 4             # a2 <- A
+                                        # optional op (empty slot)
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    dsubu   a0, zero, a0                # a0 <- -a0 (64-bit)
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_not_long: /* 0x7e */
+/* File: mips64/op_not_long.S */
+/* File: mips64/unopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "a0 = op a0".
+     *
+     * For: not-long, neg-long
+     */
+    /* unop vA, vB */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a3                # a0 <- vB
+    ext     a2, rINST, 8, 4             # a2 <- A
+                                        # optional op (empty slot)
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    nor     a0, zero, a0                # a0 <- ~a0 (64-bit)
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_neg_float: /* 0x7f */
+/* File: mips64/op_neg_float.S */
+/* File: mips64/fcvtHeader.S */
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG_FLOAT f0, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+
+    neg.s   f0, f0
+/* File: mips64/fcvtFooter.S */
+    /*
+     * Stores a specified register containing the result of conversion
+     * from or to a floating-point type and jumps to the next instruction.
+     *
+     * Expects a1 to contain the destination Dalvik register number.
+     * a1 is set up by fcvtHeader.S.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     *
+     * Note that this file can't be included after a break in other files
+     * and in those files its contents appear as a copy.
+     * See: float-to-int, float-to-long, double-to-int, double-to-long.
+     */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
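+    /*
+     * Editor's note: fcvtHeader.S and fcvtFooter.S bracket the
+     * FP-conversion-shaped handlers: the header loads vB into f0 and
+     * leaves the destination register number A in a1, the
+     * opcode-specific instruction (here "neg.s f0, f0") sits between
+     * the two includes, and the footer writes f0 back to vA and
+     * dispatches.
+     */
+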
+/* ------------------------------ */
+    .balign 128
+.L_op_neg_double: /* 0x80 */
+/* File: mips64/op_neg_double.S */
+/* File: mips64/fcvtHeader.S */
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG_DOUBLE f0, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+
+    neg.d   f0, f0
+/* File: mips64/fcvtFooter.S */
+    /*
+     * Stores a specified register containing the result of conversion
+     * from or to a floating-point type and jumps to the next instruction.
+     *
+     * Expects a1 to contain the destination Dalvik register number.
+     * a1 is set up by fcvtHeader.S.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     *
+     * Note that this file can't be included after a break in other files;
+     * those files instead contain a copy of its contents.
+     * See: float-to-int, float-to-long, double-to-int, double-to-long.
+     */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_long: /* 0x81 */
+/* File: mips64/op_int_to_long.S */
+    /* int-to-long vA, vB */
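+    /* GET_VREG loads vB already sign-extended to 64 bits, so the widening
+     * happens as part of the load and no separate conversion is needed. */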
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB (sign-extended to 64 bits)
+    ext     a2, rINST, 8, 4             # a2 <- A
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- vB
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_float: /* 0x82 */
+/* File: mips64/op_int_to_float.S */
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input and store the output into or from a
+     * floating-point register irrespective of the type.
+     */
+/* File: mips64/fcvtHeader.S */
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG_FLOAT f0, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+
+    cvt.s.w f0, f0
+/* File: mips64/fcvtFooter.S */
+    /*
+     * Stores a specified register containing the result of conversion
+     * from or to a floating-point type and jumps to the next instruction.
+     *
+     * Expects a1 to contain the destination Dalvik register number.
+     * a1 is set up by fcvtHeader.S.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     *
+     * Note that this file can't be included after a break in other files;
+     * those files instead contain a copy of its contents.
+     * See: float-to-int, float-to-long, double-to-int, double-to-long.
+     */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_double: /* 0x83 */
+/* File: mips64/op_int_to_double.S */
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input and store the output into or from a
+     * floating-point register irrespective of the type.
+     */
+/* File: mips64/fcvtHeader.S */
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG_FLOAT f0, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+
+    cvt.d.w f0, f0
+/* File: mips64/fcvtFooter.S */
+    /*
+     * Stores a specified register containing the result of conversion
+     * from or to a floating-point type and jumps to the next instruction.
+     *
+     * Expects a1 to contain the destination Dalvik register number.
+     * a1 is set up by fcvtHeader.S.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     *
+     * Note that this file can't be included after a break in other files;
+     * those files instead contain a copy of its contents.
+     * See: float-to-int, float-to-long, double-to-int, double-to-long.
+     */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_long_to_int: /* 0x84 */
+/* File: mips64/op_long_to_int.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: mips64/op_move.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_VREG a0, a3                     # a0 <- vB
+    GET_INST_OPCODE v0                  # extract opcode from rINST
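+    /* The move template's object flag expands to 0 here: long-to-int never
+     * stores a reference, so only the plain SET_VREG path is assembled. */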
+    .if 0
+    SET_VREG_OBJECT a0, a2              # vA <- vB
+    .else
+    SET_VREG a0, a2                     # vA <- vB
+    .endif
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_long_to_float: /* 0x85 */
+/* File: mips64/op_long_to_float.S */
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input and store the output into or from a
+     * floating-point register irrespective of the type.
+     */
+/* File: mips64/fcvtHeader.S */
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG_DOUBLE f0, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+
+    cvt.s.l f0, f0
+/* File: mips64/fcvtFooter.S */
+    /*
+     * Stores a specified register containing the result of conversion
+     * from or to a floating-point type and jumps to the next instruction.
+     *
+     * Expects a1 to contain the destination Dalvik register number.
+     * a1 is set up by fcvtHeader.S.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     *
+     * Note that this file can't be included after a break in other files;
+     * those files instead contain a copy of its contents.
+     * See: float-to-int, float-to-long, double-to-int, double-to-long.
+     */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_long_to_double: /* 0x86 */
+/* File: mips64/op_long_to_double.S */
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input and store the output into or from a
+     * floating-point register irrespective of the type.
+     */
+/* File: mips64/fcvtHeader.S */
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG_DOUBLE f0, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+
+    cvt.d.l f0, f0
+/* File: mips64/fcvtFooter.S */
+    /*
+     * Stores a specified register containing the result of conversion
+     * from or to a floating-point type and jumps to the next instruction.
+     *
+     * Expects a1 to contain the destination Dalvik register number.
+     * a1 is set up by fcvtHeader.S.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     *
+     * Note that this file can't be included after a break in other files;
+     * those files instead contain a copy of its contents.
+     * See: float-to-int, float-to-long, double-to-int, double-to-long.
+     */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_float_to_int: /* 0x87 */
+/* File: mips64/op_float_to_int.S */
+/* File: mips64/fcvtHeader.S */
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG_FLOAT f0, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
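+    /*
+     * If INT_MIN_AS_FLOAT <= vB, the branch below takes the regular
+     * truncation path. Otherwise vB is NaN or below the int range:
+     * cmp.eq.s leaves an all-ones mask in f1 only if f0 is not NaN, so
+     * "INT_MIN and mask" yields INT_MIN for negative overflow and 0 for
+     * NaN, as Java requires.
+     */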
+    li      t0, INT_MIN_AS_FLOAT
+    mtc1    t0, f1
+    cmp.le.s f1, f1, f0
+    bc1nez  f1, .Lop_float_to_int_trunc
+    cmp.eq.s f1, f0, f0
+    li      t0, INT_MIN
+    mfc1    t1, f1
+    and     t0, t0, t1
+    b       .Lop_float_to_int_done
+
+/* ------------------------------ */
+    .balign 128
+.L_op_float_to_long: /* 0x88 */
+/* File: mips64/op_float_to_long.S */
+/* File: mips64/fcvtHeader.S */
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG_FLOAT f0, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
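+    /*
+     * If LONG_MIN_AS_FLOAT <= vB, the branch below takes the regular
+     * truncation path. Otherwise vB is NaN or below the long range:
+     * cmp.eq.s leaves an all-ones mask in f1 only if f0 is not NaN, and
+     * mfc1 sign-extends that mask to 64 bits, so "LONG_MIN and mask"
+     * yields LONG_MIN for negative overflow and 0 for NaN.
+     */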
+    li      t0, LONG_MIN_AS_FLOAT
+    mtc1    t0, f1
+    cmp.le.s f1, f1, f0
+    bc1nez  f1, .Lop_float_to_long_trunc
+    cmp.eq.s f1, f0, f0
+    dli     t0, LONG_MIN
+    mfc1    t1, f1
+    and     t0, t0, t1
+    b       .Lop_float_to_long_done
+
+/* ------------------------------ */
+    .balign 128
+.L_op_float_to_double: /* 0x89 */
+/* File: mips64/op_float_to_double.S */
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input and store the output into or from a
+     * floating-point register irrespective of the type.
+     */
+/* File: mips64/fcvtHeader.S */
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG_FLOAT f0, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+
+    cvt.d.s f0, f0
+/* File: mips64/fcvtFooter.S */
+    /*
+     * Stores a specified register containing the result of conversion
+     * from or to a floating-point type and jumps to the next instruction.
+     *
+     * Expects a1 to contain the destination Dalvik register number.
+     * a1 is set up by fcvtHeader.S.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     *
+     * Note that this file can't be included after a break in other files;
+     * those files instead contain a copy of its contents.
+     * See: float-to-int, float-to-long, double-to-int, double-to-long.
+     */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_double_to_int: /* 0x8a */
+/* File: mips64/op_double_to_int.S */
+/* File: mips64/fcvtHeader.S */
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG_DOUBLE f0, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
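+    /*
+     * If INT_MIN_AS_DOUBLE <= vB, the branch below takes the regular
+     * truncation path. Otherwise vB is NaN or below the int range:
+     * cmp.eq.d leaves an all-ones mask in f1 only if f0 is not NaN, so
+     * "INT_MIN and mask" yields INT_MIN for negative overflow and 0 for
+     * NaN.
+     */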
+    dli     t0, INT_MIN_AS_DOUBLE
+    dmtc1   t0, f1
+    cmp.le.d f1, f1, f0
+    bc1nez  f1, .Lop_double_to_int_trunc
+    cmp.eq.d f1, f0, f0
+    li      t0, INT_MIN
+    mfc1    t1, f1
+    and     t0, t0, t1
+    b       .Lop_double_to_int_done
+
+/* ------------------------------ */
+    .balign 128
+.L_op_double_to_long: /* 0x8b */
+/* File: mips64/op_double_to_long.S */
+/* File: mips64/fcvtHeader.S */
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG_DOUBLE f0, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
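+    /*
+     * If LONG_MIN_AS_DOUBLE <= vB, the branch below takes the regular
+     * truncation path. Otherwise vB is NaN or below the long range:
+     * cmp.eq.d leaves an all-ones mask in f1 only if f0 is not NaN, so
+     * "LONG_MIN and mask" yields LONG_MIN for negative overflow and 0
+     * for NaN.
+     */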
+    dli     t0, LONG_MIN_AS_DOUBLE
+    dmtc1   t0, f1
+    cmp.le.d f1, f1, f0
+    bc1nez  f1, .Lop_double_to_long_trunc
+    cmp.eq.d f1, f0, f0
+    dli     t0, LONG_MIN
+    mfc1    t1, f1
+    and     t0, t0, t1
+    b       .Lop_double_to_long_done
+
+/* ------------------------------ */
+    .balign 128
+.L_op_double_to_float: /* 0x8c */
+/* File: mips64/op_double_to_float.S */
+    /*
+     * Conversion from or to floating-point happens in a floating-point register.
+     * Therefore we load the input and store the output into or from a
+     * floating-point register irrespective of the type.
+     */
+/* File: mips64/fcvtHeader.S */
+    /*
+     * Loads a specified register from vB. Used primarily for conversions
+     * from or to a floating-point type.
+     *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+     * store the result in vA and jump to the next instruction.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     */
+    ext     a1, rINST, 8, 4             # a1 <- A
+    srl     a2, rINST, 12               # a2 <- B
+    GET_VREG_DOUBLE f0, a2
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+
+    cvt.s.d f0, f0
+/* File: mips64/fcvtFooter.S */
+    /*
+     * Stores a specified register containing the result of conversion
+     * from or to a floating-point type and jumps to the next instruction.
+     *
+     * Expects a1 to contain the destination Dalvik register number.
+     * a1 is set up by fcvtHeader.S.
+     *
+     * For: int-to-float, int-to-double, long-to-float, long-to-double,
+     *      float-to-int, float-to-long, float-to-double, double-to-int,
+     *      double-to-long, double-to-float, neg-float, neg-double.
+     *
+     * Note that this file can't be included after a break in other files;
+     * those files instead contain a copy of its contents.
+     * See: float-to-int, float-to-long, double-to-int, double-to-long.
+     */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_byte: /* 0x8d */
+/* File: mips64/op_int_to_byte.S */
+/* File: mips64/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "a0 = op a0".
+     *
+     * For: int-to-byte, int-to-char, int-to-short,
+     *      not-int, neg-int
+     */
+    /* unop vA, vB */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    ext     a2, rINST, 8, 4             # a2 <- A
+                                        # optional op
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    seb     a0, a0                      # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_char: /* 0x8e */
+/* File: mips64/op_int_to_char.S */
+/* File: mips64/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "a0 = op a0".
+     *
+     * For: int-to-byte, int-to-char, int-to-short,
+     *      not-int, neg-int
+     */
+    /* unop vA, vB */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    ext     a2, rINST, 8, 4             # a2 <- A
+                                        # optional op
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    and     a0, a0, 0xffff              # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_short: /* 0x8f */
+/* File: mips64/op_int_to_short.S */
+/* File: mips64/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "a0 = op a0".
+     *
+     * For: int-to-byte, int-to-char, int-to-short,
+     *      not-int, neg-int
+     */
+    /* unop vA, vB */
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    ext     a2, rINST, 8, 4             # a2 <- A
+                                        # optional op
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    seh     a0, a0                      # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_int: /* 0x90 */
+/* File: mips64/op_add_int.S */
+/* File: mips64/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
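+    /*
+     * In this generated file the template parameters are already expanded:
+     * "chkzero" appears as the constant in the ".if" directive below
+     * (0 here, 1 in the div/rem handlers), and "instr" is the arithmetic
+     * instruction following the "# optional op" placeholder.
+     */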
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG a0, a2                     # a0 <- vBB
+    GET_VREG a1, a3                     # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    addu    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_int: /* 0x91 */
+/* File: mips64/op_sub_int.S */
+/* File: mips64/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG a0, a2                     # a0 <- vBB
+    GET_VREG a1, a3                     # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    subu    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_int: /* 0x92 */
+/* File: mips64/op_mul_int.S */
+/* File: mips64/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG a0, a2                     # a0 <- vBB
+    GET_VREG a1, a3                     # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    mul     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_int: /* 0x93 */
+/* File: mips64/op_div_int.S */
+/* File: mips64/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG a0, a2                     # a0 <- vBB
+    GET_VREG a1, a3                     # a1 <- vCC
+    .if 1
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    div     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_int: /* 0x94 */
+/* File: mips64/op_rem_int.S */
+/* File: mips64/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG a0, a2                     # a0 <- vBB
+    GET_VREG a1, a3                     # a1 <- vCC
+    .if 1
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    mod     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_int: /* 0x95 */
+/* File: mips64/op_and_int.S */
+/* File: mips64/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG a0, a2                     # a0 <- vBB
+    GET_VREG a1, a3                     # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    and     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_int: /* 0x96 */
+/* File: mips64/op_or_int.S */
+/* File: mips64/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG a0, a2                     # a0 <- vBB
+    GET_VREG a1, a3                     # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    or      a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_int: /* 0x97 */
+/* File: mips64/op_xor_int.S */
+/* File: mips64/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG a0, a2                     # a0 <- vBB
+    GET_VREG a1, a3                     # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    xor     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_int: /* 0x98 */
+/* File: mips64/op_shl_int.S */
+/* File: mips64/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG a0, a2                     # a0 <- vBB
+    GET_VREG a1, a3                     # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    sll     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_int: /* 0x99 */
+/* File: mips64/op_shr_int.S */
+/* File: mips64/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG a0, a2                     # a0 <- vBB
+    GET_VREG a1, a3                     # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    sra     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_int: /* 0x9a */
+/* File: mips64/op_ushr_int.S */
+/* File: mips64/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG a0, a2                     # a0 <- vBB
+    GET_VREG a1, a3                     # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    srl     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a4                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_long: /* 0x9b */
+/* File: mips64/op_add_long.S */
+/* File: mips64/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    daddu   a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_long: /* 0x9c */
+/* File: mips64/op_sub_long.S */
+/* File: mips64/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    dsubu   a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_long: /* 0x9d */
+/* File: mips64/op_mul_long.S */
+/* File: mips64/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    dmul    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_long: /* 0x9e */
+/* File: mips64/op_div_long.S */
+/* File: mips64/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    .if 1
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    ddiv    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_long: /* 0x9f */
+/* File: mips64/op_rem_long.S */
+/* File: mips64/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    .if 1
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    dmod    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_long: /* 0xa0 */
+/* File: mips64/op_and_long.S */
+/* File: mips64/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    and     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_long: /* 0xa1 */
+/* File: mips64/op_or_long.S */
+/* File: mips64/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    or      a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_long: /* 0xa2 */
+/* File: mips64/op_xor_long.S */
+/* File: mips64/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    xor     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_long: /* 0xa3 */
+/* File: mips64/op_shl_long.S */
+/* File: mips64/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    dsll    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_long: /* 0xa4 */
+/* File: mips64/op_shr_long.S */
+/* File: mips64/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    dsra    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_long: /* 0xa5 */
+/* File: mips64/op_ushr_long.S */
+/* File: mips64/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, shl-long, shr-long, ushr-long
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_WIDE a0, a2                # a0 <- vBB
+    GET_VREG_WIDE a1, a3                # a1 <- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    dsrl    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a4                # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_float: /* 0xa6 */
+/* File: mips64/op_add_float.S */
+/* File: mips64/fbinop.S */
+    /*
+     * Generic 32-bit floating-point operation.
+     *
+     * For: add-float, sub-float, mul-float, div-float.
+     * form: <op> f0, f0, f1
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_FLOAT f0, a2               # f0 <- vBB
+    GET_VREG_FLOAT f1, a3               # f1 <- vCC
+    add.s   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a4               # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
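+    # The float handlers lean on the FPU's IEEE-754 semantics (NaN
+    # propagation, infinities, signed zero), which is what the Dalvik float
+    # ops specify, so add.s/sub.s/mul.s/div.s need no fix-up code around them.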
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_float: /* 0xa7 */
+/* File: mips64/op_sub_float.S */
+/* File: mips64/fbinop.S */
+    /*
+     * Generic 32-bit floating-point operation.
+     *
+     * For: add-float, sub-float, mul-float, div-float.
+     * form: <op> f0, f0, f1
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_FLOAT f0, a2               # f0 <- vBB
+    GET_VREG_FLOAT f1, a3               # f1 <- vCC
+    sub.s   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a4               # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_float: /* 0xa8 */
+/* File: mips64/op_mul_float.S */
+/* File: mips64/fbinop.S */
+    /*
+     * Generic 32-bit floating-point operation.
+     *
+     * For: add-float, sub-float, mul-float, div-float.
+     * form: <op> f0, f0, f1
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_FLOAT f0, a2               # f0 <- vBB
+    GET_VREG_FLOAT f1, a3               # f1 <- vCC
+    mul.s   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a4               # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_float: /* 0xa9 */
+/* File: mips64/op_div_float.S */
+/* File: mips64/fbinop.S */
+    /*
+     * Generic 32-bit floating-point operation.
+     *
+     * For: add-float, sub-float, mul-float, div-float.
+     * form: <op> f0, f0, f1
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_FLOAT f0, a2               # f0 <- vBB
+    GET_VREG_FLOAT f1, a3               # f1 <- vCC
+    div.s   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a4               # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_float: /* 0xaa */
+/* File: mips64/op_rem_float.S */
+    /* rem-float vAA, vBB, vCC */
+    .extern fmodf
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_FLOAT f12, a2              # f12 <- vBB
+    GET_VREG_FLOAT f13, a3              # f13 <- vCC
+    jal     fmodf                       # f0 <- f12 op f13
+    srl     a4, rINST, 8                # a4 <- AA
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a4               # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
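+    # rem-float has no single-instruction form, so the handler calls out to
+    # libm's fmodf with the operands in f12/f13 and the result returned in f0,
+    # per the standard MIPS calling convention for float arguments.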
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_double: /* 0xab */
+/* File: mips64/op_add_double.S */
+/* File: mips64/fbinopWide.S */
+    /*
+     * Generic 64-bit floating-point operation.
+     *
+     * For: add-double, sub-double, mul-double, div-double.
+     * form: <op> f0, f0, f1
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
+    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
+    add.d   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a4              # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
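+    # MIPS64 FPU registers are 64 bits wide, so a Dalvik double fits in a
+    # single fN register and the wide FP handlers need no register pairing.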
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_double: /* 0xac */
+/* File: mips64/op_sub_double.S */
+/* File: mips64/fbinopWide.S */
+    /*
+     * Generic 64-bit floating-point operation.
+     *
+     * For: add-double, sub-double, mul-double, div-double.
+     * form: <op> f0, f0, f1
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
+    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
+    sub.d   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a4              # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_double: /* 0xad */
+/* File: mips64/op_mul_double.S */
+/* File: mips64/fbinopWide.S */
+    /*
+     * Generic 64-bit floating-point operation.
+     *
+     * For: add-double, sub-double, mul-double, div-double.
+     * form: <op> f0, f0, f1
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
+    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
+    mul.d   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a4              # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_double: /* 0xae */
+/* File: mips64/op_div_double.S */
+/* File: mips64/fbinopWide.S */
+    /*
+     * Generic 64-bit floating-point operation.
+     *
+     * For: add-double, sub-double, mul-double, div-double.
+     * form: <op> f0, f0, f1
+     */
+    /* binop vAA, vBB, vCC */
+    srl     a4, rINST, 8                # a4 <- AA
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
+    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
+    div.d   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a4              # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_double: /* 0xaf */
+/* File: mips64/op_rem_double.S */
+    /* rem-double vAA, vBB, vCC */
+    .extern fmod
+    lbu     a2, 2(rPC)                  # a2 <- BB
+    lbu     a3, 3(rPC)                  # a3 <- CC
+    GET_VREG_DOUBLE f12, a2             # f12 <- vBB
+    GET_VREG_DOUBLE f13, a3             # f13 <- vCC
+    jal     fmod                        # f0 <- f12 op f13
+    srl     a4, rINST, 8                # a4 <- AA
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a4              # vAA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
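+    # Like rem-float, this defers to libm (fmod).  FETCH_ADVANCE_INST runs
+    # after the call; the interpreter state (rPC, rINST, rFP) lives in
+    # callee-saved registers, so it survives the call unchanged.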
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_int_2addr: /* 0xb0 */
+/* File: mips64/op_add_int_2addr.S */
+/* File: mips64/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    addu    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
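+    # The /2addr forms pack both registers into the first code unit: A sits in
+    # bits 8-11 and B in bits 12-15 of rINST, and the result is written back
+    # to vA, so the first source doubles as the destination.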
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_int_2addr: /* 0xb1 */
+/* File: mips64/op_sub_int_2addr.S */
+/* File: mips64/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    subu    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_int_2addr: /* 0xb2 */
+/* File: mips64/op_mul_int_2addr.S */
+/* File: mips64/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    mul     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_int_2addr: /* 0xb3 */
+/* File: mips64/op_div_int_2addr.S */
+/* File: mips64/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    .if 1
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    div     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
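+    # div/rem instantiate the template with chkzero=1, so the ".if 1" above
+    # assembles in the branch to common_errDivideByZero; the other ops use
+    # ".if 0" and the check disappears at assembly time.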
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_int_2addr: /* 0xb4 */
+/* File: mips64/op_rem_int_2addr.S */
+/* File: mips64/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    .if 1
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    mod     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
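+    # div and mod here are the three-operand MIPS64r6 forms that write the
+    # quotient/remainder directly to a GPR; there is no HI/LO pair to read
+    # back as on pre-R6 cores.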
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_int_2addr: /* 0xb5 */
+/* File: mips64/op_and_int_2addr.S */
+/* File: mips64/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    and     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_int_2addr: /* 0xb6 */
+/* File: mips64/op_or_int_2addr.S */
+/* File: mips64/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    or      a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_int_2addr: /* 0xb7 */
+/* File: mips64/op_xor_int_2addr.S */
+/* File: mips64/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    xor     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_int_2addr: /* 0xb8 */
+/* File: mips64/op_shl_int_2addr.S */
+/* File: mips64/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    sll     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_int_2addr: /* 0xb9 */
+/* File: mips64/op_shr_int_2addr.S */
+/* File: mips64/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    sra     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_int_2addr: /* 0xba */
+/* File: mips64/op_ushr_int_2addr.S */
+/* File: mips64/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a2                     # a0 <- vA
+    GET_VREG a1, a3                     # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    srl     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_long_2addr: /* 0xbb */
+/* File: mips64/op_add_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a2                # a0 <- vA
+    GET_VREG_WIDE a1, a3                # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    daddu   a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
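+    # The wide /2addr handlers mirror the 32-bit ones but move values with
+    # GET_VREG_WIDE/SET_VREG_WIDE, which transfer the two 32-bit vreg slots of
+    # a Dalvik wide value as a single 64-bit load/store.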
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_long_2addr: /* 0xbc */
+/* File: mips64/op_sub_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a2                # a0 <- vA
+    GET_VREG_WIDE a1, a3                # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    dsubu   a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_long_2addr: /* 0xbd */
+/* File: mips64/op_mul_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a2                # a0 <- vA
+    GET_VREG_WIDE a1, a3                # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    dmul    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_long_2addr: /* 0xbe */
+/* File: mips64/op_div_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a2                # a0 <- vA
+    GET_VREG_WIDE a1, a3                # a1 <- vB
+    .if 1
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    ddiv    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_long_2addr: /* 0xbf */
+/* File: mips64/op_rem_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a2                # a0 <- vA
+    GET_VREG_WIDE a1, a3                # a1 <- vB
+    .if 1
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    dmod    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_long_2addr: /* 0xc0 */
+/* File: mips64/op_and_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a2                # a0 <- vA
+    GET_VREG_WIDE a1, a3                # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    and     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_long_2addr: /* 0xc1 */
+/* File: mips64/op_or_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a2                # a0 <- vA
+    GET_VREG_WIDE a1, a3                # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    or      a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_long_2addr: /* 0xc2 */
+/* File: mips64/op_xor_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a2                # a0 <- vA
+    GET_VREG_WIDE a1, a3                # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    xor     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_long_2addr: /* 0xc3 */
+/* File: mips64/op_shl_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a2                # a0 <- vA
+    GET_VREG_WIDE a1, a3                # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    dsll    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_long_2addr: /* 0xc4 */
+/* File: mips64/op_shr_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a2                # a0 <- vA
+    GET_VREG_WIDE a1, a3                # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    dsra    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_long_2addr: /* 0xc5 */
+/* File: mips64/op_ushr_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+     * correctly.
+     *
+     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_WIDE a0, a2                # a0 <- vA
+    GET_VREG_WIDE a1, a3                # a1 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+                                        # optional op
+    dsrl    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_float_2addr: /* 0xc6 */
+/* File: mips64/op_add_float_2addr.S */
+/* File: mips64/fbinop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" floating-point operation.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
+     * form: <op> f0, f0, f1
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_FLOAT f0, a2               # f0 <- vA
+    GET_VREG_FLOAT f1, a3               # f1 <- vB
+    add.s   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a2               # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_float_2addr: /* 0xc7 */
+/* File: mips64/op_sub_float_2addr.S */
+/* File: mips64/fbinop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" floating-point operation.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
+     * form: <op> f0, f0, f1
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_FLOAT f0, a2               # f0 <- vA
+    GET_VREG_FLOAT f1, a3               # f1 <- vB
+    sub.s   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a2               # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_float_2addr: /* 0xc8 */
+/* File: mips64/op_mul_float_2addr.S */
+/* File: mips64/fbinop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" floating-point operation.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
+     * form: <op> f0, f0, f1
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_FLOAT f0, a2               # f0 <- vA
+    GET_VREG_FLOAT f1, a3               # f1 <- vB
+    mul.s   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a2               # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_float_2addr: /* 0xc9 */
+/* File: mips64/op_div_float_2addr.S */
+/* File: mips64/fbinop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" floating-point operation.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
+     * form: <op> f0, f0, f1
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_FLOAT f0, a2               # f0 <- vA
+    GET_VREG_FLOAT f1, a3               # f1 <- vB
+    div.s   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a2               # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_float_2addr: /* 0xca */
+/* File: mips64/op_rem_float_2addr.S */
+    /* rem-float/2addr vA, vB */
+    .extern fmodf
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_FLOAT f12, a2              # f12 <- vA
+    GET_VREG_FLOAT f13, a3              # f13 <- vB
+    jal     fmodf                       # f0 <- f12 op f13
+    ext     a2, rINST, 8, 4             # a2 <- A
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_FLOAT f0, a2               # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
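+    # a2 is an argument register and is not preserved across the fmodf call,
+    # which is why A is extracted from rINST a second time before the result
+    # is stored back to vA.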
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_double_2addr: /* 0xcb */
+/* File: mips64/op_add_double_2addr.S */
+/* File: mips64/fbinopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" floating-point operation.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
+     * form: <op> f0, f0, f1
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_DOUBLE f0, a2              # f0 <- vA
+    GET_VREG_DOUBLE f1, a3              # f1 <- vB
+    add.d   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a2              # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_double_2addr: /* 0xcc */
+/* File: mips64/op_sub_double_2addr.S */
+/* File: mips64/fbinopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" floating-point operation.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
+     * form: <op> f0, f0, f1
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_DOUBLE f0, a2              # f0 <- vA
+    GET_VREG_DOUBLE f1, a3              # f1 <- vB
+    sub.d   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a2              # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_double_2addr: /* 0xcd */
+/* File: mips64/op_mul_double_2addr.S */
+/* File: mips64/fbinopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" floating-point operation.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
+     * form: <op> f0, f0, f1
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_DOUBLE f0, a2              # f0 <- vA
+    GET_VREG_DOUBLE f1, a3              # f1 <- vB
+    mul.d   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a2              # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_double_2addr: /* 0xce */
+/* File: mips64/op_div_double_2addr.S */
+/* File: mips64/fbinopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" floating-point operation.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
+     * form: <op> f0, f0, f1
+     */
+    /* binop/2addr vA, vB */
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_DOUBLE f0, a2              # f0 <- vA
+    GET_VREG_DOUBLE f1, a3              # f1 <- vB
+    div.d   f0, f0, f1                  # f0 <- f0 op f1
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a2              # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_double_2addr: /* 0xcf */
+/* File: mips64/op_rem_double_2addr.S */
+    /* rem-double/2addr vA, vB */
+    .extern fmod
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG_DOUBLE f12, a2             # f12 <- vA
+    GET_VREG_DOUBLE f13, a3             # f13 <- vB
+    jal     fmod                        # f0 <- f12 op f13
+    ext     a2, rINST, 8, 4             # a2 <- A
+    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_DOUBLE f0, a2              # vA <- f0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_int_lit16: /* 0xd0 */
+/* File: mips64/op_add_int_lit16.S */
+/* File: mips64/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    addu    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
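+    # The lit16 forms fetch a 16-bit immediate from the second code unit; lh
+    # sign-extends it, so negative literals need no extra handling.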
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rsub_int: /* 0xd1 */
+/* File: mips64/op_rsub_int.S */
+/* File: mips64/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    subu    a0, a1, a0                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_int_lit16: /* 0xd2 */
+/* File: mips64/op_mul_int_lit16.S */
+/* File: mips64/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    mul     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_int_lit16: /* 0xd3 */
+/* File: mips64/op_div_int_lit16.S */
+/* File: mips64/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    .if 1
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    div     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
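+/*
+ * The explicit zero check is required because MIPS div/mod leave the result
+ * UNPREDICTABLE for a zero divisor, while Dex semantics demand that div/rem
+ * throw an ArithmeticException.
+ */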
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_int_lit16: /* 0xd4 */
+/* File: mips64/op_rem_int_lit16.S */
+/* File: mips64/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    .if 1
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    mod     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_int_lit16: /* 0xd5 */
+/* File: mips64/op_and_int_lit16.S */
+/* File: mips64/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    and     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_int_lit16: /* 0xd6 */
+/* File: mips64/op_or_int_lit16.S */
+/* File: mips64/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    or      a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_int_lit16: /* 0xd7 */
+/* File: mips64/op_xor_int_lit16.S */
+/* File: mips64/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    /* binop/lit16 vA, vB, #+CCCC */
+    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
+    ext     a2, rINST, 8, 4             # a2 <- A
+    ext     a3, rINST, 12, 4            # a3 <- B
+    GET_VREG a0, a3                     # a0 <- vB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    xor     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_int_lit8: /* 0xd8 */
+/* File: mips64/op_add_int_lit8.S */
+/* File: mips64/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    lbu     a3, 2(rPC)                  # a3 <- BB
+    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a3                     # a0 <- vBB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    addu    a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
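+/*
+ * Illustrative note: binop/lit8 uses Dex format 22b, "AA|op BB CC", which is
+ * why AA comes from "srl rINST, 8" while BB (zero-extended) and CC
+ * (sign-extended) are read as the third and fourth bytes at rPC.
+ */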
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rsub_int_lit8: /* 0xd9 */
+/* File: mips64/op_rsub_int_lit8.S */
+/* File: mips64/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    lbu     a3, 2(rPC)                  # a3 <- BB
+    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a3                     # a0 <- vBB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    subu    a0, a1, a0                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_int_lit8: /* 0xda */
+/* File: mips64/op_mul_int_lit8.S */
+/* File: mips64/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    lbu     a3, 2(rPC)                  # a3 <- BB
+    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a3                     # a0 <- vBB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    mul     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_int_lit8: /* 0xdb */
+/* File: mips64/op_div_int_lit8.S */
+/* File: mips64/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    lbu     a3, 2(rPC)                  # a3 <- BB
+    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a3                     # a0 <- vBB
+    .if 1
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    div     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_int_lit8: /* 0xdc */
+/* File: mips64/op_rem_int_lit8.S */
+/* File: mips64/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    lbu     a3, 2(rPC)                  # a3 <- BB
+    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a3                     # a0 <- vBB
+    .if 1
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    mod     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_int_lit8: /* 0xdd */
+/* File: mips64/op_and_int_lit8.S */
+/* File: mips64/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    lbu     a3, 2(rPC)                  # a3 <- BB
+    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a3                     # a0 <- vBB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    and     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_int_lit8: /* 0xde */
+/* File: mips64/op_or_int_lit8.S */
+/* File: mips64/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    lbu     a3, 2(rPC)                  # a3 <- BB
+    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a3                     # a0 <- vBB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    or      a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_int_lit8: /* 0xdf */
+/* File: mips64/op_xor_int_lit8.S */
+/* File: mips64/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    lbu     a3, 2(rPC)                  # a3 <- BB
+    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a3                     # a0 <- vBB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    xor     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_int_lit8: /* 0xe0 */
+/* File: mips64/op_shl_int_lit8.S */
+/* File: mips64/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    lbu     a3, 2(rPC)                  # a3 <- BB
+    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a3                     # a0 <- vBB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    sll     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
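+/*
+ * No explicit masking of the shift amount is needed here: Dex defines int
+ * shifts to use only the low five bits of the distance, which matches the
+ * behavior of the MIPS 32-bit shift instructions with a register operand.
+ */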
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_int_lit8: /* 0xe1 */
+/* File: mips64/op_shr_int_lit8.S */
+/* File: mips64/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    lbu     a3, 2(rPC)                  # a3 <- BB
+    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a3                     # a0 <- vBB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    sra     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_int_lit8: /* 0xe2 */
+/* File: mips64/op_ushr_int_lit8.S */
+/* File: mips64/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    /* binop/lit8 vAA, vBB, #+CC */
+    lbu     a3, 2(rPC)                  # a3 <- BB
+    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
+    srl     a2, rINST, 8                # a2 <- AA
+    GET_VREG a0, a3                     # a0 <- vBB
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+                                        # optional op
+    srl     a0, a0, a1                  # a0 <- op, a0-a3 changed
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG a0, a2                     # vAA <- a0
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_quick: /* 0xe3 */
+/* File: mips64/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    /* op vA, vB, offset//CCCC */
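+    # "Quick" form: CCCC has already been rewritten by the dex-to-dex
+    # quickener into the raw field byte offset, so no field resolution or
+    # access check is needed here.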
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- object we're operating on
+    ext     a4, rINST, 8, 4             # a4 <- A
+    daddu   a1, a1, a3                  # a1 <- address of the field
+    beqz    a3, common_errNullObject    # object was null
+    lw      a0, 0(a1)                   # a0 <- obj.field
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    SET_VREG a0, a4                     # fp[A] <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_wide_quick: /* 0xe4 */
+/* File: mips64/op_iget_wide_quick.S */
+    /* iget-wide-quick vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a4, 2(rPC)                  # a4 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- object we're operating on
+    ext     a2, rINST, 8, 4             # a2 <- A
+    beqz    a3, common_errNullObject    # object was null
+    daddu   a4, a3, a4                  # create direct pointer
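+    # Load as two 32-bit halves and merge below; the field may not be 8-byte
+    # aligned, so a single 64-bit load is avoided.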
+    lw      a0, 0(a4)                   # a0 <- lo 32 bits of the field
+    lw      a1, 4(a4)                   # a1 <- hi 32 bits of the field
+    dinsu   a0, a1, 32, 32              # a0 <- the full 64-bit value
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    SET_VREG_WIDE a0, a2                # vA <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_object_quick: /* 0xe5 */
+/* File: mips64/op_iget_object_quick.S */
+    /* For: iget-object-quick */
+    /* op vA, vB, offset//CCCC */
+    .extern artIGetObjectFromMterp
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    EXPORT_PC
+    GET_VREG_U a0, a2                   # a0 <- object we're operating on
+    jal     artIGetObjectFromMterp      # (obj, offset)
+    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)  # a3 <- self->exception
+    ext     a2, rINST, 8, 4             # a2 <- A
+    PREFETCH_INST 2
+    bnez    a3, MterpPossibleException  # bail out
+    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
+    ADVANCE 2                           # advance rPC
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_quick: /* 0xe6 */
+/* File: mips64/op_iput_quick.S */
+    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+    /* op vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
+    ext     a2, rINST, 8, 4             # a2 <- A
+    beqz    a3, common_errNullObject    # object was null
+    GET_VREG a0, a2                     # a0 <- fp[A]
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    daddu   a1, a1, a3                  # a1 <- address of the field
+    sw      a0, 0(a1)                   # obj.field <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_wide_quick: /* 0xe7 */
+/* File: mips64/op_iput_wide_quick.S */
+    /* iput-wide-quick vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a3, 2(rPC)                  # a3 <- field byte offset
+    GET_VREG_U a2, a2                   # a2 <- fp[B], the object pointer
+    ext     a0, rINST, 8, 4             # a0 <- A
+    beqz    a2, common_errNullObject    # object was null
+    GET_VREG_WIDE a0, a0                # a0 <- fp[A]
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    daddu   a1, a2, a3                  # create a direct pointer
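+    # Mirror of iget-wide-quick: store the value as two 32-bit halves.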
+    sw      a0, 0(a1)                   # store lo word
+    dsrl32  a0, a0, 0                   # a0 <- hi word
+    sw      a0, 4(a1)                   # store hi word
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_object_quick: /* 0xe8 */
+/* File: mips64/op_iput_object_quick.S */
+    .extern MterpIputObjectQuick
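+    # Reference stores are routed through the C++ helper so the runtime can
+    # apply its GC write barrier; a zero result signals a pending exception.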
+    EXPORT_PC
+    daddu   a0, rFP, OFF_FP_SHADOWFRAME
+    move    a1, rPC
+    move    a2, rINST
+    jal     MterpIputObjectQuick
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_virtual_quick: /* 0xe9 */
+/* File: mips64/op_invoke_virtual_quick.S */
+/* File: mips64/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeVirtualQuick
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     MterpInvokeVirtualQuick
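+    # v0 is the helper's success flag; zero means an exception is pending.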
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 3
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_virtual_range_quick: /* 0xea */
+/* File: mips64/op_invoke_virtual_range_quick.S */
+/* File: mips64/invoke.S */
+    /*
+     * Generic invoke handler wrapper.
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeVirtualQuickRange
+    EXPORT_PC
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    move    a2, rPC
+    move    a3, rINST
+    jal     MterpInvokeVirtualQuickRange
+    beqzc   v0, MterpException
+    FETCH_ADVANCE_INST 3
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_boolean_quick: /* 0xeb */
+/* File: mips64/op_iput_boolean_quick.S */
+/* File: mips64/op_iput_quick.S */
+    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+    /* op vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
+    ext     a2, rINST, 8, 4             # a2 <- A
+    beqz    a3, common_errNullObject    # object was null
+    GET_VREG a0, a2                     # a0 <- fp[A]
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    daddu   a1, a1, a3                  # a1 <- address of the field
+    sb      a0, 0(a1)                   # obj.field <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_byte_quick: /* 0xec */
+/* File: mips64/op_iput_byte_quick.S */
+/* File: mips64/op_iput_quick.S */
+    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+    /* op vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
+    ext     a2, rINST, 8, 4             # a2 <- A
+    beqz    a3, common_errNullObject    # object was null
+    GET_VREG a0, a2                     # a0 <- fp[A]
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    daddu   a1, a1, a3                  # a1 <- address of the field
+    sb      a0, 0(a1)                   # obj.field <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_char_quick: /* 0xed */
+/* File: mips64/op_iput_char_quick.S */
+/* File: mips64/op_iput_quick.S */
+    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+    /* op vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
+    ext     a2, rINST, 8, 4             # a2 <- A
+    beqz    a3, common_errNullObject    # object was null
+    GET_VREG a0, a2                     # a0 <- fp[A]
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    daddu   a1, a1, a3                  # a1 <- address of the field
+    sh      a0, 0(a1)                   # obj.field <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_short_quick: /* 0xee */
+/* File: mips64/op_iput_short_quick.S */
+/* File: mips64/op_iput_quick.S */
+    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+    /* op vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
+    ext     a2, rINST, 8, 4             # a2 <- A
+    beqz    a3, common_errNullObject    # object was null
+    GET_VREG a0, a2                     # a0 <- fp[A]
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    daddu   a1, a1, a3                  # a1 <- address of the field
+    sh      a0, 0(a1)                   # obj.field <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_boolean_quick: /* 0xef */
+/* File: mips64/op_iget_boolean_quick.S */
+/* File: mips64/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    /* op vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- object we're operating on
+    ext     a4, rINST, 8, 4             # a4 <- A
+    daddu   a1, a1, a3                  # a1 <- address of the field
+    beqz    a3, common_errNullObject    # object was null
+    lbu     a0, 0(a1)                   # a0 <- obj.field
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    SET_VREG a0, a4                     # fp[A] <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_byte_quick: /* 0xf0 */
+/* File: mips64/op_iget_byte_quick.S */
+/* File: mips64/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    /* op vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- object we're operating on
+    ext     a4, rINST, 8, 4             # a4 <- A
+    daddu   a1, a1, a3                  # a1 <- address of the field
+    beqz    a3, common_errNullObject    # object was null
+    lb      a0, 0(a1)                   # a0 <- obj.field
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    SET_VREG a0, a4                     # fp[A] <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_char_quick: /* 0xf1 */
+/* File: mips64/op_iget_char_quick.S */
+/* File: mips64/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    /* op vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- object we're operating on
+    ext     a4, rINST, 8, 4             # a4 <- A
+    daddu   a1, a1, a3                  # a1 <- address of the field
+    beqz    a3, common_errNullObject    # object was null
+    lhu     a0, 0(a1)                   # a0 <- obj.field
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    SET_VREG a0, a4                     # fp[A] <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_short_quick: /* 0xf2 */
+/* File: mips64/op_iget_short_quick.S */
+/* File: mips64/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+    /* op vA, vB, offset//CCCC */
+    srl     a2, rINST, 12               # a2 <- B
+    lhu     a1, 2(rPC)                  # a1 <- field byte offset
+    GET_VREG_U a3, a2                   # a3 <- object we're operating on
+    ext     a4, rINST, 8, 4             # a4 <- A
+    daddu   a1, a1, a3                  # a1 <- address of the field
+    beqz    a3, common_errNullObject    # object was null
+    lh      a0, 0(a1)                   # a0 <- obj.field
+    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
+    SET_VREG a0, a4                     # fp[A] <- a0
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    GOTO_OPCODE v0                      # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_lambda: /* 0xf3 */
+/* Transfer stub to alternate interpreter */
+    b       MterpFallback
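+/*
+ * The experimental lambda opcodes (0xf3, 0xf5-0xf9) have no mterp
+ * implementation; MterpFallback hands control to the reference interpreter.
+ */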
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_f4: /* 0xf4 */
+/* File: mips64/op_unused_f4.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_capture_variable: /* 0xf5 */
+/* Transfer stub to alternate interpreter */
+    b       MterpFallback
+
+/* ------------------------------ */
+    .balign 128
+.L_op_create_lambda: /* 0xf6 */
+/* Transfer stub to alternate interpreter */
+    b       MterpFallback
+
+/* ------------------------------ */
+    .balign 128
+.L_op_liberate_variable: /* 0xf7 */
+/* Transfer stub to alternate interpreter */
+    b       MterpFallback
+
+/* ------------------------------ */
+    .balign 128
+.L_op_box_lambda: /* 0xf8 */
+/* Transfer stub to alternate interpreter */
+    b       MterpFallback
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unbox_lambda: /* 0xf9 */
+/* Transfer stub to alternate interpreter */
+    b       MterpFallback
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fa: /* 0xfa */
+/* File: mips64/op_unused_fa.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fb: /* 0xfb */
+/* File: mips64/op_unused_fb.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fc: /* 0xfc */
+/* File: mips64/op_unused_fc.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fd: /* 0xfd */
+/* File: mips64/op_unused_fd.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fe: /* 0xfe */
+/* File: mips64/op_unused_fe.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_ff: /* 0xff */
+/* File: mips64/op_unused_ff.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    b       MterpFallback
+
+
+    .balign 128
+    .size   artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
+    .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ *  Sister implementations
+ * ===========================================================================
+ */
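+/*
+ * Primary handlers are padded to fixed 128-byte slots (.balign 128 above);
+ * code that does not fit in its slot continues here and is reached by a
+ * branch from the corresponding handler.
+ */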
+    .global artMterpAsmSisterStart
+    .type   artMterpAsmSisterStart, %function
+    .text
+    .balign 4
+artMterpAsmSisterStart:
+
+/* continuation for op_float_to_int */
+.Lop_float_to_int_trunc:
+    trunc.w.s f0, f0                    # f0 <- (int32_t) f0, rounding toward zero
+    mfc1    t0, f0                      # t0 <- f0
+.Lop_float_to_int_done:
+    /* Can't include fcvtFooter.S after break */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG t0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* continuation for op_float_to_long */
+.Lop_float_to_long_trunc:
+    trunc.l.s f0, f0                    # f0 <- (int64_t) f0, rounding toward zero
+    dmfc1   t0, f0                      # t0 <- f0
+.Lop_float_to_long_done:
+    /* Can't include fcvtFooter.S after break */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE t0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* continuation for op_double_to_int */
+.Lop_double_to_int_trunc:
+    trunc.w.d f0, f0                    # f0 <- (int32_t) f0, rounding toward zero
+    mfc1    t0, f0                      # t0 <- f0
+.Lop_double_to_int_done:
+    /* Can't include fcvtFooter.S after break */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG t0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
+
+/* continuation for op_double_to_long */
+.Lop_double_to_long_trunc:
+    trunc.l.d f0, f0                    # f0 <- (int64_t) f0, rounding toward zero
+    dmfc1   t0, f0                      # t0 <- f0
+.Lop_double_to_long_done:
+    /* Can't include fcvtFooter.S after break */
+    GET_INST_OPCODE v0                  # extract opcode from rINST
+    SET_VREG_WIDE t0, a1
+    GOTO_OPCODE v0                      # jump to next instruction
+
+    .size   artMterpAsmSisterStart, .-artMterpAsmSisterStart
+    .global artMterpAsmSisterEnd
+artMterpAsmSisterEnd:
+
+
+    .global artMterpAsmAltInstructionStart
+    .type   artMterpAsmAltInstructionStart, %function
+    .text
+
+artMterpAsmAltInstructionStart = .L_ALT_op_nop
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_nop: /* 0x00 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (0 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
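+/*
+ * Because every handler occupies a fixed 128-byte slot, "start + opcode * 128"
+ * is the address of the real handler.  It is preloaded into ra, so when
+ * MterpCheckBefore returns it transfers straight into that handler.  The same
+ * stub is repeated for all 256 opcodes with only the slot index changing.
+ */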
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move: /* 0x01 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (1 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_from16: /* 0x02 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (2 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_16: /* 0x03 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (3 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_wide: /* 0x04 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (4 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_wide_from16: /* 0x05 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (5 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_wide_16: /* 0x06 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (6 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_object: /* 0x07 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (7 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_object_from16: /* 0x08 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (8 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_object_16: /* 0x09 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (9 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_result: /* 0x0a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (10 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_result_wide: /* 0x0b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (11 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_result_object: /* 0x0c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (12 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_exception: /* 0x0d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (13 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return_void: /* 0x0e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (14 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return: /* 0x0f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (15 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return_wide: /* 0x10 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (16 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return_object: /* 0x11 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (17 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_4: /* 0x12 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (18 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_16: /* 0x13 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (19 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const: /* 0x14 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (20 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_high16: /* 0x15 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (21 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_wide_16: /* 0x16 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (22 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_wide_32: /* 0x17 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (23 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_wide: /* 0x18 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (24 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_wide_high16: /* 0x19 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (25 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_string: /* 0x1a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (26 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_string_jumbo: /* 0x1b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (27 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_class: /* 0x1c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (28 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_monitor_enter: /* 0x1d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (29 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_monitor_exit: /* 0x1e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (30 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_check_cast: /* 0x1f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (31 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_instance_of: /* 0x20 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (32 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_array_length: /* 0x21 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (33 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_new_instance: /* 0x22 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (34 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_new_array: /* 0x23 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (35 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_filled_new_array: /* 0x24 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (36 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_filled_new_array_range: /* 0x25 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (37 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_fill_array_data: /* 0x26 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (38 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_throw: /* 0x27 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (39 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_goto: /* 0x28 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (40 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_goto_16: /* 0x29 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (41 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_goto_32: /* 0x2a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (42 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_packed_switch: /* 0x2b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (43 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sparse_switch: /* 0x2c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (44 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmpl_float: /* 0x2d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (45 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmpg_float: /* 0x2e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (46 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmpl_double: /* 0x2f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (47 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmpg_double: /* 0x30 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (48 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmp_long: /* 0x31 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (49 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_eq: /* 0x32 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (50 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_ne: /* 0x33 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (51 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_lt: /* 0x34 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (52 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_ge: /* 0x35 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (53 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_gt: /* 0x36 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (54 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_le: /* 0x37 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (55 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_eqz: /* 0x38 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (56 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_nez: /* 0x39 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (57 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_ltz: /* 0x3a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (58 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_gez: /* 0x3b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (59 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_gtz: /* 0x3c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (60 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_lez: /* 0x3d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (61 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_3e: /* 0x3e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (62 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_3f: /* 0x3f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (63 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_40: /* 0x40 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (64 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_41: /* 0x41 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (65 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_42: /* 0x42 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (66 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_43: /* 0x43 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (67 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget: /* 0x44 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (68 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_wide: /* 0x45 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (69 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_object: /* 0x46 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (70 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_boolean: /* 0x47 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (71 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_byte: /* 0x48 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (72 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_char: /* 0x49 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (73 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_short: /* 0x4a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (74 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput: /* 0x4b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (75 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_wide: /* 0x4c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (76 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_object: /* 0x4d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (77 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_boolean: /* 0x4e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (78 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_byte: /* 0x4f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (79 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_char: /* 0x50 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (80 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_short: /* 0x51 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (81 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget: /* 0x52 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (82 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_wide: /* 0x53 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (83 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_object: /* 0x54 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (84 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_boolean: /* 0x55 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (85 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_byte: /* 0x56 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (86 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_char: /* 0x57 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (87 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_short: /* 0x58 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (88 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput: /* 0x59 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (89 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_wide: /* 0x5a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (90 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_object: /* 0x5b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (91 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_boolean: /* 0x5c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (92 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_byte: /* 0x5d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (93 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_char: /* 0x5e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (94 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_short: /* 0x5f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (95 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget: /* 0x60 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (96 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_wide: /* 0x61 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (97 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_object: /* 0x62 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (98 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_boolean: /* 0x63 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (99 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_byte: /* 0x64 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (100 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_char: /* 0x65 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (101 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_short: /* 0x66 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (102 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput: /* 0x67 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (103 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_wide: /* 0x68 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (104 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_object: /* 0x69 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (105 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_boolean: /* 0x6a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (106 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_byte: /* 0x6b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (107 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_char: /* 0x6c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (108 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_short: /* 0x6d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (109 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_virtual: /* 0x6e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (110 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_super: /* 0x6f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (111 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_direct: /* 0x70 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (112 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_static: /* 0x71 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (113 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_interface: /* 0x72 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (114 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return_void_no_barrier: /* 0x73 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (115 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_virtual_range: /* 0x74 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (116 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_super_range: /* 0x75 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (117 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_direct_range: /* 0x76 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (118 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_static_range: /* 0x77 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (119 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_interface_range: /* 0x78 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (120 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_79: /* 0x79 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (121 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_7a: /* 0x7a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (122 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_neg_int: /* 0x7b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (123 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_not_int: /* 0x7c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (124 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_neg_long: /* 0x7d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (125 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_not_long: /* 0x7e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (126 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_neg_float: /* 0x7f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (127 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_neg_double: /* 0x80 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (128 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_long: /* 0x81 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (129 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_float: /* 0x82 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (130 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_double: /* 0x83 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (131 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_long_to_int: /* 0x84 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (132 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_long_to_float: /* 0x85 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (133 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_long_to_double: /* 0x86 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (134 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_float_to_int: /* 0x87 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (135 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_float_to_long: /* 0x88 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (136 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_float_to_double: /* 0x89 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (137 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_double_to_int: /* 0x8a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (138 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_double_to_long: /* 0x8b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (139 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_double_to_float: /* 0x8c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (140 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_byte: /* 0x8d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (141 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_char: /* 0x8e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (142 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_short: /* 0x8f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (143 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_int: /* 0x90 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (144 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_int: /* 0x91 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (145 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_int: /* 0x92 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (146 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_int: /* 0x93 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (147 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_int: /* 0x94 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (148 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_int: /* 0x95 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (149 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_int: /* 0x96 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (150 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_int: /* 0x97 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (151 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_int: /* 0x98 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (152 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_int: /* 0x99 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (153 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_int: /* 0x9a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (154 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_long: /* 0x9b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (155 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_long: /* 0x9c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (156 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_long: /* 0x9d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (157 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_long: /* 0x9e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (158 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_long: /* 0x9f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (159 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_long: /* 0xa0 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (160 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_long: /* 0xa1 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (161 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_long: /* 0xa2 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (162 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_long: /* 0xa3 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (163 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_long: /* 0xa4 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (164 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_long: /* 0xa5 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (165 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_float: /* 0xa6 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (166 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_float: /* 0xa7 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (167 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_float: /* 0xa8 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (168 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_float: /* 0xa9 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (169 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_float: /* 0xaa */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (170 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_double: /* 0xab */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (171 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_double: /* 0xac */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (172 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_double: /* 0xad */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (173 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_double: /* 0xae */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (174 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_double: /* 0xaf */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (175 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_int_2addr: /* 0xb0 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (176 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_int_2addr: /* 0xb1 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (177 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_int_2addr: /* 0xb2 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (178 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_int_2addr: /* 0xb3 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (179 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_int_2addr: /* 0xb4 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (180 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_int_2addr: /* 0xb5 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (181 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_int_2addr: /* 0xb6 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (182 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_int_2addr: /* 0xb7 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (183 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_int_2addr: /* 0xb8 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (184 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_int_2addr: /* 0xb9 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (185 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_int_2addr: /* 0xba */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (186 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_long_2addr: /* 0xbb */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (187 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_long_2addr: /* 0xbc */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (188 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_long_2addr: /* 0xbd */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (189 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_long_2addr: /* 0xbe */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (190 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_long_2addr: /* 0xbf */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (191 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_long_2addr: /* 0xc0 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (192 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_long_2addr: /* 0xc1 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (193 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_long_2addr: /* 0xc2 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (194 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_long_2addr: /* 0xc3 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (195 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_long_2addr: /* 0xc4 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (196 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_long_2addr: /* 0xc5 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (197 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_float_2addr: /* 0xc6 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (198 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_float_2addr: /* 0xc7 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (199 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_float_2addr: /* 0xc8 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (200 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_float_2addr: /* 0xc9 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (201 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_float_2addr: /* 0xca */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (202 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_double_2addr: /* 0xcb */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (203 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_double_2addr: /* 0xcc */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (204 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_double_2addr: /* 0xcd */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (205 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_double_2addr: /* 0xce */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (206 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_double_2addr: /* 0xcf */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (207 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_int_lit16: /* 0xd0 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (208 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rsub_int: /* 0xd1 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (209 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_int_lit16: /* 0xd2 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (210 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_int_lit16: /* 0xd3 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (211 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_int_lit16: /* 0xd4 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (212 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_int_lit16: /* 0xd5 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (213 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_int_lit16: /* 0xd6 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (214 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_int_lit16: /* 0xd7 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (215 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_int_lit8: /* 0xd8 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (216 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rsub_int_lit8: /* 0xd9 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (217 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_int_lit8: /* 0xda */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (218 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_int_lit8: /* 0xdb */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (219 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_int_lit8: /* 0xdc */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (220 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_int_lit8: /* 0xdd */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (221 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_int_lit8: /* 0xde */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (222 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_int_lit8: /* 0xdf */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (223 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_int_lit8: /* 0xe0 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (224 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_int_lit8: /* 0xe1 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (225 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_int_lit8: /* 0xe2 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (226 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_quick: /* 0xe3 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (227 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_wide_quick: /* 0xe4 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (228 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_object_quick: /* 0xe5 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (229 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_quick: /* 0xe6 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (230 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_wide_quick: /* 0xe7 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (231 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_object_quick: /* 0xe8 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (232 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (233 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (234 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_boolean_quick: /* 0xeb */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (235 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_byte_quick: /* 0xec */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (236 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_char_quick: /* 0xed */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (237 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_short_quick: /* 0xee */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (238 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_boolean_quick: /* 0xef */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (239 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_byte_quick: /* 0xf0 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (240 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_char_quick: /* 0xf1 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (241 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_short_quick: /* 0xf2 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (242 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_lambda: /* 0xf3 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (243 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_f4: /* 0xf4 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (244 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_capture_variable: /* 0xf5 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (245 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_create_lambda: /* 0xf6 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (246 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_liberate_variable: /* 0xf7 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (247 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_box_lambda: /* 0xf8 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (248 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unbox_lambda: /* 0xf9 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (249 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fa: /* 0xfa */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (250 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fb: /* 0xfb */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (251 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fc: /* 0xfc */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (252 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fd: /* 0xfd */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (253 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fe: /* 0xfe */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (254 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_ff: /* 0xff */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to MterpCheckBefore is done as a tail call.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    dla     ra, artMterpAsmInstructionStart
+    dla     t9, MterpCheckBefore
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    daddu   ra, ra, (255 * 128)            # Addr of primary handler.
+    jalr    zero, t9                            # (self, shadow_frame) Note: tail call.
+
+    .balign 128
+    .size   artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
+    .global artMterpAsmAltInstructionEnd
+artMterpAsmAltInstructionEnd:
+/* File: mips64/footer.S */
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+
+    .extern MterpLogDivideByZeroException
+common_errDivideByZero:
+    EXPORT_PC
+#if MTERP_LOGGING
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpLogDivideByZeroException
+#endif
+    b       MterpCommonFallback
+
+    .extern MterpLogArrayIndexException
+common_errArrayIndex:
+    EXPORT_PC
+#if MTERP_LOGGING
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpLogArrayIndexException
+#endif
+    b       MterpCommonFallback
+
+    .extern MterpLogNullObjectException
+common_errNullObject:
+    EXPORT_PC
+#if MTERP_LOGGING
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpLogNullObjectException
+#endif
+    b       MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary.  If there is a pending
+ * exception, handle it.  Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)
+    beqzc   a0, MterpFallback                       # If not, fall back to reference interpreter.
+    /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here, or do we need to bail out to the caller?
+ */
+    .extern MterpHandleException
+MterpException:
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpHandleException                    # (self, shadow_frame)
+    beqzc   v0, MterpExceptionReturn                # no local catch, back to caller.
+    ld      a0, OFF_FP_CODE_ITEM(rFP)
+    lwu     a1, OFF_FP_DEX_PC(rFP)
+    REFRESH_IBASE
+    daddu   rPC, a0, CODEITEM_INSNS_OFFSET
+    dlsa    rPC, a1, rPC, 1                         # generate new dex_pc_ptr
+    sd      rPC, OFF_FP_DEX_PC_PTR(rFP)
+    /* resume execution at catch block */
+    FETCH_INST
+    GET_INST_OPCODE v0
+    GOTO_OPCODE v0
+    /* NOTE: no fallthrough */
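The control flow of the two labels above, rendered as hedged C++ (runtime types are stubbed; MterpHandleException's exact signature is an assumption):

struct Thread;
struct ShadowFrame;
bool MterpHandleException(Thread* self, ShadowFrame* shadow_frame);

enum class Next { kFallback, kReturnToCaller, kResumeAtCatch };

// pending is the value loaded from THREAD_EXCEPTION_OFFSET(rSELF).
Next OnPossibleException(Thread* self, ShadowFrame* sf, bool pending) {
  if (!pending) return Next::kFallback;                         // MterpFallback
  if (!MterpHandleException(self, sf)) return Next::kReturnToCaller;
  return Next::kResumeAtCatch;  // after recomputing rPC from the code item
}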
+
+/*
+ * Check for a pending suspend request.  Assumes rINST is already loaded and rPC
+ * has been advanced; we still need to extract the opcode and branch to it.  The
+ * thread flags are in ra.
+ */
+    .extern MterpSuspendCheck
+MterpCheckSuspendAndContinue:
+    REFRESH_IBASE
+    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    bnez    ra, check1
+    GET_INST_OPCODE v0                              # extract opcode from rINST
+    GOTO_OPCODE v0                                  # jump to next instruction
+check1:
+    EXPORT_PC
+    move    a0, rSELF
+    jal     MterpSuspendCheck                       # (self)
+    GET_INST_OPCODE v0                              # extract opcode from rINST
+    GOTO_OPCODE v0                                  # jump to next instruction
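A hedged C++ view of the same check (the flag masks are ART's thread flags, treated as an opaque value here):

#include <cstdint>

struct Thread;
void MterpSuspendCheck(Thread* self);

void CheckSuspendAndContinue(Thread* self, uint32_t flags, uint32_t request_mask) {
  // request_mask is THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST.
  if ((flags & request_mask) != 0) {
    MterpSuspendCheck(self);  // the real code does EXPORT_PC first
  }
  // ...then extract the opcode from rINST and dispatch as usual.
}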
+
+/*
+ * Bail out to reference interpreter.
+ */
+    .extern MterpLogFallback
+MterpFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    move    a0, rSELF
+    daddu   a1, rFP, OFF_FP_SHADOWFRAME
+    jal     MterpLogFallback
+#endif
+MterpCommonFallback:
+    li      v0, 0                                   # signal retry with reference interpreter.
+    b       MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and RA.  Here we restore SP, restore the registers, and then return
+ * through RA.
+ *
+ * On entry:
+ *  uint32_t* rFP  (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+    li      v0, 1                                   # signal return to caller.
+    b       MterpDone
+/*
+ * The return value is expected in a0; if it is not 64-bit, the 32 most
+ * significant bits of a0 must be 0.
+ */
+MterpReturn:
+    ld      a2, OFF_FP_RESULT_REGISTER(rFP)
+    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
+    sd      a0, 0(a2)
+    move    a0, rSELF
+    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    beqzc   ra, check2
+    jal     MterpSuspendCheck                       # (self)
+check2:
+    li      v0, 1                                   # signal return to caller.
+MterpDone:
+    ld      s5, STACK_OFFSET_S5(sp)
+    .cfi_restore 21
+    ld      s4, STACK_OFFSET_S4(sp)
+    .cfi_restore 20
+    ld      s3, STACK_OFFSET_S3(sp)
+    .cfi_restore 19
+    ld      s2, STACK_OFFSET_S2(sp)
+    .cfi_restore 18
+    ld      s1, STACK_OFFSET_S1(sp)
+    .cfi_restore 17
+    ld      s0, STACK_OFFSET_S0(sp)
+    .cfi_restore 16
+
+    ld      ra, STACK_OFFSET_RA(sp)
+    .cfi_restore 31
+
+    ld      t8, STACK_OFFSET_GP(sp)
+    .cpreturn
+    .cfi_restore 28
+
+    .set    noreorder
+    jr      ra
+    daddu   sp, sp, STACK_SIZE
+    .cfi_adjust_cfa_offset -STACK_SIZE
+
+    .cfi_endproc
+    .size ExecuteMterpImpl, .-ExecuteMterpImpl
+
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index d365a4f..ebac5fc 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -112,25 +112,32 @@
     #define SYMBOL(name) name
 #endif
 
+.macro PUSH _reg
+    pushl \_reg
+    .cfi_adjust_cfa_offset 4
+    .cfi_rel_offset \_reg, 0
+.endm
+
+.macro POP _reg
+    popl \_reg
+    .cfi_adjust_cfa_offset -4
+    .cfi_restore \_reg
+.endm
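What the paired CFI directives track, sketched as a tiny C++ bookkeeping model (an illustration, not real unwinder code): the CFA stays fixed while %esp moves, so each push grows the esp-to-CFA distance by 4 and records where the register was saved.

struct CfiState { int esp_to_cfa; };

void OnPush(CfiState& s) {
  s.esp_to_cfa += 4;  // .cfi_adjust_cfa_offset 4
  // .cfi_rel_offset reg, 0: reg is saved at the new %esp (CFA - esp_to_cfa).
}

void OnPop(CfiState& s) {
  s.esp_to_cfa -= 4;  // .cfi_adjust_cfa_offset -4
  // .cfi_restore reg: reg once again lives in the register itself.
}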
+
 /* Frame size must be 16-byte aligned.
- * Remember about 4 bytes for return address
+ * Remember about 4 bytes for return address + 4 * 4 for spills
  */
-#define FRAME_SIZE     44
+#define FRAME_SIZE     28
 
 /* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3        (FRAME_SIZE + 16)
-#define IN_ARG2        (FRAME_SIZE + 12)
-#define IN_ARG1        (FRAME_SIZE +  8)
-#define IN_ARG0        (FRAME_SIZE +  4)
-#define CALLER_RP      (FRAME_SIZE +  0)
+#define IN_ARG3        (FRAME_SIZE + 16 + 16)
+#define IN_ARG2        (FRAME_SIZE + 16 + 12)
+#define IN_ARG1        (FRAME_SIZE + 16 +  8)
+#define IN_ARG0        (FRAME_SIZE + 16 +  4)
 /* Spill offsets relative to %esp */
-#define EBP_SPILL      (FRAME_SIZE -  4)
-#define EDI_SPILL      (FRAME_SIZE -  8)
-#define ESI_SPILL      (FRAME_SIZE - 12)
-#define EBX_SPILL      (FRAME_SIZE - 16)
-#define LOCAL0         (FRAME_SIZE - 20)
-#define LOCAL1         (FRAME_SIZE - 24)
-#define LOCAL2         (FRAME_SIZE - 28)
+#define LOCAL0         (FRAME_SIZE -  4)
+#define LOCAL1         (FRAME_SIZE -  8)
+#define LOCAL2         (FRAME_SIZE - 12)
 /* Out Arg offsets, relative to %esp */
 #define OUT_ARG3       ( 12)
 #define OUT_ARG2       (  8)
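A quick consistency check on the new layout (a sketch; the constants mirror the defines above): the caller's return address (4 bytes), the four spilled registers (16 bytes), and the frame (28 bytes) total 48 bytes, which keeps %esp 16-byte aligned.

constexpr int kRetAddr = 4;
constexpr int kSpills = 4 * 4;     // ebp, edi, esi, ebx
constexpr int kFrameSize = 28;     // FRAME_SIZE above
static_assert((kRetAddr + kSpills + kFrameSize) % 16 == 0,
              "stack stays 16-byte aligned inside ExecuteMterpImpl");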
@@ -163,13 +170,26 @@
 #define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
 #define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
 
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
 /*
- *
- * The reference interpreter performs explicit suspect checks, which is somewhat wasteful.
- * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
- * mterp should do so as well.
+ * Profile branch. rINST should contain the offset. %eax is scratch.
  */
-#define MTERP_SUSPEND 0
+.macro MTERP_PROFILE_BRANCH
+#ifdef MTERP_PROFILE_BRANCHES
+    EXPORT_PC
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    leal    OFF_FP_SHADOWFRAME(rFP), %eax
+    movl    %eax, OUT_ARG1(%esp)
+    movl    rINST, OUT_ARG2(%esp)
+    call    SYMBOL(MterpProfileBranch)
+    testb   %al, %al
+    jnz     MterpOnStackReplacement
+    RESTORE_IBASE
+#endif
+.endm
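A hedged C++ rendering of MTERP_PROFILE_BRANCH (the helper's exact signature is an assumption; a true result means on-stack replacement was requested):

#include <cstdint>

struct Thread;
struct ShadowFrame;
bool MterpProfileBranch(Thread* self, ShadowFrame* sf, int32_t offset);  // assumed

bool ProfileBranch(Thread* self, ShadowFrame* sf, int32_t offset) {
  // EXPORT_PC runs first so any deoptimization sees the correct dex pc.
  return MterpProfileBranch(self, sf, offset);  // true -> MterpOnStackReplacement
}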
 
 /*
  * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
@@ -347,16 +367,18 @@
 
 SYMBOL(ExecuteMterpImpl):
     .cfi_startproc
+    .cfi_def_cfa esp, 4
+
+    /* Spill callee save regs */
+    PUSH    %ebp
+    PUSH    %edi
+    PUSH    %esi
+    PUSH    %ebx
+
     /* Allocate frame */
     subl    $FRAME_SIZE, %esp
     .cfi_adjust_cfa_offset FRAME_SIZE
 
-    /* Spill callee save regs */
-    movl    %ebp, EBP_SPILL(%esp)
-    movl    %edi, EDI_SPILL(%esp)
-    movl    %esi, ESI_SPILL(%esp)
-    movl    %ebx, EBX_SPILL(%esp)
-
     /* Load ShadowFrame pointer */
     movl    IN_ARG2(%esp), %edx
 
@@ -1076,17 +1098,12 @@
  * double to get a byte offset.
  */
     /* goto +AA */
-    movsbl  rINSTbl, %eax                   # eax <- ssssssAA
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    movsbl  rINSTbl, rINST                  # rINST <- ssssssAA
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      1f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
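The branch arithmetic above in C++ form: the signed code-unit offset is doubled into a byte offset, and any offset <= 0 (a backward branch or self-loop) takes the suspend-check path.

#include <cstdint>

int32_t GotoByteOffset(int8_t aa) {      // goto +AA
  return static_cast<int32_t>(aa) * 2;   // dex code units are 2 bytes
}
// e.g. aa = -2 (0xFE) yields -4 bytes; since -4 <= 0, a suspend check runs.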
 
 /* ------------------------------ */
@@ -1100,17 +1117,12 @@
  * double to get a byte offset.
  */
     /* goto/16 +AAAA */
-    movswl  2(rPC), %eax                    # eax <- ssssAAAA
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    movswl  2(rPC), rINST                   # rINST <- ssssAAAA
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      1f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 /* ------------------------------ */
@@ -1129,17 +1141,12 @@
  * offset to byte offset.
  */
     /* goto/32 +AAAAAAAA */
-    movl    2(rPC), %eax                    # eax <- AAAAAAAA
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    movl    2(rPC), rINST                   # rINST <- AAAAAAAA
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      1f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 /* ------------------------------ */
@@ -1162,17 +1169,13 @@
     movl    %eax, OUT_ARG1(%esp)            # ARG1 <- vAA
     movl    %ecx, OUT_ARG0(%esp)            # ARG0 <- switchData
     call    SYMBOL(MterpDoPackedSwitch)
-    addl    %eax, %eax
-    leal    (rPC, %eax), rPC
+    movl    %eax, rINST
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST
+    leal    (rPC, rINST), rPC
     FETCH_INST
     REFRESH_IBASE
-    jg      1f
-#if MTERP_SUSPEND
-    #     REFRESH_IBASE - we did it above.
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue
     GOTO_NEXT
 
 /* ------------------------------ */
@@ -1196,17 +1199,13 @@
     movl    %eax, OUT_ARG1(%esp)            # ARG1 <- vAA
     movl    %ecx, OUT_ARG0(%esp)            # ARG0 <- switchData
     call    SYMBOL(MterpDoSparseSwitch)
-    addl    %eax, %eax
-    leal    (rPC, %eax), rPC
+    movl    %eax, rINST
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST
+    leal    (rPC, rINST), rPC
     FETCH_INST
     REFRESH_IBASE
-    jg      1f
-#if MTERP_SUSPEND
-    #     REFRESH_IBASE - we did it above.
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue
     GOTO_NEXT
 
 
@@ -1424,20 +1423,15 @@
     GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    movl    $2, %eax                       # assume not taken
+    movl    $2, rINST
     jne   1f
-    movswl  2(rPC),%eax                     # Get signed branch offset
+    movswl  2(rPC), rINST                   # Get signed branch offset
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1459,20 +1453,15 @@
     GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    movl    $2, %eax                       # assume not taken
+    movl    $2, rINST
     je   1f
-    movswl  2(rPC),%eax                     # Get signed branch offset
+    movswl  2(rPC), rINST                   # Get signed branch offset
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1494,20 +1483,15 @@
     GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    movl    $2, %eax                       # assume not taken
+    movl    $2, rINST
     jge   1f
-    movswl  2(rPC),%eax                     # Get signed branch offset
+    movswl  2(rPC), rINST                   # Get signed branch offset
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1529,20 +1513,15 @@
     GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    movl    $2, %eax                       # assume not taken
+    movl    $2, rINST
     jl   1f
-    movswl  2(rPC),%eax                     # Get signed branch offset
+    movswl  2(rPC), rINST                   # Get signed branch offset
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1564,20 +1543,15 @@
     GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    movl    $2, %eax                       # assume not taken
+    movl    $2, rINST
     jle   1f
-    movswl  2(rPC),%eax                     # Get signed branch offset
+    movswl  2(rPC), rINST                   # Get signed branch offset
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1599,20 +1573,15 @@
     GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    movl    $2, %eax                       # assume not taken
+    movl    $2, rINST
     jg   1f
-    movswl  2(rPC),%eax                     # Get signed branch offset
+    movswl  2(rPC), rINST                   # Get signed branch offset
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1630,20 +1599,15 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    movl    $2, %eax                       # assume branch not taken
+    movl    $2, rINST
     jne   1f
-    movswl  2(rPC),%eax                     # fetch signed displacement
+    movswl  2(rPC), rINST                   # fetch signed displacement
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1661,20 +1625,15 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    movl    $2, %eax                       # assume branch not taken
+    movl    $2, rINST
     je   1f
-    movswl  2(rPC),%eax                     # fetch signed displacement
+    movswl  2(rPC), rINST                   # fetch signed displacement
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1692,20 +1651,15 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    movl    $2, %eax                       # assume branch not taken
+    movl    $2, rINST
     jge   1f
-    movswl  2(rPC),%eax                     # fetch signed displacement
+    movswl  2(rPC), rINST                   # fetch signed displacement
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1723,20 +1677,15 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    movl    $2, %eax                       # assume branch not taken
+    movl    $2, rINST
     jl   1f
-    movswl  2(rPC),%eax                     # fetch signed displacement
+    movswl  2(rPC), rINST                   # fetch signed displacement
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1754,20 +1703,15 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    movl    $2, %eax                       # assume branch not taken
+    movl    $2, rINST
     jle   1f
-    movswl  2(rPC),%eax                     # fetch signed displacement
+    movswl  2(rPC), rINST                   # fetch signed displacement
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1785,20 +1729,15 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    movl    $2, %eax                       # assume branch not taken
+    movl    $2, rINST
     jg   1f
-    movswl  2(rPC),%eax                     # fetch signed displacement
+    movswl  2(rPC), rINST                   # fetch signed displacement
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -3059,8 +2998,13 @@
     call    SYMBOL(MterpInvokeVirtual)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
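The post-invoke sequence these hunks introduce, as a hedged C++ sketch (helper names come from the diff; signatures are assumptions):

bool MterpShouldSwitchInterpreters();  // assumed signature

enum class Next { kException, kFallback, kContinue };

Next AfterInvoke(bool invoke_ok) {
  if (!invoke_ok) return Next::kException;       // jz MterpException
  // ADVANCE_PC 3 happens before the check, so a fallback to the reference
  // interpreter resumes at the instruction after the invoke.
  if (MterpShouldSwitchInterpreters()) return Next::kFallback;
  return Next::kContinue;                        // RESTORE_IBASE / FETCH_INST / GOTO_NEXT
}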
 
 /*
  * Handle a virtual method call.
@@ -3092,8 +3036,13 @@
     call    SYMBOL(MterpInvokeSuper)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 /*
  * Handle a "super" method call.
@@ -3125,8 +3074,13 @@
     call    SYMBOL(MterpInvokeDirect)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3151,8 +3105,13 @@
     call    SYMBOL(MterpInvokeStatic)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 
@@ -3178,8 +3137,13 @@
     call    SYMBOL(MterpInvokeInterface)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 /*
  * Handle an interface method call.
@@ -3225,8 +3189,13 @@
     call    SYMBOL(MterpInvokeVirtualRange)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3251,8 +3220,13 @@
     call    SYMBOL(MterpInvokeSuperRange)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3277,8 +3251,13 @@
     call    SYMBOL(MterpInvokeDirectRange)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3303,8 +3282,13 @@
     call    SYMBOL(MterpInvokeStaticRange)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3329,8 +3313,13 @@
     call    SYMBOL(MterpInvokeInterfaceRange)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -6072,8 +6061,13 @@
     call    SYMBOL(MterpInvokeVirtualQuick)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -6098,8 +6092,13 @@
     call    SYMBOL(MterpInvokeVirtualQuickRange)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -12818,7 +12817,6 @@
  * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
  * TUNING: for consistency, we may want to just go ahead and handle these here.
  */
-#define MTERP_LOGGING 0
 common_errDivideByZero:
     EXPORT_PC
 #if MTERP_LOGGING
@@ -12922,13 +12920,17 @@
     call    SYMBOL(MterpHandleException)
     testb   %al, %al
     jz      MterpExceptionReturn
-    REFRESH_IBASE
     movl    OFF_FP_CODE_ITEM(rFP), %eax
     movl    OFF_FP_DEX_PC(rFP), %ecx
     lea     CODEITEM_INSNS_OFFSET(%eax), rPC
     lea     (rPC, %ecx, 2), rPC
     movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
+    /* Do we need to switch interpreters? */
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     /* resume execution at catch block */
+    REFRESH_IBASE
     FETCH_INST
     GOTO_NEXT
     /* NOTE: no fallthrough */
@@ -12949,6 +12951,21 @@
     GOTO_NEXT
 
 /*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    movl    rINST, OUT_ARG2(%esp)
+    call    SYMBOL(MterpLogOSR)
+#endif
+    movl    $1, %eax
+    jmp     MterpDone
+
+/*
  * Bail out to reference interpreter.
  */
 MterpFallback:
@@ -12977,17 +12994,16 @@
     movl    %ecx, 4(%edx)
     mov     $1, %eax
 MterpDone:
-    /* Restore callee save register */
-    movl    EBP_SPILL(%esp), %ebp
-    movl    EDI_SPILL(%esp), %edi
-    movl    ESI_SPILL(%esp), %esi
-    movl    EBX_SPILL(%esp), %ebx
-
     /* pop up frame */
     addl    $FRAME_SIZE, %esp
     .cfi_adjust_cfa_offset -FRAME_SIZE
-    ret
 
+    /* Restore callee save register */
+    POP     %ebx
+    POP     %esi
+    POP     %edi
+    POP     %ebp
+    ret
     .cfi_endproc
     SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
 
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
new file mode 100644
index 0000000..a1360e0
--- /dev/null
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -0,0 +1,11960 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'x86_64'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: x86_64/header.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+  Art assembly interpreter notes:
+
+  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+  handle invoke; allows higher-level code to create the frame & shadow frame).
+
+  Once that's working, support direct entry code & eliminate the shadow frame (and
+  excess locals allocation).
+
+  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
+  base of the vreg array within the shadow frame.  Access the other fields,
+  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
+  the shadow frame mechanism of double-storing object references - via rFP &
+  number_of_vregs_.
+
+ */
+
+/*
+x86_64 ABI general notes:
+
+Caller save set:
+   rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
+Callee save set:
+   rbx, rbp, r12-r15
+Return regs:
+   32-bit in eax
+   64-bit in rax
+   fp on xmm0
+
+First 8 fp parameters come in xmm0-xmm7.
+First 6 non-fp parameters come in rdi, rsi, rdx, rcx, r8, r9.
+Other parameters are passed on the stack, pushed right-to-left.  On entry to the
+target, the first stack param is at 8(%rsp).
+
+Stack must be 16-byte aligned to support SSE in native code.
+
+If we're not doing variable stack allocation (alloca), the frame pointer can be
+eliminated and all arg references adjusted to be rsp relative.
+*/
+
+/*
+Mterp and x86_64 notes:
+
+Some key interpreter variables will be assigned to registers.
+
+  nick     reg   purpose
+  rSELF    rbp   pointer to ThreadSelf.
+  rPC      r12   interpreted program counter, used for fetching instructions
+  rFP      r13   interpreted frame pointer, used for accessing locals and args
+  rINSTw   bx    first 16-bit code of current instruction
+  rINSTbl  bl    opcode portion of instruction word
+  rINSTbh  bh    high byte of inst word, usually contains src/tgt reg names
+  rIBASE   r14   base of instruction handler table
+  rREFS    r15   base of object references in shadow frame.
+
+Notes:
+   o High order 16 bits of ebx must be zero on entry to handler
+   o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
+   o eax and ecx are scratch, rINSTw/ebx sometimes scratch
+
+Macros are provided for common operations.  Each macro MUST emit only
+one instruction to make instruction-counting easier.  They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/*
+ * Handle Mac compiler specifics.
+ */
+#if defined(__APPLE__)
+    #define MACRO_LITERAL(value) $(value)
+    #define FUNCTION_TYPE(name)
+    #define SIZE(start,end)
+    // Mac OS' symbols have an _ prefix.
+    #define SYMBOL(name) _ ## name
+#else
+    #define MACRO_LITERAL(value) $value
+    #define FUNCTION_TYPE(name) .type name, @function
+    #define SIZE(start,end) .size start, .-end
+    #define SYMBOL(name) name
+#endif
+
+.macro PUSH _reg
+    pushq \_reg
+    .cfi_adjust_cfa_offset 8
+    .cfi_rel_offset \_reg, 0
+.endm
+
+.macro POP _reg
+    popq \_reg
+    .cfi_adjust_cfa_offset -8
+    .cfi_restore \_reg
+.endm
+
+/* Frame size must be 16-byte aligned.
+ * Remember about 8 bytes for return address + 6 * 8 for spills.
+ */
+#define FRAME_SIZE     8
+
+/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
+#define IN_ARG3        %rcx
+#define IN_ARG2        %rdx
+#define IN_ARG1        %rsi
+#define IN_ARG0        %rdi
+/* Out Args  */
+#define OUT_ARG3       %rcx
+#define OUT_ARG2       %rdx
+#define OUT_ARG1       %rsi
+#define OUT_ARG0       %rdi
+#define OUT_32_ARG3    %ecx
+#define OUT_32_ARG2    %edx
+#define OUT_32_ARG1    %esi
+#define OUT_32_ARG0    %edi
+#define OUT_FP_ARG1    %xmm1
+#define OUT_FP_ARG0    %xmm0
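The same 16-byte alignment argument as on x86, sketched with this file's constants: the return address (8 bytes), six pushed callee saves (48 bytes), and FRAME_SIZE (8 bytes) total 64 bytes.

constexpr int kRetAddr = 8;
constexpr int kSpills = 6 * 8;     // rbx, rbp, r12-r15
constexpr int kFrameSize = 8;      // FRAME_SIZE above
static_assert((kRetAddr + kSpills + kFrameSize) % 16 == 0,
              "stack stays 16-byte aligned inside ExecuteMterpImpl");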
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rSELF    %rbp
+#define rPC      %r12
+#define rFP      %r13
+#define rINST    %ebx
+#define rINSTq   %rbx
+#define rINSTw   %bx
+#define rINSTbh  %bh
+#define rINSTbl  %bl
+#define rIBASE   %r14
+#define rREFS    %r15
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
+ * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/*
+ * Profile branch. rINST should contain the offset. %eax is scratch.
+ */
+.macro MTERP_PROFILE_BRANCH
+#ifdef MTERP_PROFILE_BRANCHES
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movl    rINST, OUT_32_ARG2
+    call    SYMBOL(MterpProfileBranch)
+    testb   %al, %al
+    jnz     MterpOnStackReplacement
+#endif
+.endm
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+    movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
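A sketch of the pointer/index relationship described above: mterp exports a direct pointer into the insns array, and the conventional dex pc can be recovered from it by pointer subtraction.

#include <cstdint>

uint32_t DexPcFromPointer(const uint16_t* insns, const uint16_t* dex_pc_ptr) {
  return static_cast<uint32_t>(dex_pc_ptr - insns);  // in 16-bit code units
}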
+
+/*
+ * Refresh handler table.
+ * The handler table base is kept in a caller-save register, so we must restore
+ * it after each call.  It is also clobbered by some 64-bit operations (like
+ * imul), so we should restore it in those cases as well.
+ */
+.macro REFRESH_IBASE
+    movq    THREAD_CURRENT_IBASE_OFFSET(rSELF), rIBASE
+.endm
+
+/*
+ * Refresh rINST.
+ * On entry to a handler, rINST does not contain the opcode number.
+ * However some utilities require the full value, so this macro
+ * restores the opcode number.
+ */
+.macro REFRESH_INST _opnum
+    movb    rINSTbl, rINSTbh
+    movb    $\_opnum, rINSTbl
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINSTw.  Does not advance rPC.
+ */
+.macro FETCH_INST
+    movzwq  (rPC), rINSTq
+.endm
+
+/*
+ * Remove opcode from rINST, compute the address of handler and jump to it.
+ */
+.macro GOTO_NEXT
+    movzx   rINSTbl,%eax
+    movzbl  rINSTbh,rINST
+    shll    MACRO_LITERAL(7), %eax
+    addq    rIBASE, %rax
+    jmp     *%rax
+.endm
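GOTO_NEXT's dispatch in C++ form: the low byte of the instruction word selects a 128-byte handler slot (the shll $7), while the high byte becomes the new rINST.

#include <cstdint>

uintptr_t NextHandler(uintptr_t ibase, uint16_t inst_word) {
  const uint8_t opcode = inst_word & 0xff;                // rINSTbl
  return ibase + (static_cast<uintptr_t>(opcode) << 7);   // 128-byte slots
}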
+
+/*
+ * Advance rPC by instruction count.
+ */
+.macro ADVANCE_PC _count
+    leaq    2*\_count(rPC), rPC
+.endm
+
+/*
+ * Advance rPC by instruction count, fetch instruction and jump to handler.
+ */
+.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
+    ADVANCE_PC \_count
+    FETCH_INST
+    GOTO_NEXT
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
+#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
+
+.macro GET_VREG _reg _vreg
+    movl    (rFP,\_vreg,4), \_reg
+.endm
+
+/* Read wide value. */
+.macro GET_WIDE_VREG _reg _vreg
+    movq    (rFP,\_vreg,4), \_reg
+.endm
+
+.macro SET_VREG _reg _vreg
+    movl    \_reg, (rFP,\_vreg,4)
+    movl    MACRO_LITERAL(0), (rREFS,\_vreg,4)
+.endm
+
+/* Write wide value. reg is clobbered. */
+.macro SET_WIDE_VREG _reg _vreg
+    movq    \_reg, (rFP,\_vreg,4)
+    xorq    \_reg, \_reg
+    movq    \_reg, (rREFS,\_vreg,4)
+.endm
+
+.macro SET_VREG_OBJECT _reg _vreg
+    movl    \_reg, (rFP,\_vreg,4)
+    movl    \_reg, (rREFS,\_vreg,4)
+.endm
+
+.macro GET_VREG_HIGH _reg _vreg
+    movl    4(rFP,\_vreg,4), \_reg
+.endm
+
+.macro SET_VREG_HIGH _reg _vreg
+    movl    \_reg, 4(rFP,\_vreg,4)
+    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
+.endm
+
+.macro CLEAR_REF _vreg
+    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
+.endm
+
+.macro CLEAR_WIDE_REF _vreg
+    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
+    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
+.endm
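A C++ view of the double-store scheme implemented by the macros above: every vreg is written twice, once into the value array (rFP) and once into the reference array (rREFS); non-reference writes must null the reference slot so the GC ignores it.

#include <cstdint>

void SetVReg(uint32_t* vregs, uint32_t* refs, uint32_t v, uint32_t value) {
  vregs[v] = value;
  refs[v] = 0;       // not a reference; hide it from the GC
}

void SetVRegObject(uint32_t* vregs, uint32_t* refs, uint32_t v, uint32_t ref) {
  vregs[v] = ref;
  refs[v] = ref;     // visible to the GC through rREFS
}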
+
+/* File: x86_64/entry.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+    .text
+    .global SYMBOL(ExecuteMterpImpl)
+    FUNCTION_TYPE(ExecuteMterpImpl)
+
+/*
+ * On entry:
+ *  0  Thread* self
+ *  1  code_item
+ *  2  ShadowFrame
+ *  3  JValue* result_register
+ *
+ */
+
+SYMBOL(ExecuteMterpImpl):
+    .cfi_startproc
+    .cfi_def_cfa rsp, 8
+
+    /* Spill callee save regs */
+    PUSH %rbx
+    PUSH %rbp
+    PUSH %r12
+    PUSH %r13
+    PUSH %r14
+    PUSH %r15
+
+    /* Allocate frame */
+    subq    $FRAME_SIZE, %rsp
+    .cfi_adjust_cfa_offset FRAME_SIZE
+
+    /* Remember the return register */
+    movq    IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
+
+    /* Remember the code_item */
+    movq    IN_ARG1, SHADOWFRAME_CODE_ITEM_OFFSET(IN_ARG2)
+
+    /* set up "named" registers */
+    movl    SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
+    leaq    SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
+    leaq    (rFP, %rax, 4), rREFS
+    movl    SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
+    leaq    CODEITEM_INSNS_OFFSET(IN_ARG1), rPC
+    leaq    (rPC, %rax, 2), rPC
+    EXPORT_PC
+
+    /* Starting ibase */
+    movq    IN_ARG0, rSELF
+    REFRESH_IBASE
+
+    /* start executing the instruction at rPC */
+    FETCH_INST
+    GOTO_NEXT
+    /* NOTE: no fallthrough */
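The entry contract laid out above, as a C++ declaration (parameter order matches IN_ARG0..IN_ARG3; the exact ART types are stubbed here):

struct Thread;
struct CodeItem;
struct ShadowFrame;
union JValue;

extern "C" bool ExecuteMterpImpl(Thread* self,
                                 const CodeItem* code_item,
                                 ShadowFrame* shadow_frame,
                                 JValue* result_register);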
+
+
+    .global SYMBOL(artMterpAsmInstructionStart)
+    FUNCTION_TYPE(SYMBOL(artMterpAsmInstructionStart))
+SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
+    .text
+
+/* ------------------------------ */
+    .balign 128
+.L_op_nop: /* 0x00 */
+/* File: x86_64/op_nop.S */
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move: /* 0x01 */
+/* File: x86_64/op_move.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    movl    rINST, %eax                     # eax <- BA
+    andb    $0xf, %al                      # eax <- A
+    shrl    $4, rINST                      # rINST <- B
+    GET_VREG %edx, rINSTq
+    .if 0
+    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_from16: /* 0x02 */
+/* File: x86_64/op_move_from16.S */
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    movzwq  2(rPC), %rax                    # eax <- BBBB
+    GET_VREG %edx, %rax                     # edx <- fp[BBBB]
+    .if 0
+    SET_VREG_OBJECT %edx, rINSTq            # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, rINSTq                   # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_16: /* 0x03 */
+/* File: x86_64/op_move_16.S */
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    movzwq  4(rPC), %rcx                    # ecx <- BBBB
+    movzwq  2(rPC), %rax                    # eax <- AAAA
+    GET_VREG %edx, %rcx
+    .if 0
+    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_wide: /* 0x04 */
+/* File: x86_64/op_move_wide.S */
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movl    rINST, %ecx                     # ecx <- BA
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rdx, rINSTq              # rdx <- v[B]
+    SET_WIDE_VREG %rdx, %rcx                # v[A] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_wide_from16: /* 0x05 */
+/* File: x86_64/op_move_wide_from16.S */
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movzwl  2(rPC), %ecx                    # ecx <- BBBB
+    GET_WIDE_VREG %rdx, %rcx                # rdx <- v[B]
+    SET_WIDE_VREG %rdx, rINSTq              # v[A] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_wide_16: /* 0x06 */
+/* File: x86_64/op_move_wide_16.S */
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movzwq  4(rPC), %rcx                    # ecx<- BBBB
+    movzwq  2(rPC), %rax                    # eax<- AAAA
+    GET_WIDE_VREG %rdx, %rcx                # rdx <- v[B]
+    SET_WIDE_VREG %rdx, %rax                # v[A] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_object: /* 0x07 */
+/* File: x86_64/op_move_object.S */
+/* File: x86_64/op_move.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    movl    rINST, %eax                     # eax <- BA
+    andb    $0xf, %al                      # eax <- A
+    shrl    $4, rINST                      # rINST <- B
+    GET_VREG %edx, rINSTq
+    .if 1
+    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_object_from16: /* 0x08 */
+/* File: x86_64/op_move_object_from16.S */
+/* File: x86_64/op_move_from16.S */
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    movzwq  2(rPC), %rax                    # eax <- BBBB
+    GET_VREG %edx, %rax                     # edx <- fp[BBBB]
+    .if 1
+    SET_VREG_OBJECT %edx, rINSTq            # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, rINSTq                   # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_object_16: /* 0x09 */
+/* File: x86_64/op_move_object_16.S */
+/* File: x86_64/op_move_16.S */
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    movzwq  4(rPC), %rcx                    # ecx <- BBBB
+    movzwq  2(rPC), %rax                    # eax <- AAAA
+    GET_VREG %edx, %rcx
+    .if 1
+    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_result: /* 0x0a */
+/* File: x86_64/op_move_result.S */
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    movq    OFF_FP_RESULT_REGISTER(rFP), %rax    # get pointer to result JType.
+    movl    (%rax), %eax                    # r0 <- result.i.
+    .if 0
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- fp[B]
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_result_wide: /* 0x0b */
+/* File: x86_64/op_move_result_wide.S */
+    /* move-result-wide vAA */
+    movq    OFF_FP_RESULT_REGISTER(rFP), %rax    # get pointer to result JType.
+    movq    (%rax), %rdx                         # Get wide
+    SET_WIDE_VREG %rdx, rINSTq                   # v[AA] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_result_object: /* 0x0c */
+/* File: x86_64/op_move_result_object.S */
+/* File: x86_64/op_move_result.S */
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    movq    OFF_FP_RESULT_REGISTER(rFP), %rax    # get pointer to result JType.
+    movl    (%rax), %eax                    # r0 <- result.i.
+    .if 1
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- fp[B]
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_move_exception: /* 0x0d */
+/* File: x86_64/op_move_exception.S */
+    /* move-exception vAA */
+    movl    THREAD_EXCEPTION_OFFSET(rSELF), %eax
+    SET_VREG_OBJECT %eax, rINSTq            # fp[AA] <- exception object
+    movl    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return_void: /* 0x0e */
+/* File: x86_64/op_return_void.S */
+    .extern MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
+    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+    jz      1f
+    movq    rSELF, OUT_ARG0
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    xorq    %rax, %rax
+    jmp     MterpReturn
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return: /* 0x0f */
+/* File: x86_64/op_return.S */
+/*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
+    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+    jz      1f
+    movq    rSELF, OUT_ARG0
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    GET_VREG %eax, rINSTq                   # eax <- vAA
+    jmp     MterpReturn
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return_wide: /* 0x10 */
+/* File: x86_64/op_return_wide.S */
+/*
+ * Return a 64-bit value.
+ */
+    /* return-wide vAA */
+    .extern MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
+    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+    jz      1f
+    movq    rSELF, OUT_ARG0
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    GET_WIDE_VREG %rax, rINSTq              # rax <- v[AA]
+    jmp     MterpReturn
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return_object: /* 0x11 */
+/* File: x86_64/op_return_object.S */
+/* File: x86_64/op_return.S */
+/*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
+    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+    jz      1f
+    movq    rSELF, OUT_ARG0
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    GET_VREG %eax, rINSTq                   # eax <- vAA
+    jmp     MterpReturn
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_4: /* 0x12 */
+/* File: x86_64/op_const_4.S */
+    /* const/4 vA, #+B */
+    movsbl  rINSTbl, %eax                   # eax <- ssssssBx
+    movl    $0xf, rINST
+    andl    %eax, rINST                     # rINST <- A
+    sarl    $4, %eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_16: /* 0x13 */
+/* File: x86_64/op_const_16.S */
+    /* const/16 vAA, #+BBBB */
+    movswl  2(rPC), %ecx                    # ecx <- ssssBBBB
+    SET_VREG %ecx, rINSTq                   # vAA <- ssssBBBB
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const: /* 0x14 */
+/* File: x86_64/op_const.S */
+    /* const vAA, #+BBBBbbbb */
+    movl    2(rPC), %eax                    # grab all 32 bits at once
+    SET_VREG %eax, rINSTq                   # vAA<- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_high16: /* 0x15 */
+/* File: x86_64/op_const_high16.S */
+    /* const/high16 vAA, #+BBBB0000 */
+    movzwl  2(rPC), %eax                    # eax <- 0000BBBB
+    sall    $16, %eax                      # eax <- BBBB0000
+    SET_VREG %eax, rINSTq                   # vAA <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_wide_16: /* 0x16 */
+/* File: x86_64/op_const_wide_16.S */
+    /* const-wide/16 vAA, #+BBBB */
+    movswq  2(rPC), %rax                    # rax <- ssssBBBB
+    SET_WIDE_VREG %rax, rINSTq              # store
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_wide_32: /* 0x17 */
+/* File: x86_64/op_const_wide_32.S */
+    /* const-wide/32 vAA, #+BBBBbbbb */
+    movslq  2(rPC), %rax                    # rax <- ssssssssBBBBbbbb
+    SET_WIDE_VREG %rax, rINSTq              # store
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_wide: /* 0x18 */
+/* File: x86_64/op_const_wide.S */
+    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+    movq    2(rPC), %rax                    # rax <- HHHHhhhhBBBBbbbb
+    SET_WIDE_VREG %rax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_wide_high16: /* 0x19 */
+/* File: x86_64/op_const_wide_high16.S */
+    /* const-wide/high16 vAA, #+BBBB000000000000 */
+    movzwq  2(rPC), %rax                    # rax <- 0000BBBB
+    salq    $48, %rax                      # rax <- BBBB000000000000
+    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_string: /* 0x1a */
+/* File: x86_64/op_const_string.S */
+    /* const/string vAA, String@BBBB */
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # OUT_ARG0 <- BBBB
+    movq    rINSTq, OUT_ARG1
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_string_jumbo: /* 0x1b */
+/* File: x86_64/op_const_string_jumbo.S */
+    /* const/string vAA, String@BBBBBBBB */
+    EXPORT_PC
+    movl    2(rPC), OUT_32_ARG0             # OUT_32_ARG0 <- BBBB
+    movq    rINSTq, OUT_ARG1
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+/* ------------------------------ */
+    .balign 128
+.L_op_const_class: /* 0x1c */
+/* File: x86_64/op_const_class.S */
+    /* const/class vAA, Class@BBBB */
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # eax <- OUT_ARG0
+    movq    rINSTq, OUT_ARG1
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpConstClass)         # (index, tgt_reg, shadow_frame, self)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_monitor_enter: /* 0x1d */
+/* File: x86_64/op_monitor_enter.S */
+/*
+ * Synchronize on an object.
+ */
+    /* monitor-enter vAA */
+    EXPORT_PC
+    GET_VREG OUT_32_ARG0, rINSTq
+    movq    rSELF, OUT_ARG1
+    call    SYMBOL(artLockObjectFromCode)   # (object, self)
+    testq   %rax, %rax
+    jnz     MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_monitor_exit: /* 0x1e */
+/* File: x86_64/op_monitor_exit.S */
+/*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction.  See the Dalvik
+ * instruction spec.
+ */
+    /* monitor-exit vAA */
+    EXPORT_PC
+    GET_VREG OUT_32_ARG0, rINSTq
+    movq    rSELF, OUT_ARG1
+    call    SYMBOL(artUnlockObjectFromCode) # (object, self)
+    testq   %rax, %rax
+    jnz     MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_check_cast: /* 0x1f */
+/* File: x86_64/op_check_cast.S */
+/*
+ * Check to see if a cast from one class to another is allowed.
+ */
+    /* check-cast vAA, class@BBBB */
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # OUT_ARG0 <- BBBB
+    leaq    VREG_ADDRESS(rINSTq), OUT_ARG1
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpCheckCast)          # (index, &obj, method, self)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_instance_of: /* 0x20 */
+/* File: x86_64/op_instance_of.S */
+/*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+    /* instance-of vA, vB, class@CCCC */
+    EXPORT_PC
+    movzwl  2(rPC), OUT_32_ARG0             # OUT_32_ARG0 <- CCCC
+    movl    rINST, %eax                     # eax <- BA
+    sarl    $4, %eax                       # eax <- B
+    leaq    VREG_ADDRESS(%rax), OUT_ARG1    # Get object address
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpInstanceOf)         # (index, &obj, method, self)
+    movsbl  %al, %eax
+    cmpq    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException
+    andb    $0xf, rINSTbl                  # rINSTbl <- A
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_array_length: /* 0x21 */
+/* File: x86_64/op_array_length.S */
+/*
+ * Return the length of an array.
+ */
+    movl    rINST, %eax                     # eax <- BA
+    sarl    $4, rINST                      # rINST <- B
+    GET_VREG %ecx, rINSTq                   # ecx <- vB (object ref)
+    testl   %ecx, %ecx                      # is null?
+    je      common_errNullObject
+    andb    $0xf, %al                      # eax <- A
+    movl    MIRROR_ARRAY_LENGTH_OFFSET(%rcx), rINST
+    SET_VREG rINST, %rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
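+/*
+ * The A/B nibble decode used by instance-of and array-length above,
+ * as C (sketch; obj is vB, already null-checked by the handler):
+ *
+ * uint32_t vB = rINST >> 4;        // source (object) register
+ * uint32_t vA = rINST & 0xf;       // destination register
+ * int32_t len = *(int32_t*)((char*)obj + MIRROR_ARRAY_LENGTH_OFFSET);
+ */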
+
+/* ------------------------------ */
+    .balign 128
+.L_op_new_instance: /* 0x22 */
+/* File: x86_64/op_new_instance.S */
+/*
+ * Create a new instance of a class.
+ */
+    /* new-instance vAA, class@BBBB */
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rSELF, OUT_ARG1
+    REFRESH_INST 34
+    movq    rINSTq, OUT_ARG2
+    call    SYMBOL(MterpNewInstance)
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_new_array: /* 0x23 */
+/* File: x86_64/op_new_array.S */
+/*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+    /* new-array vA, vB, class@CCCC */
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    REFRESH_INST 35
+    movq    rINSTq, OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpNewArray)
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_filled_new_array: /* 0x24 */
+/* File: x86_64/op_filled_new_array.S */
+/*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+    .extern MterpFilledNewArray
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    movq    rSELF, OUT_ARG2
+    call    SYMBOL(MterpFilledNewArray)
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+/* ------------------------------ */
+    .balign 128
+.L_op_filled_new_array_range: /* 0x25 */
+/* File: x86_64/op_filled_new_array_range.S */
+/* File: x86_64/op_filled_new_array.S */
+/*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+    .extern MterpFilledNewArrayRange
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    movq    rSELF, OUT_ARG2
+    call    SYMBOL(MterpFilledNewArrayRange)
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_fill_array_data: /* 0x26 */
+/* File: x86_64/op_fill_array_data.S */
+    /* fill-array-data vAA, +BBBBBBBB */
+    EXPORT_PC
+    movslq  2(rPC), %rcx                    # rcx <- ssssssssBBBBbbbb (signed, as in packed-switch)
+    leaq    (rPC,%rcx,2), OUT_ARG1          # OUT_ARG1 <- PC + BBBBbbbb*2
+    GET_VREG OUT_32_ARG0, rINSTq            # OUT_ARG0 <- vAA (array object)
+    call    SYMBOL(MterpFillArrayData)      # (obj, payload)
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
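+/*
+ * The payload pointer computed above, as C (sketch): the 32-bit
+ * operand is a signed code-unit offset from this instruction to the
+ * fill-array-data-payload block, so it is scaled by 2 to get bytes.
+ *
+ * uint16_t* payload = (uint16_t*)rPC + (int32_t)BBBBBBBB;
+ * if (!MterpFillArrayData(vAA_obj, payload))   // zero return: exception
+ *     goto MterpPossibleException;
+ */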
+
+/* ------------------------------ */
+    .balign 128
+.L_op_throw: /* 0x27 */
+/* File: x86_64/op_throw.S */
+/*
+ * Throw an exception object in the current thread.
+ */
+    /* throw vAA */
+    EXPORT_PC
+    GET_VREG %eax, rINSTq                   # eax<- vAA (exception object)
+    testl   %eax, %eax                      # null object?
+    jz      common_errNullObject
+    movq    %rax, THREAD_EXCEPTION_OFFSET(rSELF)
+    jmp     MterpException
+
+/* ------------------------------ */
+    .balign 128
+.L_op_goto: /* 0x28 */
+/* File: x86_64/op_goto.S */
+/*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+    /* goto +AA */
+    movsbq  rINSTbl, rINSTq                 # rINSTq <- ssssssAA
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
+    GOTO_NEXT
+
+/* ------------------------------ */
+    .balign 128
+.L_op_goto_16: /* 0x29 */
+/* File: x86_64/op_goto_16.S */
+/*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+    /* goto/16 +AAAA */
+    movswq  2(rPC), rINSTq                  # rINSTq <- ssssAAAA
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AAAA * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # AAAA * 2 <= 0 => suspend check
+    GOTO_NEXT
+
+/* ------------------------------ */
+    .balign 128
+.L_op_goto_32: /* 0x2a */
+/* File: x86_64/op_goto_32.S */
+/*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Because we need the flags (SF/ZF) from the doubling, we use addq
+ * rather than lea to convert from Dalvik offset to byte offset.
+ */
+    /* goto/32 +AAAAAAAA */
+    movslq  2(rPC), rINSTq                  # rINSTq <- AAAAAAAA
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AAAAAAAA * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # AAAAAAAA * 2 <= 0 => suspend check
+    GOTO_NEXT
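+/*
+ * Branch math shared by all three goto forms above, as C (sketch;
+ * sign_extend/check_suspend are illustrative stand-ins):
+ *
+ * intptr_t disp = sign_extend(operand);   // code units
+ * rPC = (uint16_t*)rPC + disp;            // i.e. disp * 2 bytes
+ * if (disp <= 0) check_suspend();         // back edge or self-loop
+ */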
+
+/* ------------------------------ */
+    .balign 128
+.L_op_packed_switch: /* 0x2b */
+/* File: x86_64/op_packed_switch.S */
+/*
+ * Handle a packed-switch or sparse-switch instruction.  In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+    /* op vAA, +BBBB */
+    movslq  2(rPC), OUT_ARG0                # OUT_ARG0 <- ssssssssBBBBbbbb
+    leaq    (rPC,OUT_ARG0,2), OUT_ARG0      # OUT_ARG0 <- PC + BBBBbbbb*2
+    GET_VREG OUT_32_ARG1, rINSTq            # OUT_32_ARG1 <- vAA
+    call    SYMBOL(MterpDoPackedSwitch)
+    movslq  %eax, rINSTq
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue
+    GOTO_NEXT
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sparse_switch: /* 0x2c */
+/* File: x86_64/op_sparse_switch.S */
+/* File: x86_64/op_packed_switch.S */
+/*
+ * Handle a packed-switch or sparse-switch instruction.  In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+    /* op vAA, +BBBB */
+    movslq  2(rPC), OUT_ARG0                # OUT_ARG0 <- ssssssssBBBBbbbb
+    leaq    (rPC,OUT_ARG0,2), OUT_ARG0      # OUT_ARG0 <- PC + BBBBbbbb*2
+    GET_VREG OUT_32_ARG1, rINSTq            # OUT_32_ARG1 <- vAA
+    call    SYMBOL(MterpDoSparseSwitch)
+    movslq  %eax, rINSTq
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue
+    GOTO_NEXT
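+/*
+ * Dispatch contract assumed by both switch handlers above (sketch):
+ * the helper returns a signed code-unit offset, either that of the
+ * matching case or the size of the switch instruction itself (3)
+ * when no case matches, and the handler then branches like goto/32.
+ *
+ * int32_t off = MterpDoPackedSwitch(payload, vAA);
+ * rPC = (uint16_t*)rPC + off;
+ */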
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmpl_float: /* 0x2d */
+/* File: x86_64/op_cmpl_float.S */
+/* File: x86_64/fpcmp.S */
+/*
+ * Compare two floating-point values.  Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ *     if (x == y) {
+ *         return 0;
+ *     } else if (x < y) {
+ *         return -1;
+ *     } else if (x > y) {
+ *         return 1;
+ *     } else {
+ *         return nanval ? 1 : -1;
+ *     }
+ * }
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # ecx<- CC
+    movzbq  2(rPC), %rax                    # eax<- BB
+    movss VREG_ADDRESS(%rax), %xmm0
+    xor     %eax, %eax
+    ucomiss VREG_ADDRESS(%rcx), %xmm0
+    jp      .Lop_cmpl_float_nan_is_neg
+    je      .Lop_cmpl_float_finish
+    jb      .Lop_cmpl_float_less
+.Lop_cmpl_float_nan_is_pos:
+    addb    $1, %al
+    jmp     .Lop_cmpl_float_finish
+.Lop_cmpl_float_nan_is_neg:
+.Lop_cmpl_float_less:
+    movl    $-1, %eax
+.Lop_cmpl_float_finish:
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmpg_float: /* 0x2e */
+/* File: x86_64/op_cmpg_float.S */
+/* File: x86_64/fpcmp.S */
+/*
+ * Compare two floating-point values.  Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ *     if (x == y) {
+ *         return 0;
+ *     } else if (x < y) {
+ *         return -1;
+ *     } else if (x > y) {
+ *         return 1;
+ *     } else {
+ *         return nanval ? 1 : -1;
+ *     }
+ * }
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # ecx<- CC
+    movzbq  2(rPC), %rax                    # eax<- BB
+    movss VREG_ADDRESS(%rax), %xmm0
+    xor     %eax, %eax
+    ucomiss VREG_ADDRESS(%rcx), %xmm0
+    jp      .Lop_cmpg_float_nan_is_pos
+    je      .Lop_cmpg_float_finish
+    jb      .Lop_cmpg_float_less
+.Lop_cmpg_float_nan_is_pos:
+    addb    $1, %al
+    jmp     .Lop_cmpg_float_finish
+.Lop_cmpg_float_nan_is_neg:
+.Lop_cmpg_float_less:
+    movl    $-1, %eax
+.Lop_cmpg_float_finish:
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmpl_double: /* 0x2f */
+/* File: x86_64/op_cmpl_double.S */
+/* File: x86_64/fpcmp.S */
+/*
+ * Compare two floating-point values.  Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ *     if (x == y) {
+ *         return 0;
+ *     } else if (x < y) {
+ *         return -1;
+ *     } else if (x > y) {
+ *         return 1;
+ *     } else {
+ *         return nanval ? 1 : -1;
+ *     }
+ * }
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # ecx<- CC
+    movzbq  2(rPC), %rax                    # eax<- BB
+    movsd VREG_ADDRESS(%rax), %xmm0
+    xor     %eax, %eax
+    ucomisd VREG_ADDRESS(%rcx), %xmm0
+    jp      .Lop_cmpl_double_nan_is_neg
+    je      .Lop_cmpl_double_finish
+    jb      .Lop_cmpl_double_less
+.Lop_cmpl_double_nan_is_pos:
+    addb    $1, %al
+    jmp     .Lop_cmpl_double_finish
+.Lop_cmpl_double_nan_is_neg:
+.Lop_cmpl_double_less:
+    movl    $-1, %eax
+.Lop_cmpl_double_finish:
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmpg_double: /* 0x30 */
+/* File: x86_64/op_cmpg_double.S */
+/* File: x86_64/fpcmp.S */
+/*
+ * Compare two floating-point values.  Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ *     if (x == y) {
+ *         return 0;
+ *     } else if (x < y) {
+ *         return -1;
+ *     } else if (x > y) {
+ *         return 1;
+ *     } else {
+ *         return nanval ? 1 : -1;
+ *     }
+ * }
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # ecx<- CC
+    movzbq  2(rPC), %rax                    # eax<- BB
+    movsd VREG_ADDRESS(%rax), %xmm0
+    xor     %eax, %eax
+    ucomisd VREG_ADDRESS(%rcx), %xmm0
+    jp      .Lop_cmpg_double_nan_is_pos
+    je      .Lop_cmpg_double_finish
+    jb      .Lop_cmpg_double_less
+.Lop_cmpg_double_nan_is_pos:
+    addb    $1, %al
+    jmp     .Lop_cmpg_double_finish
+.Lop_cmpg_double_nan_is_neg:
+.Lop_cmpg_double_less:
+    movl    $-1, %eax
+.Lop_cmpg_double_finish:
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_cmp_long: /* 0x31 */
+/* File: x86_64/op_cmp_long.S */
+/*
+ * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ */
+    /* cmp-long vAA, vBB, vCC */
+    movzbq  2(rPC), %rdx                    # edx <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_WIDE_VREG %rdx, %rdx                # rdx <- v[BB]
+    xorl    %eax, %eax
+    xorl    %edi, %edi
+    addb    $1, %al
+    movl    $-1, %esi
+    cmpq    VREG_ADDRESS(%rcx), %rdx
+    cmovl   %esi, %edi
+    cmovg   %eax, %edi
+    SET_VREG %edi, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
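+/*
+ * The cmp-long sequence above is branchless (two cmovs); as C (sketch):
+ *
+ * int32_t result = 0;
+ * if (vBB < vCC) result = -1;   // cmovl
+ * if (vBB > vCC) result = 1;    // cmovg
+ * vAA = result;
+ */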
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_eq: /* 0x32 */
+/* File: x86_64/op_if_eq.S */
+/* File: x86_64/bincmp.S */
+/*
+ * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+    /* if-cmp vA, vB, +CCCC */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # rcx <- A
+    GET_VREG %eax, %rcx                     # eax <- vA
+    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
+    movl    $2, rINST                      # assume not taken
+    jne   1f
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- disp * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # disp * 2 <= 0 => suspend check
+    GOTO_NEXT
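+/*
+ * All six if-cmp expansions below share the shape above; as C (sketch):
+ *
+ * int32_t disp = 2;                        // fall through: insn is 2 units
+ * if (cmp(vA, vB)) disp = (int16_t)CCCC;   // taken: signed branch offset
+ * rPC = (uint16_t*)rPC + disp;             // disp <= 0 -> suspend check
+ *
+ * The if-*z forms further below differ only in comparing vAA against
+ * zero instead of against vB.
+ */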
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_ne: /* 0x33 */
+/* File: x86_64/op_if_ne.S */
+/* File: x86_64/bincmp.S */
+/*
+ * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+    /* if-cmp vA, vB, +CCCC */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # rcx <- A
+    GET_VREG %eax, %rcx                     # eax <- vA
+    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
+    movl    $2, rINST                      # assume not taken
+    je   1f
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- disp * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # disp * 2 <= 0 => suspend check
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_lt: /* 0x34 */
+/* File: x86_64/op_if_lt.S */
+/* File: x86_64/bincmp.S */
+/*
+ * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+    /* if-cmp vA, vB, +CCCC */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # rcx <- A
+    GET_VREG %eax, %rcx                     # eax <- vA
+    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
+    movl    $2, rINST                      # assume not taken
+    jge   1f
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- disp * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # disp * 2 <= 0 => suspend check
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_ge: /* 0x35 */
+/* File: x86_64/op_if_ge.S */
+/* File: x86_64/bincmp.S */
+/*
+ * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+    /* if-cmp vA, vB, +CCCC */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # rcx <- A
+    GET_VREG %eax, %rcx                     # eax <- vA
+    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
+    movl    $2, rINST                      # assume not taken
+    jl   1f
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- disp * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # disp * 2 <= 0 => suspend check
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_gt: /* 0x36 */
+/* File: x86_64/op_if_gt.S */
+/* File: x86_64/bincmp.S */
+/*
+ * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+    /* if-cmp vA, vB, +CCCC */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # rcx <- A
+    GET_VREG %eax, %rcx                     # eax <- vA
+    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
+    movl    $2, rINST                      # assume not taken
+    jle   1f
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- disp * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # disp * 2 <= 0 => suspend check
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_le: /* 0x37 */
+/* File: x86_64/op_if_le.S */
+/* File: x86_64/bincmp.S */
+/*
+ * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+    /* if-cmp vA, vB, +CCCC */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # rcx <- A
+    GET_VREG %eax, %rcx                     # eax <- vA
+    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
+    movl    $2, rINST                      # assume not taken
+    jg   1f
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- disp * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # disp * 2 <= 0 => suspend check
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_eqz: /* 0x38 */
+/* File: x86_64/op_if_eqz.S */
+/* File: x86_64/zcmp.S */
+/*
+ * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+    /* if-cmp vAA, +BBBB */
+    cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vAA, 0)
+    movl    $2, rINST                      # assume branch not taken
+    jne   1f
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- disp * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # disp * 2 <= 0 => suspend check
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_nez: /* 0x39 */
+/* File: x86_64/op_if_nez.S */
+/* File: x86_64/zcmp.S */
+/*
+ * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+    /* if-cmp vAA, +BBBB */
+    cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vAA, 0)
+    movl    $2, rINST                      # assume branch not taken
+    je   1f
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- disp * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # disp * 2 <= 0 => suspend check
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_ltz: /* 0x3a */
+/* File: x86_64/op_if_ltz.S */
+/* File: x86_64/zcmp.S */
+/*
+ * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+    /* if-cmp vAA, +BBBB */
+    cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vAA, 0)
+    movl    $2, rINST                      # assume branch not taken
+    jge   1f
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- disp * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # disp * 2 <= 0 => suspend check
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_gez: /* 0x3b */
+/* File: x86_64/op_if_gez.S */
+/* File: x86_64/zcmp.S */
+/*
+ * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+    /* if-cmp vAA, +BBBB */
+    cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vAA, 0)
+    movl    $2, rINST                      # assume branch not taken
+    jl   1f
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- disp * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # disp * 2 <= 0 => suspend check
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_gtz: /* 0x3c */
+/* File: x86_64/op_if_gtz.S */
+/* File: x86_64/zcmp.S */
+/*
+ * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+    /* if-cmp vAA, +BBBB */
+    cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vAA, 0)
+    movl    $2, rINST                      # assume branch not taken
+    jle   1f
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- disp * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # disp * 2 <= 0 => suspend check
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_if_lez: /* 0x3d */
+/* File: x86_64/op_if_lez.S */
+/* File: x86_64/zcmp.S */
+/*
+ * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+    /* if-cmp vAA, +BBBB */
+    cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vAA, 0)
+    movl    $2, rINST                      # assume branch not taken
+    jg   1f
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- disp * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # disp * 2 <= 0 => suspend check
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_3e: /* 0x3e */
+/* File: x86_64/op_unused_3e.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_3f: /* 0x3f */
+/* File: x86_64/op_unused_3f.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_40: /* 0x40 */
+/* File: x86_64/op_unused_40.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_41: /* 0x41 */
+/* File: x86_64/op_unused_41.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_42: /* 0x42 */
+/* File: x86_64/op_unused_42.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_43: /* 0x43 */
+/* File: x86_64/op_unused_43.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget: /* 0x44 */
+/* File: x86_64/op_aget.S */
+/*
+ * Array get, 32 bits or less.  vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if 0
+    movq    MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    movl   MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,4), %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
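+/*
+ * The guarded load above, as C (sketch; arr_length/arr_data stand in
+ * for the MIRROR_* offsets). Note the single unsigned compare (jae)
+ * rejects both negative indexes and idx >= length:
+ *
+ * if (arr == NULL) goto common_errNullObject;
+ * if ((uint32_t)idx >= arr_length(arr)) goto common_errArrayIndex;
+ * vAA = *(int32_t*)(arr_data(arr) + idx * 4);
+ */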
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_wide: /* 0x45 */
+/* File: x86_64/op_aget_wide.S */
+/* File: x86_64/op_aget.S */
+/*
+ * Array get, 64 bits.  vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if 1
+    movq    MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    movl   MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_object: /* 0x46 */
+/* File: x86_64/op_aget_object.S */
+/*
+ * Array object get.  vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG OUT_32_ARG0, %rax              # eax <- vBB (array object)
+    GET_VREG OUT_32_ARG1, %rcx              # ecx <- vCC (requested index)
+    EXPORT_PC
+    call    SYMBOL(artAGetObjectFromMterp)  # (array, index)
+    cmpq    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException
+    SET_VREG_OBJECT %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
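+/*
+ * aget-object calls out to the runtime instead of inlining the load,
+ * keeping the null/bounds checks (and any read barrier) in one place.
+ * The contract assumed above, as C (sketch; set_vreg_object is a
+ * stand-in for SET_VREG_OBJECT):
+ *
+ * Object* o = artAGetObjectFromMterp(arr, idx);  // may set self->exception
+ * if (self->exception != NULL) goto MterpException;
+ * set_vreg_object(AA, o);
+ */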
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_boolean: /* 0x47 */
+/* File: x86_64/op_aget_boolean.S */
+/* File: x86_64/op_aget.S */
+/*
+ * Array get, 32 bits or less.  vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if 0
+    movq    MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    movzbl   MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,1), %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_byte: /* 0x48 */
+/* File: x86_64/op_aget_byte.S */
+/* File: x86_64/op_aget.S */
+/*
+ * Array get, 32 bits or less.  vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if 0
+    movq    MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    movsbl   MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,1), %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_char: /* 0x49 */
+/* File: x86_64/op_aget_char.S */
+/* File: x86_64/op_aget.S */
+/*
+ * Array get, 32 bits or less.  vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if 0
+    movq    MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    movzwl   MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,2), %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aget_short: /* 0x4a */
+/* File: x86_64/op_aget_short.S */
+/* File: x86_64/op_aget.S */
+/*
+ * Array get, 32 bits or less.  vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if 0
+    movq    MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    movswl   MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,2), %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput: /* 0x4b */
+/* File: x86_64/op_aput.S */
+/*
+ * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if 0
+    GET_WIDE_VREG rINSTq, rINSTq
+    .else
+    GET_VREG rINST, rINSTq
+    .endif
+    movl    rINST, MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,4)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_wide: /* 0x4c */
+/* File: x86_64/op_aput_wide.S */
+/* File: x86_64/op_aput.S */
+/*
+ * Array put, 64 bits.  vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if 1
+    GET_WIDE_VREG rINSTq, rINSTq
+    .else
+    GET_VREG rINST, rINSTq
+    .endif
+    movq    rINSTq, MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_object: /* 0x4d */
+/* File: x86_64/op_aput_object.S */
+/*
+ * Store an object into an array.  vBB[vCC] <- vAA.
+ */
+    /* op vAA, vBB, vCC */
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    REFRESH_INST 77
+    movq    rINSTq, OUT_ARG2
+    call    SYMBOL(MterpAputObject)         # (shadow_frame, pc, inst)
+    testb   %al, %al
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
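+/*
+ * Unlike the primitive aput above, object stores go through the
+ * runtime so it can do the array store type check and the GC card
+ * mark; roughly (a sketch, helper names illustrative):
+ *
+ * if (!is_assignable(obj->klass, arr->klass->component))
+ *     goto throw_array_store;
+ * arr->data[idx] = obj;
+ * mark_gc_card(arr);
+ */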
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_boolean: /* 0x4e */
+/* File: x86_64/op_aput_boolean.S */
+/* File: x86_64/op_aput.S */
+/*
+ * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if 0
+    GET_WIDE_VREG rINSTq, rINSTq
+    .else
+    GET_VREG rINST, rINSTq
+    .endif
+    movb    rINSTbl, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,1)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_byte: /* 0x4f */
+/* File: x86_64/op_aput_byte.S */
+/* File: x86_64/op_aput.S */
+/*
+ * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if 0
+    GET_WIDE_VREG rINSTq, rINSTq
+    .else
+    GET_VREG rINST, rINSTq
+    .endif
+    movb    rINSTbl, MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,1)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_char: /* 0x50 */
+/* File: x86_64/op_aput_char.S */
+/* File: x86_64/op_aput.S */
+/*
+ * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if 0
+    GET_WIDE_VREG rINSTq, rINSTq
+    .else
+    GET_VREG rINST, rINSTq
+    .endif
+    movw    rINSTw, MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,2)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_aput_short: /* 0x51 */
+/* File: x86_64/op_aput_short.S */
+/* File: x86_64/op_aput.S */
+/*
+ * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if 0
+    GET_WIDE_VREG rINSTq, rINSTq
+    .else
+    GET_VREG rINST, rINSTq
+    .endif
+    movw    rINSTw, MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,2)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget: /* 0x52 */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+    EXPORT_PC
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    movzwl  2(rPC), OUT_32_ARG0             # eax <- field ref CCCC
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(artGet32InstanceFromCode)
+    cmpq    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException                  # bail out
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <-value
+    .else
+    .if 0
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <-value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <-value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
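+/*
+ * Contract assumed by the iget family above (sketch): the
+ * artGet*InstanceFromCode helpers return the value in rax/eax and
+ * signal failure only through the thread's pending exception, hence
+ * the check of THREAD_EXCEPTION_OFFSET(rSELF) rather than of the
+ * return value.
+ *
+ * int32_t v = artGet32InstanceFromCode(CCCC, obj, referrer, self);
+ * if (self->exception != NULL) goto MterpException;
+ * vA = v;
+ */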
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_wide: /* 0x53 */
+/* File: x86_64/op_iget_wide.S */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+    EXPORT_PC
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    movzwl  2(rPC), OUT_32_ARG0             # eax <- field ref CCCC
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(artGet64InstanceFromCode)
+    cmpq    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException                  # bail out
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <-value
+    .else
+    .if 1
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <-value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <-value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_object: /* 0x54 */
+/* File: x86_64/op_iget_object.S */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+    EXPORT_PC
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    movzwl  2(rPC), OUT_32_ARG0             # eax <- field ref CCCC
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(artGetObjInstanceFromCode)
+    cmpq    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException                  # bail out
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 1
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <-value
+    .else
+    .if 0
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <-value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <-value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_boolean: /* 0x55 */
+/* File: x86_64/op_iget_boolean.S */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+    EXPORT_PC
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    movzwl  2(rPC), OUT_32_ARG0             # eax <- field ref CCCC
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(artGetBooleanInstanceFromCode)
+    cmpq    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException                  # bail out
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <-value
+    .else
+    .if 0
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <-value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <-value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_byte: /* 0x56 */
+/* File: x86_64/op_iget_byte.S */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+    EXPORT_PC
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    movzwl  2(rPC), OUT_32_ARG0             # eax <- field ref CCCC
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(artGetByteInstanceFromCode)
+    cmpq    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException                  # bail out
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <-value
+    .else
+    .if 0
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <-value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <-value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_char: /* 0x57 */
+/* File: x86_64/op_iget_char.S */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+    EXPORT_PC
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    movzwl  2(rPC), OUT_32_ARG0             # eax <- field ref CCCC
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(artGetCharInstanceFromCode)
+    cmpq    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException                  # bail out
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <-value
+    .else
+    .if 0
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <-value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <-value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_short: /* 0x58 */
+/* File: x86_64/op_iget_short.S */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+    EXPORT_PC
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    movzwl  2(rPC), OUT_32_ARG0             # eax <- field ref CCCC
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(artGetShortInstanceFromCode)
+    cmpq    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException                  # bail out
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <-value
+    .else
+    .if 0
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <-value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <-value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput: /* 0x59 */
+/* File: x86_64/op_iput.S */
+/*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+    /* op vA, vB, field@CCCC */
+    .extern artSet32InstanceFromMterp
+    EXPORT_PC
+    movzwl  2(rPC), OUT_32_ARG0             # field ref <- 0000CCCC
+    movzbq  rINSTbl, %rcx                   # rcx<- BA
+    sarl    $4, %ecx                       # ecx<- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    andb    $0xf, rINSTbl                  # rINST<- A
+    GET_VREG OUT_32_ARG2, rINSTq            # fp[A]
+    movq    OFF_FP_METHOD(rFP), OUT_ARG3    # referrer
+    call    SYMBOL(artSet32InstanceFromMterp)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
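+/*
+ * Store contract assumed above (sketch): the setter returns zero on
+ * success and nonzero after raising (e.g. an NPE on a null object),
+ * hence the jnz to MterpPossibleException.
+ *
+ * if (artSet32InstanceFromMterp(CCCC, obj, vA, referrer) != 0)
+ *     goto MterpPossibleException;
+ */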
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_wide: /* 0x5a */
+/* File: x86_64/op_iput_wide.S */
+    /* iput-wide vA, vB, field@CCCC */
+    .extern artSet64InstanceFromMterp
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref CCCC
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    andb    $0xf, rINSTbl                  # rINST <- A
+    leaq    VREG_ADDRESS(rINSTq), OUT_ARG2  # &fp[A]
+    movq    OFF_FP_METHOD(rFP), OUT_ARG3    # referrer
+    call    SYMBOL(artSet64InstanceFromMterp)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
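+/*
+ * iput-wide passes the address of the 64-bit vreg pair rather than
+ * the value itself, as sketched here:
+ *
+ * artSet64InstanceFromMterp(CCCC, obj, &fp[A], referrer);
+ */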
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_object: /* 0x5b */
+/* File: x86_64/op_iput_object.S */
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    REFRESH_INST 91
+    movl    rINST, OUT_32_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpIputObject)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_boolean: /* 0x5c */
+/* File: x86_64/op_iput_boolean.S */
+/* File: x86_64/op_iput.S */
+/*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+    /* op vA, vB, field@CCCC */
+    .extern artSet8InstanceFromMterp
+    EXPORT_PC
+    movzwl  2(rPC), OUT_32_ARG0             # field ref <- 0000CCCC
+    movzbq  rINSTbl, %rcx                   # rcx<- BA
+    sarl    $4, %ecx                       # ecx<- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    andb    $0xf, rINSTbl                  # rINST<- A
+    GET_VREG OUT_32_ARG2, rINSTq            # fp[A]
+    movq    OFF_FP_METHOD(rFP), OUT_ARG3    # referrer
+    call    SYMBOL(artSet8InstanceFromMterp)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_byte: /* 0x5d */
+/* File: x86_64/op_iput_byte.S */
+/* File: x86_64/op_iput.S */
+/*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+    /* op vA, vB, field@CCCC */
+    .extern artSet8InstanceFromMterp
+    EXPORT_PC
+    movzwl  2(rPC), OUT_32_ARG0             # field ref <- 0000CCCC
+    movzbq  rINSTbl, %rcx                   # rcx<- BA
+    sarl    $4, %ecx                       # ecx<- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    andb    $0xf, rINSTbl                  # rINST<- A
+    GET_VREG OUT_32_ARG2, rINSTq            # fp[A]
+    movq    OFF_FP_METHOD(rFP), OUT_ARG3    # referrer
+    call    SYMBOL(artSet8InstanceFromMterp)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_char: /* 0x5e */
+/* File: x86_64/op_iput_char.S */
+/* File: x86_64/op_iput.S */
+/*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+    /* op vA, vB, field@CCCC */
+    .extern artSet16InstanceFromMterp
+    EXPORT_PC
+    movzwl  2(rPC), OUT_32_ARG0             # field ref <- 0000CCCC
+    movzbq  rINSTbl, %rcx                   # rcx<- BA
+    sarl    $4, %ecx                       # ecx<- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    andb    $0xf, rINSTbl                  # rINST<- A
+    GET_VREG OUT_32_ARG2, rINSTq            # fp[A]
+    movq    OFF_FP_METHOD(rFP), OUT_ARG3    # referrer
+    call    SYMBOL(artSet16InstanceFromMterp)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_short: /* 0x5f */
+/* File: x86_64/op_iput_short.S */
+/* File: x86_64/op_iput.S */
+/*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+    /* op vA, vB, field@CCCC */
+    .extern artSet16InstanceFromMterp
+    EXPORT_PC
+    movzwl  2(rPC), OUT_32_ARG0             # field ref <- 0000CCCC
+    movzbq  rINSTbl, %rcx                   # rcx<- BA
+    sarl    $4, %ecx                       # ecx<- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    andb    $0xf, rINSTbl                  # rINST<- A
+    GET_VREG OUT_32_ARG2, rINSTq            # fp[A]
+    movq    OFF_FP_METHOD(rFP), OUT_ARG3    # referrer
+    call    SYMBOL(artSet16InstanceFromMterp)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget: /* 0x60 */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+    /* op vAA, field@BBBB */
+    .extern artGet32StaticFromCode
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    movq    OFF_FP_METHOD(rFP), OUT_ARG1    # referrer
+    movq    rSELF, OUT_ARG2                 # self
+    call    SYMBOL(artGet32StaticFromCode)
+    cmpl    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException
+    .if 0
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+    .else
+    .if 0
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
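+/*
+ * The sget handlers (above and in the expansions that follow) use the
+ * same value/exception contract as iget; EXPORT_PC is needed because
+ * resolving the field may run the declaring class's initializer. As C
+ * (sketch):
+ *
+ * int32_t v = artGet32StaticFromCode(BBBB, referrer, self);
+ * if (self->exception != NULL) goto MterpException;
+ * vAA = v;
+ */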
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_wide: /* 0x61 */
+/* File: x86_64/op_sget_wide.S */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+    /* op vAA, field@BBBB */
+    .extern artGet64StaticFromCode
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    movq    OFF_FP_METHOD(rFP), OUT_ARG1    # referrer
+    movq    rSELF, OUT_ARG2                 # self
+    call    SYMBOL(artGet64StaticFromCode)
+    cmpl    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException
+    .if 0
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+    .else
+    .if 1
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_object: /* 0x62 */
+/* File: x86_64/op_sget_object.S */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+    /* op vAA, field@BBBB */
+    .extern artGetObjStaticFromCode
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    movq    OFF_FP_METHOD(rFP), OUT_ARG1    # referrer
+    movq    rSELF, OUT_ARG2                 # self
+    call    SYMBOL(artGetObjStaticFromCode)
+    cmpl    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException
+    .if 1
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+    .else
+    .if 0
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_boolean: /* 0x63 */
+/* File: x86_64/op_sget_boolean.S */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+    /* op vAA, field@BBBB */
+    .extern artGetBooleanStaticFromCode
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    movq    OFF_FP_METHOD(rFP), OUT_ARG1    # referrer
+    movq    rSELF, OUT_ARG2                 # self
+    call    SYMBOL(artGetBooleanStaticFromCode)
+    cmpl    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException
+    .if 0
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+    .else
+    .if 0
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_byte: /* 0x64 */
+/* File: x86_64/op_sget_byte.S */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+    /* op vAA, field@BBBB */
+    .extern artGetByteStaticFromCode
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    movq    OFF_FP_METHOD(rFP), OUT_ARG1    # referrer
+    movq    rSELF, OUT_ARG2                 # self
+    call    SYMBOL(artGetByteStaticFromCode)
+    cmpl    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException
+    .if 0
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+    .else
+    .if 0
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_char: /* 0x65 */
+/* File: x86_64/op_sget_char.S */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+    /* op vAA, field@BBBB */
+    .extern artGetCharStaticFromCode
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    movq    OFF_FP_METHOD(rFP), OUT_ARG1    # referrer
+    movq    rSELF, OUT_ARG2                 # self
+    call    SYMBOL(artGetCharStaticFromCode)
+    cmpl    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException
+    .if 0
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+    .else
+    .if 0
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sget_short: /* 0x66 */
+/* File: x86_64/op_sget_short.S */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+    /* op vAA, field@BBBB */
+    .extern artGetShortStaticFromCode
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    movq    OFF_FP_METHOD(rFP), OUT_ARG1    # referrer
+    movq    rSELF, OUT_ARG2                 # self
+    call    SYMBOL(artGetShortStaticFromCode)
+    cmpl    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException
+    .if 0
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+    .else
+    .if 0
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput: /* 0x67 */
+/* File: x86_64/op_sput.S */
+/*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+    /* op vAA, field@BBBB */
+    .extern artSet32StaticFromCode
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    GET_VREG OUT_32_ARG1, rINSTq            # fp[AA]
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3                 # self
+    call    SYMBOL(artSet32StaticFromCode)
+    testb   %al, %al
+    jnz     MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_wide: /* 0x68 */
+/* File: x86_64/op_sput_wide.S */
+/*
+ * SPUT_WIDE handler wrapper.
+ *
+ */
+    /* sput-wide vAA, field@BBBB */
+    .extern artSet64IndirectStaticFromMterp
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    movq    OFF_FP_METHOD(rFP), OUT_ARG1    # referrer
+    leaq    VREG_ADDRESS(rINSTq), OUT_ARG2  # &fp[AA]
+    movq    rSELF, OUT_ARG3                 # self
+    call    SYMBOL(artSet64IndirectStaticFromMterp)
+    testb   %al, %al
+    jnz     MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
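+/*
+ * The 64-bit operand is passed indirectly: the leaq above hands the helper
+ * the address of the vreg pair rather than the value.  A rough,
+ * illustrative C view of the assumed contract:
+ *
+ *   #include <stdint.h>
+ *   // Returns non-zero on failure with an exception pending.
+ *   extern int artSet64IndirectStaticFromMterp(uint32_t field_idx,
+ *                                              void* referrer,
+ *                                              uint64_t* new_value,
+ *                                              void* self);
+ */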
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_object: /* 0x69 */
+/* File: x86_64/op_sput_object.S */
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    REFRESH_INST 105
+    movq    rINSTq, OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpSputObject)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_boolean: /* 0x6a */
+/* File: x86_64/op_sput_boolean.S */
+/* File: x86_64/op_sput.S */
+/*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+    /* op vAA, field@BBBB */
+    .extern artSet8StaticFromCode
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    GET_VREG OUT_32_ARG1, rINSTq            # fp[AA]
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3                 # self
+    call    SYMBOL(artSet8StaticFromCode)
+    testb   %al, %al
+    jnz     MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_byte: /* 0x6b */
+/* File: x86_64/op_sput_byte.S */
+/* File: x86_64/op_sput.S */
+/*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+    /* op vAA, field@BBBB */
+    .extern artSet8StaticFromCode
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    GET_VREG OUT_32_ARG1, rINSTq            # fp[AA]
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3                 # self
+    call    SYMBOL(artSet8StaticFromCode)
+    testb   %al, %al
+    jnz     MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_char: /* 0x6c */
+/* File: x86_64/op_sput_char.S */
+/* File: x86_64/op_sput.S */
+/*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+    /* op vAA, field@BBBB */
+    .extern artSet16StaticFromCode
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    GET_VREG OUT_32_ARG1, rINSTq            # fp[AA]
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3                 # self
+    call    SYMBOL(artSet16StaticFromCode)
+    testb   %al, %al
+    jnz     MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sput_short: /* 0x6d */
+/* File: x86_64/op_sput_short.S */
+/* File: x86_64/op_sput.S */
+/*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+    /* op vAA, field@BBBB */
+    .extern artSet16StaticFromCode
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    GET_VREG OUT_32_ARG1, rINSTq            # fp[AA]
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3                 # self
+    call    SYMBOL(artSet16StaticFromCode)
+    testb   %al, %al
+    jnz     MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_virtual: /* 0x6e */
+/* File: x86_64/op_invoke_virtual.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeVirtual
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST 110
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL(MterpInvokeVirtual)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+/*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
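+/*
+ * All invoke handlers share this shape: the actual dispatch happens in a
+ * C++ helper, the PC then advances by 3 code units (the width of an invoke
+ * instruction), and MterpShouldSwitchInterpreters decides whether to keep
+ * running in mterp.  A rough C sketch of the assumed helper contract
+ * (illustrative signature):
+ *
+ *   #include <stdbool.h>
+ *   #include <stdint.h>
+ *   // Returns false if an exception is pending.
+ *   extern bool MterpInvokeVirtual(void* self, void* shadow_frame,
+ *                                  uint16_t* dex_pc_ptr, uint32_t inst_data);
+ */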
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_super: /* 0x6f */
+/* File: x86_64/op_invoke_super.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeSuper
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST 111
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL(MterpInvokeSuper)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+/*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_direct: /* 0x70 */
+/* File: x86_64/op_invoke_direct.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeDirect
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST 112
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL(MterpInvokeDirect)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_static: /* 0x71 */
+/* File: x86_64/op_invoke_static.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeStatic
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST 113
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL(MterpInvokeStatic)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_interface: /* 0x72 */
+/* File: x86_64/op_invoke_interface.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeInterface
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST 114
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL(MterpInvokeInterface)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+/*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+    .balign 128
+.L_op_return_void_no_barrier: /* 0x73 */
+/* File: x86_64/op_return_void_no_barrier.S */
+    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+    jz      1f
+    movq    rSELF, OUT_ARG0
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    xorq    %rax, %rax
+    jmp     MterpReturn
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_virtual_range: /* 0x74 */
+/* File: x86_64/op_invoke_virtual_range.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeVirtualRange
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST 116
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL(MterpInvokeVirtualRange)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_super_range: /* 0x75 */
+/* File: x86_64/op_invoke_super_range.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeSuperRange
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST 117
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL(MterpInvokeSuperRange)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_direct_range: /* 0x76 */
+/* File: x86_64/op_invoke_direct_range.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeDirectRange
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST 118
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL(MterpInvokeDirectRange)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_static_range: /* 0x77 */
+/* File: x86_64/op_invoke_static_range.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeStaticRange
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST 119
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL(MterpInvokeStaticRange)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_interface_range: /* 0x78 */
+/* File: x86_64/op_invoke_interface_range.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeInterfaceRange
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST 120
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL(MterpInvokeInterfaceRange)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_79: /* 0x79 */
+/* File: x86_64/op_unused_79.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_7a: /* 0x7a */
+/* File: x86_64/op_unused_7a.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_neg_int: /* 0x7b */
+/* File: x86_64/op_neg_int.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4,rINST                       # rINST <- B
+    .if 0
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    .endif
+    andb    $0xf,%cl                       # ecx <- A
+
+    negl    %eax
+    .if 0
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
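+/*
+ * The vreg decoding shared by these unop handlers, as an illustrative C
+ * sketch: rINST holds the instruction's "BA" byte, packing two 4-bit
+ * register indices.
+ *
+ *   #include <stdint.h>
+ *   static void decode_vA_vB(uint8_t ba, uint8_t* a, uint8_t* b) {
+ *       *a = ba & 0xf;          // andb $0xf, %cl
+ *       *b = ba >> 4;           // sarl $4, rINST
+ *   }
+ */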
+
+/* ------------------------------ */
+    .balign 128
+.L_op_not_int: /* 0x7c */
+/* File: x86_64/op_not_int.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4,rINST                       # rINST <- B
+    .if 0
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    .endif
+    andb    $0xf,%cl                       # ecx <- A
+
+    notl    %eax
+    .if 0
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_neg_long: /* 0x7d */
+/* File: x86_64/op_neg_long.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4,rINST                       # rINST <- B
+    .if 1
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    .endif
+    andb    $0xf,%cl                       # ecx <- A
+
+    negq    %rax
+    .if 1
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_not_long: /* 0x7e */
+/* File: x86_64/op_not_long.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4,rINST                       # rINST <- B
+    .if 1
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    .endif
+    andb    $0xf,%cl                       # ecx <- A
+
+    notq    %rax
+    .if 1
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_neg_float: /* 0x7f */
+/* File: x86_64/op_neg_float.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4,rINST                       # rINST <- B
+    .if 0
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    .endif
+    andb    $0xf,%cl                       # ecx <- A
+
+    xorl    $0x80000000, %eax
+    .if 0
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_neg_double: /* 0x80 */
+/* File: x86_64/op_neg_double.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4,rINST                       # rINST <- B
+    .if 1
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    .endif
+    andb    $0xf,%cl                       # ecx <- A
+    movq    $0x8000000000000000, %rsi
+    xorq    %rsi, %rax
+    .if 1
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_long: /* 0x81 */
+/* File: x86_64/op_int_to_long.S */
+    /* int to long vA, vB */
+    movzbq  rINSTbl, %rax                   # rax <- +A
+    sarl    $4, %eax                       # eax <- B
+    andb    $0xf, rINSTbl                  # rINST <- A
+    movslq  VREG_ADDRESS(%rax), %rax
+    SET_WIDE_VREG %rax, rINSTq              # v[A] <- %rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_float: /* 0x82 */
+/* File: x86_64/op_int_to_float.S */
+/* File: x86_64/fpcvt.S */
+/*
+ * Generic 32-bit FP conversion operation.
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    cvtsi2ssl    VREG_ADDRESS(rINSTq), %xmm0
+    .if 0
+    movsd   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_WIDE_REF %rcx
+    .else
+    movss   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_REF %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_double: /* 0x83 */
+/* File: x86_64/op_int_to_double.S */
+/* File: x86_64/fpcvt.S */
+/*
+ * Generic 32-bit FP conversion operation.
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    cvtsi2sdl    VREG_ADDRESS(rINSTq), %xmm0
+    .if 1
+    movsd   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_WIDE_REF %rcx
+    .else
+    movss   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_REF %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_long_to_int: /* 0x84 */
+/* File: x86_64/op_long_to_int.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: x86_64/op_move.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    movl    rINST, %eax                     # eax <- BA
+    andb    $0xf, %al                      # eax <- A
+    shrl    $4, rINST                      # rINST <- B
+    GET_VREG %edx, rINSTq
+    .if 0
+    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_long_to_float: /* 0x85 */
+/* File: x86_64/op_long_to_float.S */
+/* File: x86_64/fpcvt.S */
+/*
+ * Generic FP conversion operation.
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    cvtsi2ssq    VREG_ADDRESS(rINSTq), %xmm0
+    .if 0
+    movsd   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_WIDE_REF %rcx
+    .else
+    movss   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_REF %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_long_to_double: /* 0x86 */
+/* File: x86_64/op_long_to_double.S */
+/* File: x86_64/fpcvt.S */
+/*
+ * Generic FP conversion operation.
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    cvtsi2sdq    VREG_ADDRESS(rINSTq), %xmm0
+    .if 1
+    movsd   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_WIDE_REF %rcx
+    .else
+    movss   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_REF %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_float_to_int: /* 0x87 */
+/* File: x86_64/op_float_to_int.S */
+/* File: x86_64/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint.  If it is less
+ * than minint, it should be clamped to minint.  If it is a NaN, the result
+ * should be zero.  Further, the rounding mode is to truncate.
+ */
+    /* float/double to int/long vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    movss   VREG_ADDRESS(rINSTq), %xmm0
+    movl  $0x7fffffff, %eax
+    cvtsi2ssl %eax, %xmm1
+    comiss    %xmm1, %xmm0
+    jae     1f
+    jp      2f
+    cvttss2sil  %xmm0, %eax
+    jmp     1f
+2:
+    xorl    %eax, %eax
+1:
+    .if 0
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
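+/*
+ * The compare-and-branch dance above implements Java's float-to-int
+ * narrowing rules.  An equivalent C sketch of the required semantics:
+ *
+ *   #include <stdint.h>
+ *   static int32_t java_f2i(float f) {
+ *       if (f != f) return 0;                        // NaN -> 0
+ *       if (f >= 2147483648.0f) return INT32_MAX;    // clamp high
+ *       if (f <= -2147483648.0f) return INT32_MIN;   // clamp low (MIN is exact)
+ *       return (int32_t)f;                           // truncate toward zero
+ *   }
+ */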
+
+/* ------------------------------ */
+    .balign 128
+.L_op_float_to_long: /* 0x88 */
+/* File: x86_64/op_float_to_long.S */
+/* File: x86_64/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint.  If it is less
+ * than minint, it should be clamped to minint.  If it is a NaN, the result
+ * should be zero.  Further, the rounding mode is to truncate.
+ */
+    /* float/double to int/long vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    movss   VREG_ADDRESS(rINSTq), %xmm0
+    movq  $0x7fffffffffffffff, %rax
+    cvtsi2ssq %rax, %xmm1
+    comiss    %xmm1, %xmm0
+    jae     1f
+    jp      2f
+    cvttss2siq  %xmm0, %rax
+    jmp     1f
+2:
+    xorq    %rax, %rax
+1:
+    .if 1
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_float_to_double: /* 0x89 */
+/* File: x86_64/op_float_to_double.S */
+/* File: x86_64/fpcvt.S */
+/*
+ * Generic FP conversion operation.
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    cvtss2sd    VREG_ADDRESS(rINSTq), %xmm0
+    .if 1
+    movsd   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_WIDE_REF %rcx
+    .else
+    movss   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_REF %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_double_to_int: /* 0x8a */
+/* File: x86_64/op_double_to_int.S */
+/* File: x86_64/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint.  If it is less
+ * than minint, it should be clamped to minint.  If it is a NaN, the result
+ * should be zero.  Further, the rounding mode is to truncate.
+ */
+    /* float/double to int/long vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    movsd   VREG_ADDRESS(rINSTq), %xmm0
+    movl  $0x7fffffff, %eax
+    cvtsi2sdl %eax, %xmm1
+    comisd    %xmm1, %xmm0
+    jae     1f
+    jp      2f
+    cvttsd2sil  %xmm0, %eax
+    jmp     1f
+2:
+    xorl    %eax, %eax
+1:
+    .if 0
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_double_to_long: /* 0x8b */
+/* File: x86_64/op_double_to_long.S */
+/* File: x86_64/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint.  If it is less
+ * than minint, it should be clamped to minint.  If it is a NaN, the result
+ * should be zero.  Further, the rounding mode is to truncate.
+ */
+    /* float/double to int/long vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    movsd   VREG_ADDRESS(rINSTq), %xmm0
+    movq  $0x7fffffffffffffff, %rax
+    cvtsi2sdq %rax, %xmm1
+    comisd    %xmm1, %xmm0
+    jae     1f
+    jp      2f
+    cvttsd2siq  %xmm0, %rax
+    jmp     1f
+2:
+    xorq    %rax, %rax
+1:
+    .if 1
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_double_to_float: /* 0x8c */
+/* File: x86_64/op_double_to_float.S */
+/* File: x86_64/fpcvt.S */
+/*
+ * Generic FP conversion operation.
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    cvtsd2ss    VREG_ADDRESS(rINSTq), %xmm0
+    .if 0
+    movsd   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_WIDE_REF %rcx
+    .else
+    movss   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_REF %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_byte: /* 0x8d */
+/* File: x86_64/op_int_to_byte.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4,rINST                       # rINST <- B
+    .if 0
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    .endif
+    andb    $0xf,%cl                       # ecx <- A
+
+    movsbl  %al, %eax
+    .if 0
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_char: /* 0x8e */
+/* File: x86_64/op_int_to_char.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4,rINST                       # rINST <- B
+    .if 0
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    .endif
+    andb    $0xf,%cl                       # ecx <- A
+
+    movzwl  %ax, %eax
+    .if 0
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_int_to_short: /* 0x8f */
+/* File: x86_64/op_int_to_short.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4,rINST                       # rINST <- B
+    .if 0
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    .endif
+    andb    $0xf,%cl                       # ecx <- A
+
+    movswl  %ax, %eax
+    .if 0
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_int: /* 0x90 */
+/* File: x86_64/op_add_int.S */
+/* File: x86_64/binop.S */
+/*
+ * Generic 32-bit binary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ *      xor-int, shl-int, shr-int, ushr-int
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB
+    addl    (rFP,%rcx,4), %eax              # eax <- vBB + vCC
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
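+/*
+ * Decoding shared by the 32-bit binop handlers, as an illustrative C
+ * sketch: rINST carries vAA, and the second code unit carries the two
+ * source vreg indices BB and CC.
+ *
+ *   #include <stdint.h>
+ *   static void add_int(uint32_t* fp, uint8_t aa, const uint8_t* insn) {
+ *       uint8_t bb = insn[2], cc = insn[3];  // movzbq 2(rPC) / 3(rPC)
+ *       fp[aa] = fp[bb] + fp[cc];            // addl (rFP,%rcx,4), %eax
+ *   }
+ */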
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_int: /* 0x91 */
+/* File: x86_64/op_sub_int.S */
+/* File: x86_64/binop.S */
+/*
+ * Generic 32-bit binary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ *      xor-int, shl-int, shr-int, ushr-int
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB
+    subl    (rFP,%rcx,4), %eax              # eax <- vBB - vCC
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_int: /* 0x92 */
+/* File: x86_64/op_mul_int.S */
+/* File: x86_64/binop.S */
+/*
+ * Generic 32-bit binary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ *      xor-int, shl-int, shr-int, ushr-int
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB
+    imull   (rFP,%rcx,4), %eax              # eax <- vBB * vCC
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_int: /* 0x93 */
+/* File: x86_64/op_div_int.S */
+/* File: x86_64/bindiv.S */
+/*
+ * 32-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    .if 0
+    GET_WIDE_VREG %rax, %rax                # rax <- vBB
+    GET_WIDE_VREG %rcx, %rcx                # rcx <- vCC
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    GET_VREG %ecx, %rcx                  # ecx <- vCC
+    .endif
+    testl   %ecx, %ecx
+    jz      common_errDivideByZero
+    cmpl  $-1, %ecx
+    je      2f
+    cdq                                    # edx:eax <- sign extension of eax
+    idivl   %ecx
+1:
+    .if 0
+    SET_WIDE_VREG %rax, rINSTq           # vAA <- rax
+    .else
+    SET_VREG %eax, rINSTq                # vAA <- eax
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if 0
+    xorl %eax, %eax
+    .else
+    negl %eax
+    .endif
+    jmp     1b
+
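+/*
+ * The cmpl $-1 guard exists because idivl raises #DE on INT32_MIN / -1,
+ * whereas Java defines that quotient as INT32_MIN (and the corresponding
+ * remainder as 0, handled the same way in rem-int below).  Illustrative C:
+ *
+ *   #include <stdint.h>
+ *   static int32_t java_div(int32_t a, int32_t b) {  // b != 0 checked earlier
+ *       if (b == -1)                                 // negate without idivl, so
+ *           return (int32_t)(0u - (uint32_t)a);      // INT32_MIN maps to itself
+ *       return a / b;
+ *   }
+ */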
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_int: /* 0x94 */
+/* File: x86_64/op_rem_int.S */
+/* File: x86_64/bindiv.S */
+/*
+ * 32-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    .if 0
+    GET_WIDE_VREG %rax, %rax                # rax <- vBB
+    GET_WIDE_VREG %rcx, %rcx                # rcx <- vCC
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    GET_VREG %ecx, %rcx                  # ecx <- vCC
+    .endif
+    testl   %ecx, %ecx
+    jz      common_errDivideByZero
+    cmpl  $-1, %ecx
+    je      2f
+    cdq                                    # edx:eax <- sign extension of eax
+    idivl   %ecx
+1:
+    .if 0
+    SET_WIDE_VREG %rdx, rINSTq           # vAA <- rdx
+    .else
+    SET_VREG %edx, rINSTq                # vAA <- edx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if 1
+    xorl %edx, %edx
+    .else
+    negl %edx
+    .endif
+    jmp     1b
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_int: /* 0x95 */
+/* File: x86_64/op_and_int.S */
+/* File: x86_64/binop.S */
+/*
+ * Generic 32-bit binary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ *      xor-int, shl-int, shr-int, ushr-int
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB
+    andl    (rFP,%rcx,4), %eax              # eax <- vBB & vCC
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_int: /* 0x96 */
+/* File: x86_64/op_or_int.S */
+/* File: x86_64/binop.S */
+/*
+ * Generic 32-bit binary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ *      xor-int, shl-int, shr-int, ushr-int
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB
+    orl     (rFP,%rcx,4), %eax              # eax <- vBB | vCC
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_int: /* 0x97 */
+/* File: x86_64/op_xor_int.S */
+/* File: x86_64/binop.S */
+/*
+ * Generic 32-bit binary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ *      xor-int, shl-int, shr-int, ushr-int
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB
+    xorl    (rFP,%rcx,4), %eax              # eax <- vBB ^ vCC
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_int: /* 0x98 */
+/* File: x86_64/op_shl_int.S */
+/* File: x86_64/binop1.S */
+/*
+ * Generic 32-bit binary operation in which both operands are loaded into
+ * registers (op0 in eax, op1 in ecx).
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %ecx, %rcx                     # ecx <- vCC
+    .if 0
+    GET_WIDE_VREG %rax, %rax                # rax <- vBB
+    sall    %cl, %eax                       # eax <- vBB << vCC
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    sall    %cl, %eax                       # eax <- vBB << vCC
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_int: /* 0x99 */
+/* File: x86_64/op_shr_int.S */
+/* File: x86_64/binop1.S */
+/*
+ * Generic 32-bit binary operation in which both operands are loaded into
+ * registers (op0 in eax, op1 in ecx).
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %ecx, %rcx                     # ecx <- vCC
+    .if 0
+    GET_WIDE_VREG %rax, %rax                # rax <- vBB
+    sarl    %cl, %eax                       # eax <- vBB >> vCC
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    sarl    %cl, %eax                       # eax <- vBB >> vCC
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_int: /* 0x9a */
+/* File: x86_64/op_ushr_int.S */
+/* File: x86_64/binop1.S */
+/*
+ * Generic 32-bit binary operation in which both operands are loaded into
+ * registers (op0 in eax, op1 in ecx).
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %ecx, %rcx                     # ecx <- vCC
+    .if 0
+    GET_WIDE_VREG %rax, %rax                # rax <- vBB
+    shrl    %cl, %eax                       # eax <- vBB >>> vCC
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    shrl    %cl, %eax                       # eax <- vBB >>> vCC
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_long: /* 0x9b */
+/* File: x86_64/op_add_long.S */
+/* File: x86_64/binopWide.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
+    addq    (rFP,%rcx,4), %rax              # rax <- vBB + vCC
+    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
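+/*
+ * The scale factor stays 4 even for wide ops: vregs are 32-bit slots, a
+ * long occupies two consecutive slots, and the 64-bit access at
+ * (rFP,%rcx,4) reads that pair as one quadword, roughly the C view below
+ * (alignment per the interpreter's frame layout, assumed here):
+ *
+ *   int64_t vcc = *(int64_t*)&fp[cc];
+ */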
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_long: /* 0x9c */
+/* File: x86_64/op_sub_long.S */
+/* File: x86_64/binopWide.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
+    subq    (rFP,%rcx,4), %rax              # rax <- vBB - vCC
+    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_long: /* 0x9d */
+/* File: x86_64/op_mul_long.S */
+/* File: x86_64/binopWide.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
+    imulq   (rFP,%rcx,4), %rax              # rax <- vBB * vCC
+    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_long: /* 0x9e */
+/* File: x86_64/op_div_long.S */
+/* File: x86_64/bindiv.S */
+/*
+ * 64-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    .if 1
+    GET_WIDE_VREG %rax, %rax                # rax <- vBB
+    GET_WIDE_VREG %rcx, %rcx                # rcx <- vCC
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    GET_VREG %ecx, %rcx                     # ecx <- vCC
+    .endif
+    testq   %rcx, %rcx
+    jz      common_errDivideByZero
+    cmpq  $-1, %rcx
+    je      2f
+    cqo                                    # rdx:rax <- sign extension of rax
+    idivq   %rcx
+1:
+    .if 1
+    SET_WIDE_VREG %rax, rINSTq           # vAA <- rax
+    .else
+    SET_VREG %eax, rINSTq                # vAA <- eax
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if 0
+    xorq %rax, %rax
+    .else
+    negq %rax
+    .endif
+    jmp     1b
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_long: /* 0x9f */
+/* File: x86_64/op_rem_long.S */
+/* File: x86_64/bindiv.S */
+/*
+ * 64-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    .if 1
+    GET_WIDE_VREG %rax, %rax                # rax <- vBB
+    GET_WIDE_VREG %rcx, %rcx                # rcx <- vCC
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    GET_VREG %ecx, %rcx                     # ecx <- vCC
+    .endif
+    testq   %rcx, %rcx
+    jz      common_errDivideByZero
+    cmpq  $-1, %rcx
+    je      2f
+    cqo                                    # rdx:rax <- sign extension of rax
+    idivq   %rcx
+1:
+    .if 1
+    SET_WIDE_VREG %rdx, rINSTq           # vAA <- rdx
+    .else
+    SET_VREG %edx, rINSTq                # vAA <- edx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if 1
+    xorq %rdx, %rdx
+    .else
+    negq %rdx
+    .endif
+    jmp     1b
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_long: /* 0xa0 */
+/* File: x86_64/op_and_long.S */
+/* File: x86_64/binopWide.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
+    andq    (rFP,%rcx,4), %rax              # rax <- vBB & vCC
+    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_long: /* 0xa1 */
+/* File: x86_64/op_or_long.S */
+/* File: x86_64/binopWide.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
+    orq     (rFP,%rcx,4), %rax              # rax <- vBB | vCC
+    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_long: /* 0xa2 */
+/* File: x86_64/op_xor_long.S */
+/* File: x86_64/binopWide.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
+    xorq    (rFP,%rcx,4), %rax              # rax <- vBB ^ vCC
+    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_long: /* 0xa3 */
+/* File: x86_64/op_shl_long.S */
+/* File: x86_64/binop1.S */
+/*
+ * Generic 64-bit binary operation in which both operands are loaded into
+ * registers (op0 in rax, shift count in ecx).
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %ecx, %rcx                     # ecx <- vCC
+    .if 1
+    GET_WIDE_VREG %rax, %rax                # rax <- vBB
+    salq    %cl, %rax                       # rax <- vBB << vCC
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    salq    %cl, %rax                       # rax <- vBB << vCC
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_long: /* 0xa4 */
+/* File: x86_64/op_shr_long.S */
+/* File: x86_64/binop1.S */
+/*
+ * Generic 64-bit binary operation in which both operands are loaded into
+ * registers (op0 in rax, shift count in ecx).
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %ecx, %rcx                     # ecx <- vCC
+    .if 1
+    GET_WIDE_VREG %rax, %rax                # rax <- vBB
+    sarq    %cl, %rax                       # rax <- vBB >> vCC
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    sarq    %cl, %rax                       # rax <- vBB >> vCC
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_long: /* 0xa5 */
+/* File: x86_64/op_ushr_long.S */
+/* File: x86_64/binop1.S */
+/*
+ * Generic 64-bit binary operation in which both operands are loaded into
+ * registers (op0 in rax, shift count in ecx).
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %ecx, %rcx                     # ecx <- vCC
+    .if 1
+    GET_WIDE_VREG %rax, %rax                # rax <- vBB
+    shrq    %cl, %rax                       # rax <- vBB >>> vCC
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    shrq    %cl, %rax                       # rax <- vBB >>> vCC
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_float: /* 0xa6 */
+/* File: x86_64/op_add_float.S */
+/* File: x86_64/sseBinop.S */
+    movzbq  2(rPC), %rcx                    # ecx <- BB
+    movzbq  3(rPC), %rax                    # eax <- CC
+    movss   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
+    addss VREG_ADDRESS(%rax), %xmm0
+    movss   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movss   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
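+/*
+ * The pxor/movss tail zeroes the destination vreg's shadow reference slot
+ * (VREG_REF_ADDRESS): once the slot holds a float result it must not be
+ * mistaken for an object pointer by the GC.  The same tail appears in
+ * every sseBinop expansion below.
+ */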
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_float: /* 0xa7 */
+/* File: x86_64/op_sub_float.S */
+/* File: x86_64/sseBinop.S */
+    movzbq  2(rPC), %rcx                    # ecx <- BB
+    movzbq  3(rPC), %rax                    # eax <- CC
+    movss   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
+    subss VREG_ADDRESS(%rax), %xmm0
+    movss   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movss   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_float: /* 0xa8 */
+/* File: x86_64/op_mul_float.S */
+/* File: x86_64/sseBinop.S */
+    movzbq  2(rPC), %rcx                    # ecx <- BB
+    movzbq  3(rPC), %rax                    # eax <- CC
+    movss   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
+    mulss VREG_ADDRESS(%rax), %xmm0
+    movss   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movss   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_float: /* 0xa9 */
+/* File: x86_64/op_div_float.S */
+/* File: x86_64/sseBinop.S */
+    movzbq  2(rPC), %rcx                    # ecx <- BB
+    movzbq  3(rPC), %rax                    # eax <- CC
+    movss   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
+    divss VREG_ADDRESS(%rax), %xmm0
+    movss   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movss   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_float: /* 0xaa */
+/* File: x86_64/op_rem_float.S */
+    /* rem_float vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    movzbq  2(rPC), %rax                    # eax <- BB
+    flds    VREG_ADDRESS(%rcx)              # vCC to fp stack
+    flds    VREG_ADDRESS(%rax)              # vBB to fp stack
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstps   VREG_ADDRESS(rINSTq)            # %st to vAA
+    CLEAR_REF rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
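+/*
+ * fprem computes only a partial remainder: the C2 status flag, which
+ * fstsw/sahf maps onto PF, stays set until the reduction is complete,
+ * hence the jp loop.  The result matches C's fmodf, which is also the
+ * Java rem-float semantics:
+ *
+ *   #include <math.h>
+ *   static float java_frem(float a, float b) { return fmodf(a, b); }
+ */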
+/* ------------------------------ */
+    .balign 128
+.L_op_add_double: /* 0xab */
+/* File: x86_64/op_add_double.S */
+/* File: x86_64/sseBinop.S */
+    movzbq  2(rPC), %rcx                    # ecx <- BB
+    movzbq  3(rPC), %rax                    # eax <- CC
+    movsd   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
+    addsd VREG_ADDRESS(%rax), %xmm0
+    movsd   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movsd   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_double: /* 0xac */
+/* File: x86_64/op_sub_double.S */
+/* File: x86_64/sseBinop.S */
+    movzbq  2(rPC), %rcx                    # ecx <- BB
+    movzbq  3(rPC), %rax                    # eax <- CC
+    movsd   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
+    subsd VREG_ADDRESS(%rax), %xmm0
+    movsd   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movsd   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_double: /* 0xad */
+/* File: x86_64/op_mul_double.S */
+/* File: x86_64/sseBinop.S */
+    movzbq  2(rPC), %rcx                    # ecx <- BB
+    movzbq  3(rPC), %rax                    # eax <- CC
+    movsd   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
+    mulsd VREG_ADDRESS(%rax), %xmm0
+    movsd   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movsd   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_double: /* 0xae */
+/* File: x86_64/op_div_double.S */
+/* File: x86_64/sseBinop.S */
+    movzbq  2(rPC), %rcx                    # ecx <- BB
+    movzbq  3(rPC), %rax                    # eax <- CC
+    movsd   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
+    divsd VREG_ADDRESS(%rax), %xmm0
+    movsd   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movsd   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_double: /* 0xaf */
+/* File: x86_64/op_rem_double.S */
+    /* rem_double vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    movzbq  2(rPC), %rax                    # eax <- BB
+    fldl    VREG_ADDRESS(%rcx)              # %st1 <- fp[vCC]
+    fldl    VREG_ADDRESS(%rax)              # %st0 <- fp[vBB]
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstpl   VREG_ADDRESS(rINSTq)            # fp[vAA] <- %st
+    CLEAR_WIDE_REF rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_int_2addr: /* 0xb0 */
+/* File: x86_64/op_add_int_2addr.S */
+/* File: x86_64/binop2addr.S */
+/*
+ * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    addl    %eax, (rFP,%rcx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
+    CLEAR_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
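The binop2addr template packs both registers into one code unit: the byte after the opcode holds A in its low nibble and B in its high nibble, and the result overwrites vA in place. A sketch in C, with illustrative names:

#include <stdint.h>

/* Format 12x ("op vA, vB"): decode as done above with sarl/andb. */
static void add_int_2addr(int32_t *vregs, uint32_t *refs, const uint16_t *pc) {
    unsigned ba = pc[0] >> 8;   /* rINST: high nibble B, low nibble A */
    unsigned a  = ba & 0xf;     /* andb $0xf, %cl */
    unsigned b  = ba >> 4;      /* sarl $4, rINST */
    vregs[a] += vregs[b];       /* addl %eax, (rFP,%rcx,4): read-modify-write vA */
    refs[a] = 0;                /* CLEAR_REF: vA now holds a primitive */
}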
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_int_2addr: /* 0xb1 */
+/* File: x86_64/op_sub_int_2addr.S */
+/* File: x86_64/binop2addr.S */
+/*
+ * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    subl    %eax, (rFP,%rcx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
+    CLEAR_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_int_2addr: /* 0xb2 */
+/* File: x86_64/op_mul_int_2addr.S */
+    /* mul vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    GET_VREG %eax, %rcx                     # eax <- vA
+    imull   (rFP,rINSTq,4), %eax
+    SET_VREG %eax, %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_int_2addr: /* 0xb3 */
+/* File: x86_64/op_div_int_2addr.S */
+/* File: x86_64/bindiv2addr.S */
+/*
+ * 32-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $4, %ecx                       # rcx <- B
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    GET_WIDE_VREG %rax, rINSTq              # eax <- vA
+    GET_WIDE_VREG %ecx, %rcx             # ecx <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vA
+    GET_VREG %ecx, %rcx                  # ecx <- vB
+    .endif
+    testl   %ecx, %ecx
+    jz      common_errDivideByZero
+    cmpl  $-1, %ecx
+    je      2f
+    cdq                                    # rdx:rax <- sign-extended of rax
+    idivl   %ecx
+1:
+    .if 0
+    SET_WIDE_VREG %eax, rINSTq           # vA <- result
+    .else
+    SET_VREG %eax, rINSTq                # vA <- result
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+2:
+    .if 0
+    xorl %eax, %eax
+    .else
+    negl %eax
+    .endif
+    jmp     1b
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_int_2addr: /* 0xb4 */
+/* File: x86_64/op_rem_int_2addr.S */
+/* File: x86_64/bindiv2addr.S */
+/*
+ * 32-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $4, %ecx                       # rcx <- B
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    GET_WIDE_VREG %rax, rINSTq              # eax <- vA
+    GET_WIDE_VREG %ecx, %rcx             # ecx <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vA
+    GET_VREG %ecx, %rcx                  # ecx <- vB
+    .endif
+    testl   %ecx, %ecx
+    jz      common_errDivideByZero
+    cmpl  $-1, %ecx
+    je      2f
+    cdq                                    # rdx:rax <- sign-extended of rax
+    idivl   %ecx
+1:
+    .if 0
+    SET_WIDE_VREG %edx, rINSTq           # vA <- result
+    .else
+    SET_VREG %edx, rINSTq                # vA <- result
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+2:
+    .if 1
+    xorl %edx, %edx
+    .else
+    negl %edx
+    .endif
+    jmp     1b
+
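The divisor tests in the two handlers above exist because x86's idivl faults (#DE) not only on a zero divisor but also on INT_MIN / -1, whose quotient does not fit in 32 bits. Dalvik instead defines INT_MIN / -1 == INT_MIN and x % -1 == 0, which is what the label-2 fix-up paths implement (negl for div, xorl for rem). Equivalent C, with the negation done in unsigned arithmetic to avoid signed-overflow UB:

#include <stdint.h>

static int32_t dalvik_div32(int32_t num, int32_t den) {  /* den != 0 checked earlier */
    if (den == -1)                             /* cmpl $-1, %ecx / je 2f */
        return (int32_t)(0u - (uint32_t)num);  /* wraps INT32_MIN back to itself */
    return num / den;                          /* cdq; idivl %ecx -> quotient in eax */
}

static int32_t dalvik_rem32(int32_t num, int32_t den) {  /* den != 0 checked earlier */
    if (den == -1)
        return 0;                              /* xorl %edx, %edx */
    return num % den;                          /* cdq; idivl %ecx -> remainder in edx */
}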
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_int_2addr: /* 0xb5 */
+/* File: x86_64/op_and_int_2addr.S */
+/* File: x86_64/binop2addr.S */
+/*
+ * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    andl    %eax, (rFP,%rcx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
+    CLEAR_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_int_2addr: /* 0xb6 */
+/* File: x86_64/op_or_int_2addr.S */
+/* File: x86_64/binop2addr.S */
+/*
+ * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    orl     %eax, (rFP,%rcx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
+    CLEAR_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_int_2addr: /* 0xb7 */
+/* File: x86_64/op_xor_int_2addr.S */
+/* File: x86_64/binop2addr.S */
+/*
+ * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    xorl    %eax, (rFP,%rcx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
+    CLEAR_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_int_2addr: /* 0xb8 */
+/* File: x86_64/op_shl_int_2addr.S */
+/* File: x86_64/shop2addr.S */
+/*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+    /* shift/2addr vA, vB */
+    movl    rINST, %ecx                     # ecx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # ecx <- vBB
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
+    sall    %cl, %eax                                  # ex: sarl %cl, %eax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vAA
+    sall    %cl, %eax                                  # ex: sarl %cl, %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
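Note that no explicit masking of the shift count is needed in these handlers: x86 masks 32-bit shift counts in %cl to the low 5 bits (low 6 bits for 64-bit shifts), which happens to be exactly the Dalvik rule for int and long shifts. In C the mask has to be written out:

#include <stdint.h>

static int32_t shl_int(int32_t v, int32_t count) {
    return (int32_t)((uint32_t)v << (count & 31));   /* sall %cl, %eax */
}
static uint32_t ushr_int(uint32_t v, int32_t count) {
    return v >> (count & 31);                        /* shrl %cl, %eax */
}
static int64_t shl_long(int64_t v, int32_t count) {
    return (int64_t)((uint64_t)v << (count & 63));   /* salq %cl, %rax */
}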
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_int_2addr: /* 0xb9 */
+/* File: x86_64/op_shr_int_2addr.S */
+/* File: x86_64/shop2addr.S */
+/*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+    /* shift/2addr vA, vB */
+    movl    rINST, %ecx                     # ecx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # ecx <- vBB
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
+    sarl    %cl, %eax                                  # ex: sarl %cl, %eax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vAA
+    sarl    %cl, %eax                                  # ex: sarl %cl, %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_int_2addr: /* 0xba */
+/* File: x86_64/op_ushr_int_2addr.S */
+/* File: x86_64/shop2addr.S */
+/*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+    /* shift/2addr vA, vB */
+    movl    rINST, %ecx                     # ecx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # ecx <- vBB
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
+    shrl    %cl, %eax                                  # ex: sarl %cl, %eax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vAA
+    shrl    %cl, %eax                                  # ex: sarl %cl, %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_long_2addr: /* 0xbb */
+/* File: x86_64/op_add_long_2addr.S */
+/* File: x86_64/binopWide2addr.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    addq    %rax, (rFP,%rcx,4)                                  # for ex: addq   %rax,(rFP,%rcx,4)
+    CLEAR_WIDE_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
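Wide values occupy two consecutive 32-bit vreg slots, so the 64-bit addq above can keep the same scale-4 addressing as the int handlers: (rFP,%rcx,4) is simply the low slot of the pair. A sketch with illustrative names:

#include <stdint.h>
#include <string.h>

static void add_long_2addr(uint32_t *vregs, const uint16_t *pc) {
    unsigned ba = pc[0] >> 8;
    unsigned a = ba & 0xf, b = ba >> 4;
    int64_t va, vb;
    memcpy(&va, &vregs[a], sizeof(va));  /* reads slots a and a+1 */
    memcpy(&vb, &vregs[b], sizeof(vb));
    va += vb;                            /* addq %rax, (rFP,%rcx,4) */
    memcpy(&vregs[a], &va, sizeof(va));  /* CLEAR_WIDE_REF zeroes both ref slots */
}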
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_long_2addr: /* 0xbc */
+/* File: x86_64/op_sub_long_2addr.S */
+/* File: x86_64/binopWide2addr.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    subq    %rax, (rFP,%rcx,4)                                  # for ex: addq   %rax,(rFP,%rcx,4)
+    CLEAR_WIDE_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_long_2addr: /* 0xbd */
+/* File: x86_64/op_mul_long_2addr.S */
+    /* mul vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rax, %rcx                # rax <- vA
+    imulq   (rFP,rINSTq,4), %rax
+    SET_WIDE_VREG %rax, %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_long_2addr: /* 0xbe */
+/* File: x86_64/op_div_long_2addr.S */
+/* File: x86_64/bindiv2addr.S */
+/*
+ * 64-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $4, %ecx                       # rcx <- B
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 1
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vA
+    GET_WIDE_VREG %rcx, %rcx             # rcx <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vA
+    GET_VREG %rcx, %rcx                  # ecx <- vB
+    .endif
+    testq   %rcx, %rcx
+    jz      common_errDivideByZero
+    cmpq  $-1, %rcx
+    je      2f
+    cqo                                    # rdx:rax <- sign-extended of rax
+    idivq   %rcx
+1:
+    .if 1
+    SET_WIDE_VREG %rax, rINSTq           # vA <- result
+    .else
+    SET_VREG %rax, rINSTq                # vA <- result
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+2:
+    .if 0
+    xorq %rax, %rax
+    .else
+    negq %rax
+    .endif
+    jmp     1b
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_long_2addr: /* 0xbf */
+/* File: x86_64/op_rem_long_2addr.S */
+/* File: x86_64/bindiv2addr.S */
+/*
+ * 64-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $4, %ecx                       # rcx <- B
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 1
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vA
+    GET_WIDE_VREG %rcx, %rcx             # rcx <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vA
+    GET_VREG %rcx, %rcx                  # ecx <- vB
+    .endif
+    testq   %rcx, %rcx
+    jz      common_errDivideByZero
+    cmpq  $-1, %rcx
+    je      2f
+    cqo                                    # rdx:rax <- sign-extended of rax
+    idivq   %rcx
+1:
+    .if 1
+    SET_WIDE_VREG %rdx, rINSTq           # vA <- result
+    .else
+    SET_VREG %rdx, rINSTq                # vA <- result
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+2:
+    .if 1
+    xorq %rdx, %rdx
+    .else
+    negq %rdx
+    .endif
+    jmp     1b
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_long_2addr: /* 0xc0 */
+/* File: x86_64/op_and_long_2addr.S */
+/* File: x86_64/binopWide2addr.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    andq    %rax, (rFP,%rcx,4)                                  # for ex: addq   %rax,(rFP,%rcx,4)
+    CLEAR_WIDE_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_long_2addr: /* 0xc1 */
+/* File: x86_64/op_or_long_2addr.S */
+/* File: x86_64/binopWide2addr.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    orq     %rax, (rFP,%rcx,4)                                  # for ex: addq   %rax,(rFP,%rcx,4)
+    CLEAR_WIDE_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_long_2addr: /* 0xc2 */
+/* File: x86_64/op_xor_long_2addr.S */
+/* File: x86_64/binopWide2addr.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    andb    $0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    xorq    %rax, (rFP,%rcx,4)                                  # for ex: addq   %rax,(rFP,%rcx,4)
+    CLEAR_WIDE_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_long_2addr: /* 0xc3 */
+/* File: x86_64/op_shl_long_2addr.S */
+/* File: x86_64/shop2addr.S */
+/*
+ * Generic 64-bit "shift/2addr" operation.
+ */
+    /* shift/2addr vA, vB */
+    movl    rINST, %ecx                     # ecx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # ecx <- vBB
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 1
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
+    salq    %cl, %rax                                  # ex: sarl %cl, %eax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vAA
+    salq    %cl, %rax                                  # ex: sarl %cl, %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_long_2addr: /* 0xc4 */
+/* File: x86_64/op_shr_long_2addr.S */
+/* File: x86_64/shop2addr.S */
+/*
+ * Generic 64-bit "shift/2addr" operation.
+ */
+    /* shift/2addr vA, vB */
+    movl    rINST, %ecx                     # ecx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # ecx <- vBB
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 1
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
+    sarq    %cl, %rax                                  # ex: sarl %cl, %eax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vAA
+    sarq    %cl, %rax                                  # ex: sarl %cl, %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_long_2addr: /* 0xc5 */
+/* File: x86_64/op_ushr_long_2addr.S */
+/* File: x86_64/shop2addr.S */
+/*
+ * Generic 64-bit "shift/2addr" operation.
+ */
+    /* shift/2addr vA, vB */
+    movl    rINST, %ecx                     # ecx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # ecx <- vBB
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 1
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vAA
+    shrq    %cl, %rax                                  # ex: sarl %cl, %eax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vAA
+    shrq    %cl, %rax                                  # ex: sarl %cl, %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_float_2addr: /* 0xc6 */
+/* File: x86_64/op_add_float_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+    movl    rINST, %ecx                     # ecx <- A+
+    andl    $0xf, %ecx                     # ecx <- A
+    movss VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
+    sarl    $4, rINST                      # rINST<- B
+    addss VREG_ADDRESS(rINSTq), %xmm0
+    movss %xmm0, VREG_ADDRESS(%rcx)   # vA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movss %xmm0, VREG_REF_ADDRESS(%rcx)    # clear ref slot of the destination vA
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_float_2addr: /* 0xc7 */
+/* File: x86_64/op_sub_float_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+    movl    rINST, %ecx                     # ecx <- A+
+    andl    $0xf, %ecx                     # ecx <- A
+    movss VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
+    sarl    $4, rINST                      # rINST<- B
+    subss VREG_ADDRESS(rINSTq), %xmm0
+    movss %xmm0, VREG_ADDRESS(%rcx)   # vA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movss %xmm0, VREG_REF_ADDRESS(%rcx)    # clear ref slot of the destination vA
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_float_2addr: /* 0xc8 */
+/* File: x86_64/op_mul_float_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+    movl    rINST, %ecx                     # ecx <- A+
+    andl    $0xf, %ecx                     # ecx <- A
+    movss VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
+    sarl    $4, rINST                      # rINST<- B
+    mulss VREG_ADDRESS(rINSTq), %xmm0
+    movss %xmm0, VREG_ADDRESS(%rcx)   # vA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movss %xmm0, VREG_REF_ADDRESS(%rcx)    # clear ref slot of the destination vA
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_float_2addr: /* 0xc9 */
+/* File: x86_64/op_div_float_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+    movl    rINST, %ecx                     # ecx <- A+
+    andl    $0xf, %ecx                     # ecx <- A
+    movss VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
+    sarl    $4, rINST                      # rINST<- B
+    divss VREG_ADDRESS(rINSTq), %xmm0
+    movss %xmm0, VREG_ADDRESS(%rcx)   # vA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movss %xmm0, VREG_REF_ADDRESS(%rcx)    # clear ref slot of the destination vA
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_float_2addr: /* 0xca */
+/* File: x86_64/op_rem_float_2addr.S */
+    /* rem_float/2addr vA, vB */
+    movzbq  rINSTbl, %rcx                   # ecx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    flds    VREG_ADDRESS(rINSTq)            # vB to fp stack
+    andb    $0xf, %cl                      # ecx <- A
+    flds    VREG_ADDRESS(%rcx)              # vA to fp stack
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstps   VREG_ADDRESS(%rcx)              # %st to vA
+    CLEAR_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_double_2addr: /* 0xcb */
+/* File: x86_64/op_add_double_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+    movl    rINST, %ecx                     # ecx <- A+
+    andl    $0xf, %ecx                     # ecx <- A
+    movsd VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
+    sarl    $4, rINST                      # rINST<- B
+    addsd VREG_ADDRESS(rINSTq), %xmm0
+    movsd %xmm0, VREG_ADDRESS(%rcx)   # vA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movsd %xmm0, VREG_REF_ADDRESS(%rcx)    # clear both ref slots of the wide destination vA
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_sub_double_2addr: /* 0xcc */
+/* File: x86_64/op_sub_double_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+    movl    rINST, %ecx                     # ecx <- A+
+    andl    $0xf, %ecx                     # ecx <- A
+    movsd VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
+    sarl    $4, rINST                      # rINST<- B
+    subsd VREG_ADDRESS(rINSTq), %xmm0
+    movsd %xmm0, VREG_ADDRESS(%rcx)   # vA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movsd %xmm0, VREG_REF_ADDRESS(%rcx)    # clear both ref slots of the wide destination vA
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_double_2addr: /* 0xcd */
+/* File: x86_64/op_mul_double_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+    movl    rINST, %ecx                     # ecx <- A+
+    andl    $0xf, %ecx                     # ecx <- A
+    movsd VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
+    sarl    $4, rINST                      # rINST<- B
+    mulsd VREG_ADDRESS(rINSTq), %xmm0
+    movsd %xmm0, VREG_ADDRESS(%rcx)   # vA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movsd %xmm0, VREG_REF_ADDRESS(%rcx)    # clear both ref slots of the wide destination vA
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_double_2addr: /* 0xce */
+/* File: x86_64/op_div_double_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+    movl    rINST, %ecx                     # ecx <- A+
+    andl    $0xf, %ecx                     # ecx <- A
+    movsd VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
+    sarl    $4, rINST                      # rINST<- B
+    divsd VREG_ADDRESS(rINSTq), %xmm0
+    movsd %xmm0, VREG_ADDRESS(%rcx)   # vA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movsd %xmm0, VREG_REF_ADDRESS(%rcx)    # clear both ref slots of the wide destination vA
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_double_2addr: /* 0xcf */
+/* File: x86_64/op_rem_double_2addr.S */
+    /* rem_double/2addr vA, vB */
+    movzbq  rINSTbl, %rcx                   # ecx <- A+
+    sarl    $4, rINST                      # rINST <- B
+    fldl    VREG_ADDRESS(rINSTq)            # vB to fp stack
+    andb    $0xf, %cl                      # ecx <- A
+    fldl    VREG_ADDRESS(%rcx)              # vA to fp stack
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstpl   VREG_ADDRESS(%rcx)              # %st to vA
+    CLEAR_WIDE_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_int_lit16: /* 0xd0 */
+/* File: x86_64/op_add_int_lit16.S */
+/* File: x86_64/binopLit16.S */
+/*
+ * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ *      and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movl    rINST, %eax                     # rax <- 000000BA
+    sarl    $4, %eax                       # eax <- B
+    GET_VREG %eax, %rax                     # eax <- vB
+    andb    $0xf, rINSTbl                  # rINST <- A
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    addl    %ecx, %eax                                  # for example: addl %ecx, %eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
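Format 22s keeps A and B in the nibbles of the first code unit and the literal in the second; movswl sign-extends the 16-bit CCCC, so negative literals work with a plain addl. Decode sketch with illustrative names:

#include <stdint.h>

static void add_int_lit16(int32_t *vregs, const uint16_t *pc) {
    unsigned ba = pc[0] >> 8;
    unsigned a = ba & 0xf, b = ba >> 4;
    int32_t lit = (int16_t)pc[1];   /* ssssCCCC: movswl 2(rPC), %ecx */
    vregs[a] = vregs[b] + lit;
}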
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rsub_int: /* 0xd1 */
+/* File: x86_64/op_rsub_int.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+/* File: x86_64/binopLit16.S */
+/*
+ * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ *      and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movl    rINST, %eax                     # rax <- 000000BA
+    sarl    $4, %eax                       # eax <- B
+    GET_VREG %eax, %rax                     # eax <- vB
+    andb    $0xf, rINSTbl                  # rINST <- A
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    subl    %eax, %ecx                                  # for example: addl %ecx, %eax
+    SET_VREG %ecx, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
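rsub-int is the one non-commutative case in this family, so the operands to subl are reversed and the result is taken from %ecx rather than %eax:

#include <stdint.h>

static int32_t rsub_int(int32_t vb, int16_t lit) {
    return (int32_t)lit - vb;   /* subl %eax, %ecx; SET_VREG %ecx, rINSTq */
}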
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_int_lit16: /* 0xd2 */
+/* File: x86_64/op_mul_int_lit16.S */
+/* File: x86_64/binopLit16.S */
+/*
+ * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ *      and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movl    rINST, %eax                     # rax <- 000000BA
+    sarl    $4, %eax                       # eax <- B
+    GET_VREG %eax, %rax                     # eax <- vB
+    andb    $0xf, rINSTbl                  # rINST <- A
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    imull   %ecx, %eax                                  # for example: addl %ecx, %eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_int_lit16: /* 0xd3 */
+/* File: x86_64/op_div_int_lit16.S */
+/* File: x86_64/bindivLit16.S */
+/*
+ * 32-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem/lit16 vA, vB, #+CCCC */
+    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+    movl    rINST, %eax                     # rax <- 000000BA
+    sarl    $4, %eax                       # eax <- B
+    GET_VREG %eax, %rax                     # eax <- vB
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    andb    $0xf, rINSTbl                  # rINST <- A
+    testl   %ecx, %ecx
+    jz      common_errDivideByZero
+    cmpl    $-1, %ecx
+    je      2f
+    cdq                                     # rax <- sign-extended of eax
+    idivl   %ecx
+1:
+    SET_VREG %eax, rINSTq                # vA <- result
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if 0
+    xorl    %eax, %eax
+    .else
+    negl    %eax
+    .endif
+    jmp     1b
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_int_lit16: /* 0xd4 */
+/* File: x86_64/op_rem_int_lit16.S */
+/* File: x86_64/bindivLit16.S */
+/*
+ * 32-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem/lit16 vA, vB, #+CCCC */
+    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+    movl    rINST, %eax                     # rax <- 000000BA
+    sarl    $4, %eax                       # eax <- B
+    GET_VREG %eax, %rax                     # eax <- vB
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    andb    $0xf, rINSTbl                  # rINST <- A
+    testl   %ecx, %ecx
+    jz      common_errDivideByZero
+    cmpl    $-1, %ecx
+    je      2f
+    cdq                                     # rax <- sign-extended of eax
+    idivl   %ecx
+1:
+    SET_VREG %edx, rINSTq                # vA <- result
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if 1
+    xorl    %edx, %edx
+    .else
+    negl    %edx
+    .endif
+    jmp     1b
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_int_lit16: /* 0xd5 */
+/* File: x86_64/op_and_int_lit16.S */
+/* File: x86_64/binopLit16.S */
+/*
+ * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ *      and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movl    rINST, %eax                     # rax <- 000000BA
+    sarl    $4, %eax                       # eax <- B
+    GET_VREG %eax, %rax                     # eax <- vB
+    andb    $0xf, rINSTbl                  # rINST <- A
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    andl    %ecx, %eax                                  # for example: addl %ecx, %eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_int_lit16: /* 0xd6 */
+/* File: x86_64/op_or_int_lit16.S */
+/* File: x86_64/binopLit16.S */
+/*
+ * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ *      and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movl    rINST, %eax                     # rax <- 000000BA
+    sarl    $4, %eax                       # eax <- B
+    GET_VREG %eax, %rax                     # eax <- vB
+    andb    $0xf, rINSTbl                  # rINST <- A
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    orl     %ecx, %eax                                  # for example: addl %ecx, %eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_int_lit16: /* 0xd7 */
+/* File: x86_64/op_xor_int_lit16.S */
+/* File: x86_64/binopLit16.S */
+/*
+ * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ *      and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movl    rINST, %eax                     # rax <- 000000BA
+    sarl    $4, %eax                       # eax <- B
+    GET_VREG %eax, %rax                     # eax <- vB
+    andb    $0xf, rINSTbl                  # rINST <- A
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    xorl    %ecx, %eax                                  # for example: addl %ecx, %eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_add_int_lit8: /* 0xd8 */
+/* File: x86_64/op_add_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ *      and-int/lit8, or-int/lit8, xor-int/lit8,
+ *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
+    GET_VREG %eax, %rax                     # eax <- rBB
+    addl    %ecx, %eax                                  # ex: addl %ecx,%eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
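Format 22b moves the destination up into the first code unit (vAA) and packs the source register plus a signed 8-bit literal into the second, matching the movzbq 2(rPC) / movsbl 3(rPC) pair above:

#include <stdint.h>

static void add_int_lit8(int32_t *vregs, const uint16_t *pc) {
    unsigned aa = pc[0] >> 8;            /* destination vAA, kept in rINST */
    unsigned bb = pc[1] & 0xff;          /* movzbq 2(rPC), %rax */
    int32_t lit = (int8_t)(pc[1] >> 8);  /* ssssssCC: movsbl 3(rPC), %ecx */
    vregs[aa] = vregs[bb] + lit;
}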
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rsub_int_lit8: /* 0xd9 */
+/* File: x86_64/op_rsub_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ *      and-int/lit8, or-int/lit8, xor-int/lit8,
+ *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
+    GET_VREG %eax, %rax                     # eax <- rBB
+    subl    %eax, %ecx                                  # ex: addl %ecx,%eax
+    SET_VREG %ecx, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_mul_int_lit8: /* 0xda */
+/* File: x86_64/op_mul_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ *      and-int/lit8, or-int/lit8, xor-int/lit8,
+ *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
+    GET_VREG %eax, %rax                     # eax <- rBB
+    imull   %ecx, %eax                                  # ex: addl %ecx,%eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_div_int_lit8: /* 0xdb */
+/* File: x86_64/op_div_int_lit8.S */
+/* File: x86_64/bindivLit8.S */
+/*
+ * 32-bit div/rem "lit8" binary operation.  Handles special case of
+ * op0=minint & op1=-1
+ */
+    /* div/rem/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
+    GET_VREG  %eax, %rax                    # eax <- rBB
+    testl   %ecx, %ecx
+    je      common_errDivideByZero
+    cmpl    $-1, %ecx
+    je      2f
+    cdq                                     # rax <- sign-extended of eax
+    idivl   %ecx
+1:
+    SET_VREG %eax, rINSTq                # vA <- result
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if 0
+    xorl    %eax, %eax
+    .else
+    negl    %eax
+    .endif
+    jmp     1b
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_rem_int_lit8: /* 0xdc */
+/* File: x86_64/op_rem_int_lit8.S */
+/* File: x86_64/bindivLit8.S */
+/*
+ * 32-bit div/rem "lit8" binary operation.  Handles special case of
+ * op0=minint & op1=-1
+ */
+    /* div/rem/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
+    GET_VREG  %eax, %rax                    # eax <- rBB
+    testl   %ecx, %ecx
+    je      common_errDivideByZero
+    cmpl    $-1, %ecx
+    je      2f
+    cdq                                     # rax <- sign-extended of eax
+    idivl   %ecx
+1:
+    SET_VREG %edx, rINSTq                # vA <- result
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if 1
+    xorl    %edx, %edx
+    .else
+    negl    %edx
+    .endif
+    jmp     1b
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_and_int_lit8: /* 0xdd */
+/* File: x86_64/op_and_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ *      and-int/lit8, or-int/lit8, xor-int/lit8,
+ *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
+    GET_VREG %eax, %rax                     # eax <- rBB
+    andl    %ecx, %eax                                  # ex: addl %ecx,%eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_or_int_lit8: /* 0xde */
+/* File: x86_64/op_or_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ *      and-int/lit8, or-int/lit8, xor-int/lit8,
+ *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
+    GET_VREG %eax, %rax                     # eax <- rBB
+    orl     %ecx, %eax                                  # ex: addl %ecx,%eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_xor_int_lit8: /* 0xdf */
+/* File: x86_64/op_xor_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ *      and-int/lit8, or-int/lit8, xor-int/lit8,
+ *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
+    GET_VREG %eax, %rax                     # eax <- rBB
+    xorl    %ecx, %eax                                  # ex: addl %ecx,%eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shl_int_lit8: /* 0xe0 */
+/* File: x86_64/op_shl_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ *      and-int/lit8, or-int/lit8, xor-int/lit8,
+ *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
+    GET_VREG %eax, %rax                     # eax <- rBB
+    sall    %cl, %eax                                  # ex: addl %ecx,%eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_shr_int_lit8: /* 0xe1 */
+/* File: x86_64/op_shr_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ *      and-int/lit8, or-int/lit8, xor-int/lit8,
+ *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
+    GET_VREG %eax, %rax                     # eax <- rBB
+    sarl    %cl, %eax                                  # ex: addl %ecx,%eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_ushr_int_lit8: /* 0xe2 */
+/* File: x86_64/op_ushr_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ *      and-int/lit8, or-int/lit8, xor-int/lit8,
+ *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
+    GET_VREG %eax, %rax                     # eax <- rBB
+    shrl    %cl, %eax                                  # ex: addl %ecx,%eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_quick: /* 0xe3 */
+/* File: x86_64/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+    /* op vA, vB, offset@CCCC */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    movzwq  2(rPC), %rax                    # eax <- field byte offset
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $0xf,rINSTbl                   # rINST <- A
+    .if 0
+    movq (%rcx,%rax,1), %rax
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    movl (%rcx,%rax,1), %eax
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
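The -quick opcodes are rewritten by the runtime after field resolution, so CCCC is already a raw byte offset into the object; all that is left at runtime is a null check and a load. Hedged sketch; throw_npe and the raw-pointer view of the object are stand-ins, not ART's types:

#include <stdint.h>
#include <string.h>

extern void throw_npe(void);             /* models the common_errNullObject exit */

static int32_t iget_quick(const uint8_t *obj, uint16_t offset) {
    if (obj == NULL)
        throw_npe();                     /* testl %ecx, %ecx / je */
    int32_t v;
    memcpy(&v, obj + offset, sizeof(v)); /* movl (%rcx,%rax,1), %eax */
    return v;                            /* SET_VREG: fp[A] <- value */
}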
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_wide_quick: /* 0xe4 */
+/* File: x86_64/op_iget_wide_quick.S */
+/* File: x86_64/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+    /* op vA, vB, offset@CCCC */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    movzwq  2(rPC), %rax                    # eax <- field byte offset
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $0xf,rINSTbl                   # rINST <- A
+    .if 1
+    movq (%rcx,%rax,1), %rax
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    movswl (%rcx,%rax,1), %eax
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_object_quick: /* 0xe5 */
+/* File: x86_64/op_iget_object_quick.S */
+    /* For: iget-object-quick */
+    /* op vA, vB, offset@CCCC */
+    .extern artIGetObjectFromMterp
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG0, %rcx              # vB (object we're operating on)
+    movzwl  2(rPC), OUT_32_ARG1             # OUT_32_ARG1 <- field byte offset
+    EXPORT_PC
+    callq   SYMBOL(artIGetObjectFromMterp)  # (obj, offset)
+    cmpq    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException                  # bail out
+    andb    $0xf, rINSTbl                  # rINST <- A
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
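Reference loads cannot be inlined the way primitive loads are: they go through artIGetObjectFromMterp (read barriers live there), and the handler must then inspect the thread's pending-exception slot before committing the result to the frame. A control-flow sketch with stand-in types; only artIGetObjectFromMterp is the real entry point:

#include <stddef.h>
#include <stdint.h>

extern void *artIGetObjectFromMterp(void *obj, uint32_t field_offset);

/* Returns 0 to signal the MterpException exit; 'pending_exception' models the
 * THREAD_EXCEPTION_OFFSET(rSELF) slot, 'vreg'/'vreg_ref' the paired slots that
 * SET_VREG_OBJECT keeps in sync. */
static int iget_object_quick(void *obj, uint32_t offset, void *const *pending_exception,
                             uint32_t *vreg, uint32_t *vreg_ref) {
    void *ref = artIGetObjectFromMterp(obj, offset);
    if (*pending_exception != NULL)
        return 0;                        /* cmpq $0, THREAD_EXCEPTION_OFFSET(rSELF) */
    *vreg = *vreg_ref = (uint32_t)(uintptr_t)ref;  /* heap references are 32-bit slots */
    return 1;
}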
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_quick: /* 0xe6 */
+/* File: x86_64/op_iput_quick.S */
+    /* For: iput-quick, iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $0xf, rINSTbl                  # rINST <- A
+    GET_VREG rINST, rINSTq                  # rINST <- v[A]
+    movzwq  2(rPC), %rax                    # rax <- field byte offset
+    movl    rINST, (%rcx,%rax,1)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_wide_quick: /* 0xe7 */
+/* File: x86_64/op_iput_wide_quick.S */
+    /* iput-wide-quick vA, vB, offset@CCCC */
+    movzbq    rINSTbl, %rcx                 # rcx<- BA
+    sarl      $4, %ecx                     # ecx<- B
+    GET_VREG  %ecx, %rcx                    # vB (object we're operating on)
+    testl     %ecx, %ecx                    # is object null?
+    je        common_errNullObject
+    movzwq    2(rPC), %rax                  # rax<- field byte offset
+    leaq      (%rcx,%rax,1), %rcx           # ecx<- Address of 64-bit target
+    andb      $0xf, rINSTbl                # rINST<- A
+    GET_WIDE_VREG %rax, rINSTq              # rax<- fp[A]/fp[A+1]
+    movq      %rax, (%rcx)                  # obj.field<- r0/r1
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_object_quick: /* 0xe8 */
+/* File: x86_64/op_iput_object_quick.S */
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    REFRESH_INST 232
+    movl    rINST, OUT_32_ARG2
+    call    SYMBOL(MterpIputObjectQuick)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_virtual_quick: /* 0xe9 */
+/* File: x86_64/op_invoke_virtual_quick.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeVirtualQuick
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST 233
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL(MterpInvokeVirtualQuick)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
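Both quickened invoke handlers are thin wrappers: the actual dispatch happens in a C++ runtime entry point, after which the pc advances by the invoke's 3 code units and the loop may be asked to bail to the reference interpreter (for example when a debugger attaches). A sketch of the control flow with simplified signatures; the two extern names match the symbols called above:

#include <stdint.h>

extern int MterpInvokeVirtualQuick(void *self, void *shadow_frame,
                                   const uint16_t *dex_pc, uint32_t inst);
extern int MterpShouldSwitchInterpreters(void);

/* Returns the next dex pc, or 0 for the MterpException/MterpFallback exits. */
static const uint16_t *invoke_virtual_quick(void *self, void *sf,
                                            const uint16_t *pc, uint32_t inst) {
    if (!MterpInvokeVirtualQuick(self, sf, pc, inst))
        return 0;                   /* testb %al, %al / jz MterpException */
    pc += 3;                        /* ADVANCE_PC 3: invokes are 3 units long */
    if (MterpShouldSwitchInterpreters())
        return 0;                   /* jnz MterpFallback */
    return pc;                      /* FETCH_INST / GOTO_NEXT */
}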
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_virtual_range_quick: /* 0xea */
+/* File: x86_64/op_invoke_virtual_range_quick.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern MterpInvokeVirtualQuickRange
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST 234
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL(MterpInvokeVirtualQuickRange)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_boolean_quick: /* 0xeb */
+/* File: x86_64/op_iput_boolean_quick.S */
+/* File: x86_64/op_iput_quick.S */
+    /* For: iput-quick, iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $0xf, rINSTbl                  # rINST <- A
+    GET_VREG rINST, rINSTq                  # rINST <- v[A]
+    movzwq  2(rPC), %rax                    # rax <- field byte offset
+    movb    rINSTbl, (%rcx,%rax,1)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_byte_quick: /* 0xec */
+/* File: x86_64/op_iput_byte_quick.S */
+/* File: x86_64/op_iput_quick.S */
+    /* For: iput-quick, iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $0xf, rINSTbl                  # rINST <- A
+    GET_VREG rINST, rINSTq                  # rINST <- v[A]
+    movzwq  2(rPC), %rax                    # rax <- field byte offset
+    movb    rINSTbl, (%rcx,%rax,1)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_char_quick: /* 0xed */
+/* File: x86_64/op_iput_char_quick.S */
+/* File: x86_64/op_iput_quick.S */
+    /* For: iput-quick, iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $0xf, rINSTbl                  # rINST <- A
+    GET_VREG rINST, rINSTq                  # rINST <- v[A]
+    movzwq  2(rPC), %rax                    # rax <- field byte offset
+    movw    rINSTw, (%rcx,%rax,1)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iput_short_quick: /* 0xee */
+/* File: x86_64/op_iput_short_quick.S */
+/* File: x86_64/op_iput_quick.S */
+    /* For: iput-quick, iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $0xf, rINSTbl                  # rINST <- A
+    GET_VREG rINST, rINSTq                  # rINST <- v[A]
+    movzwq  2(rPC), %rax                    # rax <- field byte offset
+    movw    rINSTw, (%rcx,%rax,1)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_boolean_quick: /* 0xef */
+/* File: x86_64/op_iget_boolean_quick.S */
+/* File: x86_64/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+    /* op vA, vB, offset@CCCC */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    movzwq  2(rPC), %rax                    # eax <- field byte offset
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $0xf,rINSTbl                   # rINST <- A
+    .if 0
+    movq (%rcx,%rax,1), %rax
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    movsbl (%rcx,%rax,1), %eax
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_byte_quick: /* 0xf0 */
+/* File: x86_64/op_iget_byte_quick.S */
+/* File: x86_64/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+    /* op vA, vB, offset@CCCC */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    movzwq  2(rPC), %rax                    # rax <- field byte offset
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    movq (%rcx,%rax,1), %rax
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    movsbl (%rcx,%rax,1), %eax
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_char_quick: /* 0xf1 */
+/* File: x86_64/op_iget_char_quick.S */
+/* File: x86_64/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+    /* op vA, vB, offset@CCCC */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    movzwq  2(rPC), %rax                    # rax <- field byte offset
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    movq (%rcx,%rax,1), %rax
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    movzwl (%rcx,%rax,1), %eax
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_iget_short_quick: /* 0xf2 */
+/* File: x86_64/op_iget_short_quick.S */
+/* File: x86_64/op_iget_quick.S */
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+    /* op vA, vB, offset@CCCC */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    movzwq  2(rPC), %rax                    # rax <- field byte offset
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $0xf, rINSTbl                  # rINST <- A
+    .if 0
+    movq (%rcx,%rax,1), %rax
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    movswl (%rcx,%rax,1), %eax
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
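+/*
+ * The iget-*-quick handlers above share one template.  The ".if 0" arm is
+ * the wide (64-bit) load, presumably enabled only when the template is
+ * instantiated for iget-wide-quick; the live ".else" arm picks the load
+ * that matches the field width: movsbl sign-extends 8 bits (boolean/byte),
+ * movzwl zero-extends 16 bits (char), and movswl sign-extends 16 bits
+ * (short), so fp[A] always receives a properly extended 32-bit value.
+ */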
+
+/* ------------------------------ */
+    .balign 128
+.L_op_invoke_lambda: /* 0xf3 */
+/* Transfer stub to alternate interpreter */
+    jmp     MterpFallback
+
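+/*
+ * The experimental lambda opcodes (0xf3, 0xf5..0xf9) and the unused slots
+ * below have no fast-path implementation here; each one simply jumps to
+ * MterpFallback, which hands the current instruction to the reference
+ * interpreter.
+ */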
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_f4: /* 0xf4 */
+/* File: x86_64/op_unused_f4.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_capture_variable: /* 0xf5 */
+/* Transfer stub to alternate interpreter */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_create_lambda: /* 0xf6 */
+/* Transfer stub to alternate interpreter */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_liberate_variable: /* 0xf7 */
+/* Transfer stub to alternate interpreter */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_box_lambda: /* 0xf8 */
+/* Transfer stub to alternate interpreter */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unbox_lambda: /* 0xf9 */
+/* Transfer stub to alternate interpreter */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fa: /* 0xfa */
+/* File: x86_64/op_unused_fa.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fb: /* 0xfb */
+/* File: x86_64/op_unused_fb.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fc: /* 0xfc */
+/* File: x86_64/op_unused_fc.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fd: /* 0xfd */
+/* File: x86_64/op_unused_fd.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_fe: /* 0xfe */
+/* File: x86_64/op_unused_fe.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+/* ------------------------------ */
+    .balign 128
+.L_op_unused_ff: /* 0xff */
+/* File: x86_64/op_unused_ff.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+    jmp     MterpFallback
+
+
+    .balign 128
+    SIZE(SYMBOL(artMterpAsmInstructionStart),SYMBOL(artMterpAsmInstructionStart))
+    .global SYMBOL(artMterpAsmInstructionEnd)
+SYMBOL(artMterpAsmInstructionEnd):
+
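+/*
+ * Layout check: 256 opcode handlers, each padded to a 128-byte slot by the
+ * .balign 128 directives, so
+ *     artMterpAsmInstructionEnd - artMterpAsmInstructionStart
+ * should come out to 256 * 128 = 32768 bytes (0x8000).
+ */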
+/*
+ * ===========================================================================
+ *  Sister implementations
+ * ===========================================================================
+ */
+    .global SYMBOL(artMterpAsmSisterStart)
+    FUNCTION_TYPE(SYMBOL(artMterpAsmSisterStart))
+    .text
+    .balign 4
+SYMBOL(artMterpAsmSisterStart):
+
+    SIZE(SYMBOL(artMterpAsmSisterStart),SYMBOL(artMterpAsmSisterStart))
+    .global SYMBOL(artMterpAsmSisterEnd)
+SYMBOL(artMterpAsmSisterEnd):
+
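+/*
+ * No out-of-line "sister" fragments were emitted for x86_64, so the
+ * sister section is empty: artMterpAsmSisterStart and artMterpAsmSisterEnd
+ * label the same address.
+ */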
+
+    .global SYMBOL(artMterpAsmAltInstructionStart)
+    FUNCTION_TYPE(SYMBOL(artMterpAsmAltInstructionStart))
+    .text
+
+SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_nop: /* 0x00 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(0*128)
+
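+/*
+ * Address arithmetic for the jump above: the main handlers start at
+ * .L_op_nop and each occupies a 128-byte slot, so .L_op_nop+(N*128) is the
+ * real handler for opcode N.  For this stub N == 0 (nop); the move stub
+ * that follows uses (1*128), and so on through opcode 0xff.
+ */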
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move: /* 0x01 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(1*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_from16: /* 0x02 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(2*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_16: /* 0x03 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(3*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_wide: /* 0x04 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(4*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_wide_from16: /* 0x05 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(5*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_wide_16: /* 0x06 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(6*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_object: /* 0x07 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(7*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_object_from16: /* 0x08 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(8*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_object_16: /* 0x09 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(9*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_result: /* 0x0a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(10*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_result_wide: /* 0x0b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(11*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_result_object: /* 0x0c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(12*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_move_exception: /* 0x0d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(13*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return_void: /* 0x0e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(14*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return: /* 0x0f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(15*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return_wide: /* 0x10 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(16*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return_object: /* 0x11 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(17*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_4: /* 0x12 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(18*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_16: /* 0x13 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(19*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const: /* 0x14 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(20*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_high16: /* 0x15 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(21*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_wide_16: /* 0x16 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(22*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_wide_32: /* 0x17 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(23*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_wide: /* 0x18 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(24*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_wide_high16: /* 0x19 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(25*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_string: /* 0x1a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(26*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_string_jumbo: /* 0x1b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(27*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_const_class: /* 0x1c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(28*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_monitor_enter: /* 0x1d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(29*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_monitor_exit: /* 0x1e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(30*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_check_cast: /* 0x1f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(31*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_instance_of: /* 0x20 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(32*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_array_length: /* 0x21 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(33*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_new_instance: /* 0x22 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(34*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_new_array: /* 0x23 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(35*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_filled_new_array: /* 0x24 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(36*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_filled_new_array_range: /* 0x25 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(37*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_fill_array_data: /* 0x26 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(38*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_throw: /* 0x27 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(39*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_goto: /* 0x28 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(40*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_goto_16: /* 0x29 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(41*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_goto_32: /* 0x2a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(42*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_packed_switch: /* 0x2b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(43*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sparse_switch: /* 0x2c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(44*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmpl_float: /* 0x2d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(45*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmpg_float: /* 0x2e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(46*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmpl_double: /* 0x2f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(47*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmpg_double: /* 0x30 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(48*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_cmp_long: /* 0x31 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(49*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_eq: /* 0x32 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(50*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_ne: /* 0x33 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(51*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_lt: /* 0x34 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(52*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_ge: /* 0x35 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(53*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_gt: /* 0x36 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(54*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_le: /* 0x37 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(55*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_eqz: /* 0x38 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(56*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_nez: /* 0x39 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(57*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_ltz: /* 0x3a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(58*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_gez: /* 0x3b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(59*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_gtz: /* 0x3c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(60*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_if_lez: /* 0x3d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(61*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_3e: /* 0x3e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(62*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_3f: /* 0x3f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(63*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_40: /* 0x40 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(64*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_41: /* 0x41 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(65*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_42: /* 0x42 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(66*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_43: /* 0x43 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(67*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget: /* 0x44 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(68*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_wide: /* 0x45 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(69*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_object: /* 0x46 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(70*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_boolean: /* 0x47 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(71*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_byte: /* 0x48 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(72*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_char: /* 0x49 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(73*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aget_short: /* 0x4a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(74*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput: /* 0x4b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(75*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_wide: /* 0x4c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(76*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_object: /* 0x4d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(77*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_boolean: /* 0x4e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(78*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_byte: /* 0x4f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(79*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_char: /* 0x50 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(80*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_aput_short: /* 0x51 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(81*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget: /* 0x52 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(82*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_wide: /* 0x53 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(83*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_object: /* 0x54 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(84*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_boolean: /* 0x55 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(85*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_byte: /* 0x56 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(86*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_char: /* 0x57 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(87*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_short: /* 0x58 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(88*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput: /* 0x59 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(89*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_wide: /* 0x5a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(90*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_object: /* 0x5b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(91*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_boolean: /* 0x5c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(92*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_byte: /* 0x5d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(93*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_char: /* 0x5e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(94*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_short: /* 0x5f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(95*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget: /* 0x60 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(96*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_wide: /* 0x61 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(97*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_object: /* 0x62 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(98*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_boolean: /* 0x63 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(99*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_byte: /* 0x64 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(100*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_char: /* 0x65 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(101*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sget_short: /* 0x66 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(102*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput: /* 0x67 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(103*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_wide: /* 0x68 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(104*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_object: /* 0x69 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(105*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_boolean: /* 0x6a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(106*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_byte: /* 0x6b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(107*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_char: /* 0x6c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(108*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sput_short: /* 0x6d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(109*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_virtual: /* 0x6e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(110*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_super: /* 0x6f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(111*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_direct: /* 0x70 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(112*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_static: /* 0x71 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(113*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_interface: /* 0x72 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(114*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_return_void_no_barrier: /* 0x73 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(115*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_virtual_range: /* 0x74 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(116*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_super_range: /* 0x75 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(117*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_direct_range: /* 0x76 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(118*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_static_range: /* 0x77 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(119*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_interface_range: /* 0x78 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(120*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_79: /* 0x79 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(121*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_7a: /* 0x7a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(122*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_neg_int: /* 0x7b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(123*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_not_int: /* 0x7c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(124*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_neg_long: /* 0x7d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(125*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_not_long: /* 0x7e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(126*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_neg_float: /* 0x7f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(127*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_neg_double: /* 0x80 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(128*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_long: /* 0x81 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(129*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_float: /* 0x82 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(130*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_double: /* 0x83 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(131*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_long_to_int: /* 0x84 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(132*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_long_to_float: /* 0x85 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(133*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_long_to_double: /* 0x86 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(134*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_float_to_int: /* 0x87 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(135*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_float_to_long: /* 0x88 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(136*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_float_to_double: /* 0x89 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(137*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_double_to_int: /* 0x8a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(138*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_double_to_long: /* 0x8b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(139*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_double_to_float: /* 0x8c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(140*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_byte: /* 0x8d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(141*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_char: /* 0x8e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(142*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_int_to_short: /* 0x8f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(143*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_int: /* 0x90 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(144*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_int: /* 0x91 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(145*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_int: /* 0x92 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(146*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_int: /* 0x93 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(147*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_int: /* 0x94 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(148*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_int: /* 0x95 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(149*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_int: /* 0x96 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(150*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_int: /* 0x97 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(151*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_int: /* 0x98 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(152*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_int: /* 0x99 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(153*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_int: /* 0x9a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(154*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_long: /* 0x9b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(155*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_long: /* 0x9c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(156*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_long: /* 0x9d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(157*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_long: /* 0x9e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(158*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_long: /* 0x9f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(159*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_long: /* 0xa0 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(160*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_long: /* 0xa1 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(161*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_long: /* 0xa2 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(162*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_long: /* 0xa3 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(163*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_long: /* 0xa4 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(164*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_long: /* 0xa5 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(165*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_float: /* 0xa6 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(166*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_float: /* 0xa7 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(167*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_float: /* 0xa8 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(168*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_float: /* 0xa9 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(169*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_float: /* 0xaa */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(170*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_double: /* 0xab */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(171*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_double: /* 0xac */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(172*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_double: /* 0xad */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(173*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_double: /* 0xae */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(174*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_double: /* 0xaf */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(175*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_int_2addr: /* 0xb0 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(176*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_int_2addr: /* 0xb1 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(177*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_int_2addr: /* 0xb2 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(178*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_int_2addr: /* 0xb3 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(179*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_int_2addr: /* 0xb4 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(180*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_int_2addr: /* 0xb5 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(181*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_int_2addr: /* 0xb6 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(182*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_int_2addr: /* 0xb7 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(183*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_int_2addr: /* 0xb8 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(184*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_int_2addr: /* 0xb9 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(185*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_int_2addr: /* 0xba */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(186*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_long_2addr: /* 0xbb */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(187*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_long_2addr: /* 0xbc */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(188*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_long_2addr: /* 0xbd */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(189*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_long_2addr: /* 0xbe */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(190*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_long_2addr: /* 0xbf */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(191*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_long_2addr: /* 0xc0 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(192*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_long_2addr: /* 0xc1 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(193*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_long_2addr: /* 0xc2 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(194*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_long_2addr: /* 0xc3 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(195*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_long_2addr: /* 0xc4 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(196*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_long_2addr: /* 0xc5 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(197*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_float_2addr: /* 0xc6 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(198*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_float_2addr: /* 0xc7 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(199*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_float_2addr: /* 0xc8 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(200*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_float_2addr: /* 0xc9 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(201*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_float_2addr: /* 0xca */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(202*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_double_2addr: /* 0xcb */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(203*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_sub_double_2addr: /* 0xcc */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(204*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_double_2addr: /* 0xcd */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(205*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_double_2addr: /* 0xce */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(206*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_double_2addr: /* 0xcf */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(207*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_int_lit16: /* 0xd0 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(208*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rsub_int: /* 0xd1 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(209*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_int_lit16: /* 0xd2 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(210*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_int_lit16: /* 0xd3 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(211*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_int_lit16: /* 0xd4 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(212*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_int_lit16: /* 0xd5 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(213*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_int_lit16: /* 0xd6 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(214*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_int_lit16: /* 0xd7 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(215*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_add_int_lit8: /* 0xd8 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(216*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rsub_int_lit8: /* 0xd9 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(217*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_mul_int_lit8: /* 0xda */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(218*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_div_int_lit8: /* 0xdb */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(219*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_rem_int_lit8: /* 0xdc */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(220*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_and_int_lit8: /* 0xdd */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(221*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_or_int_lit8: /* 0xde */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(222*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_xor_int_lit8: /* 0xdf */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(223*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shl_int_lit8: /* 0xe0 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(224*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_shr_int_lit8: /* 0xe1 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(225*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_ushr_int_lit8: /* 0xe2 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(226*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_quick: /* 0xe3 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(227*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_wide_quick: /* 0xe4 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(228*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_object_quick: /* 0xe5 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(229*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_quick: /* 0xe6 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(230*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_wide_quick: /* 0xe7 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(231*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_object_quick: /* 0xe8 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(232*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(233*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(234*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_boolean_quick: /* 0xeb */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(235*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_byte_quick: /* 0xec */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(236*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_char_quick: /* 0xed */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(237*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iput_short_quick: /* 0xee */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(238*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_boolean_quick: /* 0xef */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(239*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_byte_quick: /* 0xf0 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(240*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_char_quick: /* 0xf1 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(241*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_iget_short_quick: /* 0xf2 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(242*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_invoke_lambda: /* 0xf3 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(243*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_f4: /* 0xf4 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(244*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_capture_variable: /* 0xf5 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(245*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_create_lambda: /* 0xf6 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(246*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_liberate_variable: /* 0xf7 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(247*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_box_lambda: /* 0xf8 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(248*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unbox_lambda: /* 0xf9 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(249*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fa: /* 0xfa */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(250*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fb: /* 0xfb */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(251*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fc: /* 0xfc */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(252*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fd: /* 0xfd */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(253*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_fe: /* 0xfe */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(254*128)
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_op_unused_ff: /* 0xff */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(255*128)
+
+    .balign 128
+    SIZE(SYMBOL(artMterpAsmAltInstructionStart),SYMBOL(artMterpAsmAltInstructionStart))
+    .global SYMBOL(artMterpAsmAltInstructionEnd)
+SYMBOL(artMterpAsmAltInstructionEnd):
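
Each ALT stub above occupies one .balign 128 slot and ends with "jmp .L_op_nop+(opnum*128)", which lands on the real handler for the same opcode in the main table. A minimal C sketch of that address arithmetic, with an illustrative base address (nothing below is from the runtime):

    #include <stdint.h>
    #include <stdio.h>

    #define HANDLER_SIZE 128  /* each handler slot is .balign 128 */

    /* Address of the real handler for `opcode`, given the base of the
     * main handler table (the address of .L_op_nop). */
    static uintptr_t main_handler(uintptr_t op_nop_base, unsigned opcode) {
        return op_nop_base + (uintptr_t)opcode * HANDLER_SIZE;
    }

    int main(void) {
        uintptr_t base = 0x40000;  /* illustrative stand-in for .L_op_nop */
        /* The 0xff stub above ends with "jmp .L_op_nop+(255*128)". */
        printf("%#lx\n", (unsigned long)main_handler(base, 0xff));
        return 0;
    }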
+/* File: x86_64/footer.S */
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align  2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogDivideByZeroException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errArrayIndex:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogArrayIndexException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errNegativeArraySize:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogNegativeArraySizeException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errNoSuchMethod:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogNoSuchMethodException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errNullObject:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogNullObjectException)
+#endif
+    jmp     MterpCommonFallback
+
+common_exceptionThrown:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogExceptionThrownException)
+#endif
+    jmp     MterpCommonFallback
+
+MterpSuspendFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movl    THREAD_FLAGS_OFFSET(rSELF), OUT_32_ARG2
+    call    SYMBOL(MterpLogSuspendFallback)
+#endif
+    jmp     MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary.  If there is a pending
+ * exception, handle it.  Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+    cmpq    $0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jz      MterpFallback
+    /* intentional fallthrough - handle pending exception. */
+
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here, or do we need to bail out to the caller?
+ */
+MterpException:
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpHandleException)
+    testb   %al, %al
+    jz      MterpExceptionReturn
+    movq    OFF_FP_CODE_ITEM(rFP), %rax
+    movl    OFF_FP_DEX_PC(rFP), %ecx
+    leaq    CODEITEM_INSNS_OFFSET(%rax), rPC
+    leaq    (rPC, %rcx, 2), rPC
+    movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
+    /* Do we need to switch interpreters? */
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    /* resume execution at catch block */
+    REFRESH_IBASE
+    FETCH_INST
+    GOTO_NEXT
+    /* NOTE: no fallthrough */
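
The catch dispatch above compresses to the following C sketch; the helpers are stubs standing in for MterpHandleException and MterpShouldSwitchInterpreters, whose real signatures live in the runtime and may differ:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool handle_exception(void) { return true;  }  /* stub */
    static bool should_switch(void)    { return false; }  /* stub */

    /* Returns the code-unit pointer to resume at, or NULL to leave mterp. */
    static const uint16_t *resume_after_exception(const uint16_t *insns,
                                                  uint32_t dex_pc) {
        if (!handle_exception())
            return NULL;         /* MterpExceptionReturn: unwind to the caller */
        /* rPC <- insns + dex_pc; dex pcs count 16-bit code units, hence
         * the scale of 2 in "leaq (rPC, %rcx, 2), rPC". */
        const uint16_t *pc = insns + dex_pc;
        if (should_switch())
            return NULL;         /* MterpFallback to the reference interpreter */
        return pc;               /* REFRESH_IBASE / FETCH_INST / GOTO_NEXT */
    }

    int main(void) {
        static const uint16_t insns[8] = {0};
        return resume_after_exception(insns, 3) == &insns[3] ? 0 : 1;
    }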
+
+/*
+ * Check for a pending suspend request.  Assumes rINST is already loaded and rPC
+ * advanced; we still need to fetch the opcode and branch to it.  The thread
+ * flags are read from THREAD_FLAGS_OFFSET(rSELF), not from lr as on Arm.
+ */
+MterpCheckSuspendAndContinue:
+    REFRESH_IBASE
+    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+    jz      1f
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    GOTO_NEXT
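
The single testl covers both request bits at once; in C terms (the bit values below are placeholders for the generated THREAD_SUSPEND_REQUEST / THREAD_CHECKPOINT_REQUEST constants):

    #include <stdint.h>

    #define SUSPEND_REQUEST    (1u << 0)  /* placeholder value */
    #define CHECKPOINT_REQUEST (1u << 1)  /* placeholder value */

    /* The slow path (EXPORT_PC + MterpSuspendCheck) runs only when
     * either request bit is set in the thread's flags. */
    static int needs_suspend_check(uint32_t thread_flags) {
        return (thread_flags & (SUSPEND_REQUEST | CHECKPOINT_REQUEST)) != 0;
    }

    int main(void) { return needs_suspend_check(CHECKPOINT_REQUEST) ? 0 : 1; }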
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movl    rINST, OUT_32_ARG2
+    call    SYMBOL(MterpLogOSR)
+#endif
+    movl    $1, %eax
+    jmp     MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+    xorl    %eax, %eax
+    jmp     MterpDone
+
+/*
+ * On entry:
+ *  uint32_t* rFP  (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+    movl    $1, %eax
+    jmp     MterpDone
+MterpReturn:
+    movq    OFF_FP_RESULT_REGISTER(rFP), %rdx
+    movq    %rax, (%rdx)
+    movl    $1, %eax
+MterpDone:
+    /* pop up frame */
+    addq    $FRAME_SIZE, %rsp
+    .cfi_adjust_cfa_offset -FRAME_SIZE
+
+    /* Restore callee save register */
+    POP %r15
+    POP %r14
+    POP %r13
+    POP %r12
+    POP %rbp
+    POP %rbx
+    ret
+    .cfi_endproc
+    SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
+
diff --git a/runtime/interpreter/mterp/rebuild.sh b/runtime/interpreter/mterp/rebuild.sh
index ac87945..ca3dcd9 100755
--- a/runtime/interpreter/mterp/rebuild.sh
+++ b/runtime/interpreter/mterp/rebuild.sh
@@ -20,5 +20,4 @@
 #
 set -e
 
-# for arch in arm x86 mips arm64 x86_64 mips64; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
-for arch in arm x86 arm64 ; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
+for arch in arm x86 mips arm64 x86_64 mips64; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
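Restoring the full six-architecture loop means one run of this script regenerates the interpreter for every arch, including the x86_64 output introduced by this change.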
diff --git a/runtime/interpreter/mterp/x86/bincmp.S b/runtime/interpreter/mterp/x86/bincmp.S
index 27cf6ea..c72a5cf 100644
--- a/runtime/interpreter/mterp/x86/bincmp.S
+++ b/runtime/interpreter/mterp/x86/bincmp.S
@@ -11,18 +11,13 @@
     GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $$4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
-    movl    $$2, %eax                       # assume not taken
+    movl    $$2, rINST                      # assume not taken
     j${revcmp}   1f
-    movswl  2(rPC),%eax                     # Get signed branch offset
+    movswl  2(rPC), rINST                   # Get signed branch offset
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
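
Rewritten this way, the displacement lives in rINST (in 16-bit code units) so MTERP_PROFILE_BRANCH can report it, and any non-forward branch falls into the suspend check. A C sketch for the "if-le" flavor (revcmp = "gt"); the names are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    /* disp is in 16-bit code units; pointer arithmetic on uint16_t*
     * doubles it, matching "addl rINST, rINST; leal (rPC, rINST)". */
    static const uint16_t *if_le_next(const uint16_t *pc, int32_t va,
                                      int32_t vb, int16_t cccc,
                                      bool *check_suspend) {
        int32_t disp = (va <= vb) ? cccc : 2;  /* 2 = fall through (not taken) */
        *check_suspend = (disp <= 0);          /* jle MterpCheckSuspendAndContinue */
        return pc + disp;
    }

    int main(void) {
        bool suspend;
        const uint16_t code[4] = {0};
        return if_le_next(code, 1, 2, 2, &suspend) == &code[2] ? 0 : 1;
    }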
diff --git a/runtime/interpreter/mterp/x86/entry.S b/runtime/interpreter/mterp/x86/entry.S
index b83f7e1..785efdc 100644
--- a/runtime/interpreter/mterp/x86/entry.S
+++ b/runtime/interpreter/mterp/x86/entry.S
@@ -32,16 +32,18 @@
 
 SYMBOL(ExecuteMterpImpl):
     .cfi_startproc
+    .cfi_def_cfa esp, 4
+
+    /* Spill callee save regs */
+    PUSH    %ebp
+    PUSH    %edi
+    PUSH    %esi
+    PUSH    %ebx
+
     /* Allocate frame */
     subl    $$FRAME_SIZE, %esp
     .cfi_adjust_cfa_offset FRAME_SIZE
 
-    /* Spill callee save regs */
-    movl    %ebp, EBP_SPILL(%esp)
-    movl    %edi, EDI_SPILL(%esp)
-    movl    %esi, ESI_SPILL(%esp)
-    movl    %ebx, EBX_SPILL(%esp)
-
     /* Load ShadowFrame pointer */
     movl    IN_ARG2(%esp), %edx
 
diff --git a/runtime/interpreter/mterp/x86/footer.S b/runtime/interpreter/mterp/x86/footer.S
index a1532fa..3965ecd 100644
--- a/runtime/interpreter/mterp/x86/footer.S
+++ b/runtime/interpreter/mterp/x86/footer.S
@@ -12,7 +12,6 @@
  * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
  * TUNING: for consistency, we may want to just go ahead and handle these here.
  */
-#define MTERP_LOGGING 0
 common_errDivideByZero:
     EXPORT_PC
 #if MTERP_LOGGING
@@ -116,13 +115,17 @@
     call    SYMBOL(MterpHandleException)
     testb   %al, %al
     jz      MterpExceptionReturn
-    REFRESH_IBASE
     movl    OFF_FP_CODE_ITEM(rFP), %eax
     movl    OFF_FP_DEX_PC(rFP), %ecx
     lea     CODEITEM_INSNS_OFFSET(%eax), rPC
     lea     (rPC, %ecx, 2), rPC
     movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
+    /* Do we need to switch interpreters? */
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     /* resume execution at catch block */
+    REFRESH_IBASE
     FETCH_INST
     GOTO_NEXT
     /* NOTE: no fallthrough */
@@ -143,6 +146,21 @@
     GOTO_NEXT
 
 /*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+    movl    rINST, OUT_ARG2(%esp)
+    call    SYMBOL(MterpLogOSR)
+#endif
+    movl    $$1, %eax
+    jmp     MterpDone
+
+/*
  * Bail out to reference interpreter.
  */
 MterpFallback:
@@ -171,16 +189,15 @@
     movl    %ecx, 4(%edx)
     mov     $$1, %eax
 MterpDone:
-    /* Restore callee save register */
-    movl    EBP_SPILL(%esp), %ebp
-    movl    EDI_SPILL(%esp), %edi
-    movl    ESI_SPILL(%esp), %esi
-    movl    EBX_SPILL(%esp), %ebx
-
     /* pop up frame */
     addl    $$FRAME_SIZE, %esp
     .cfi_adjust_cfa_offset -FRAME_SIZE
-    ret
 
+    /* Restore callee save register */
+    POP     %ebx
+    POP     %esi
+    POP     %edi
+    POP     %ebp
+    ret
     .cfi_endproc
     SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
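
Since the callee-save registers are now pushed before the frame is allocated, the epilogue tears down in the opposite order: FRAME_SIZE is added back to %esp first, then the four registers are popped in reverse push order, each POP adjusting the CFA and restoring the register's unwind rule.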
diff --git a/runtime/interpreter/mterp/x86/header.S b/runtime/interpreter/mterp/x86/header.S
index 3fbbbf9..5729b90 100644
--- a/runtime/interpreter/mterp/x86/header.S
+++ b/runtime/interpreter/mterp/x86/header.S
@@ -105,25 +105,32 @@
     #define SYMBOL(name) name
 #endif
 
+.macro PUSH _reg
+    pushl \_reg
+    .cfi_adjust_cfa_offset 4
+    .cfi_rel_offset \_reg, 0
+.endm
+
+.macro POP _reg
+    popl \_reg
+    .cfi_adjust_cfa_offset -4
+    .cfi_restore \_reg
+.endm
+
 /* Frame size must be 16-byte aligned.
- * Remember about 4 bytes for return address
+ * Account for the 4-byte return address plus 4 * 4 bytes of callee-save spills.
  */
-#define FRAME_SIZE     44
+#define FRAME_SIZE     28
 
 /* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3        (FRAME_SIZE + 16)
-#define IN_ARG2        (FRAME_SIZE + 12)
-#define IN_ARG1        (FRAME_SIZE +  8)
-#define IN_ARG0        (FRAME_SIZE +  4)
-#define CALLER_RP      (FRAME_SIZE +  0)
+#define IN_ARG3        (FRAME_SIZE + 16 + 16)
+#define IN_ARG2        (FRAME_SIZE + 16 + 12)
+#define IN_ARG1        (FRAME_SIZE + 16 +  8)
+#define IN_ARG0        (FRAME_SIZE + 16 +  4)
 /* Spill offsets relative to %esp */
-#define EBP_SPILL      (FRAME_SIZE -  4)
-#define EDI_SPILL      (FRAME_SIZE -  8)
-#define ESI_SPILL      (FRAME_SIZE - 12)
-#define EBX_SPILL      (FRAME_SIZE - 16)
-#define LOCAL0         (FRAME_SIZE - 20)
-#define LOCAL1         (FRAME_SIZE - 24)
-#define LOCAL2         (FRAME_SIZE - 28)
+#define LOCAL0         (FRAME_SIZE -  4)
+#define LOCAL1         (FRAME_SIZE -  8)
+#define LOCAL2         (FRAME_SIZE - 12)
 /* Out Arg offsets, relative to %esp */
 #define OUT_ARG3       ( 12)
 #define OUT_ARG2       (  8)
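
The arithmetic still lines up: the old layout reserved 44 bytes of frame plus the 4-byte return address (48 bytes, 16-byte aligned) with the spills inside the frame; the new one pushes the four callee-save registers first, giving 4 + 4*4 + 28 = 48 bytes again, which is why every IN_ARG offset grows by the 16 bytes of spills now sitting between the frame and the incoming arguments.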
@@ -156,13 +163,26 @@
 #define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
 #define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
 
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
 /*
- *
- * The reference interpreter performs explicit suspect checks, which is somewhat wasteful.
- * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
- * mterp should do so as well.
+ * Profile branch. rINST should contain the offset. %eax is scratch.
  */
-#define MTERP_SUSPEND 0
+.macro MTERP_PROFILE_BRANCH
+#if MTERP_PROFILE_BRANCHES
+    EXPORT_PC
+    movl    rSELF, %eax
+    movl    %eax, OUT_ARG0(%esp)
+    leal    OFF_FP_SHADOWFRAME(rFP), %eax
+    movl    %eax, OUT_ARG1(%esp)
+    movl    rINST, OUT_ARG2(%esp)
+    call    SYMBOL(MterpProfileBranch)
+    testb   %al, %al
+    jnz     MterpOnStackReplacement
+    RESTORE_IBASE
+#endif
+.endm
 
 /*
  * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
diff --git a/runtime/interpreter/mterp/x86/invoke.S b/runtime/interpreter/mterp/x86/invoke.S
index bbd88cf..c23053b 100644
--- a/runtime/interpreter/mterp/x86/invoke.S
+++ b/runtime/interpreter/mterp/x86/invoke.S
@@ -16,5 +16,10 @@
     call    SYMBOL($helper)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
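
The ordering matters: rPC is advanced past the 3-code-unit invoke before asking whether to leave mterp, so the reference interpreter resumes after the call rather than re-executing it. A stubbed C rendering (helper names follow the assembly, signatures are assumptions):

    #include <stdbool.h>
    #include <stdint.h>

    static bool invoke_helper(void) { return true;  }  /* false => exception     */
    static bool should_switch(void) { return false; }  /* true  => MterpFallback */

    static const uint16_t *after_invoke(const uint16_t *pc, bool *bail) {
        if (!invoke_helper()) { *bail = true; return pc; }  /* MterpException */
        pc += 3;                    /* ADVANCE_PC 3 */
        *bail = should_switch();    /* jnz MterpFallback */
        return pc;                  /* RESTORE_IBASE; FETCH_INST; GOTO_NEXT */
    }

    int main(void) {
        bool bail = false;
        const uint16_t code[4] = {0};
        return (after_invoke(code, &bail) == &code[3] && !bail) ? 0 : 1;
    }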
diff --git a/runtime/interpreter/mterp/x86/op_goto.S b/runtime/interpreter/mterp/x86/op_goto.S
index 411399d..9a87361 100644
--- a/runtime/interpreter/mterp/x86/op_goto.S
+++ b/runtime/interpreter/mterp/x86/op_goto.S
@@ -5,15 +5,10 @@
  * double to get a byte offset.
  */
     /* goto +AA */
-    movsbl  rINSTbl, %eax                   # eax <- ssssssAA
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    movsbl  rINSTbl, rINST                  # rINST <- ssssssAA
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      1f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
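
The same rINST-plus-MTERP_PROFILE_BRANCH rewrite repeats below for op_goto_16, op_goto_32, op_packed_switch, and zcmp.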
diff --git a/runtime/interpreter/mterp/x86/op_goto_16.S b/runtime/interpreter/mterp/x86/op_goto_16.S
index 4f04f9e..a25c31b 100644
--- a/runtime/interpreter/mterp/x86/op_goto_16.S
+++ b/runtime/interpreter/mterp/x86/op_goto_16.S
@@ -5,15 +5,10 @@
  * double to get a byte offset.
  */
     /* goto/16 +AAAA */
-    movswl  2(rPC), %eax                    # eax <- ssssAAAA
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    movswl  2(rPC), rINST                   # rINST <- ssssAAAA
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      1f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86/op_goto_32.S b/runtime/interpreter/mterp/x86/op_goto_32.S
index 48f6e5a..159128b 100644
--- a/runtime/interpreter/mterp/x86/op_goto_32.S
+++ b/runtime/interpreter/mterp/x86/op_goto_32.S
@@ -10,15 +10,10 @@
  * offset to byte offset.
  */
     /* goto/32 +AAAAAAAA */
-    movl    2(rPC), %eax                    # eax <- AAAAAAAA
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    movl    2(rPC), rINST                   # rINST <- AAAAAAAA
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- AA * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      1f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86/op_packed_switch.S b/runtime/interpreter/mterp/x86/op_packed_switch.S
index 230b58e..e33cf75 100644
--- a/runtime/interpreter/mterp/x86/op_packed_switch.S
+++ b/runtime/interpreter/mterp/x86/op_packed_switch.S
@@ -15,15 +15,11 @@
     movl    %eax, OUT_ARG1(%esp)            # ARG1 <- vAA
     movl    %ecx, OUT_ARG0(%esp)            # ARG0 <- switchData
     call    SYMBOL($func)
-    addl    %eax, %eax
-    leal    (rPC, %eax), rPC
+    movl    %eax, rINST
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST
+    leal    (rPC, rINST), rPC
     FETCH_INST
     REFRESH_IBASE
-    jg      1f
-#if MTERP_SUSPEND
-    #     REFRESH_IBASE - we did it above.
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue
     GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86/zcmp.S b/runtime/interpreter/mterp/x86/zcmp.S
index 5ce4f0f..0f28d1a 100644
--- a/runtime/interpreter/mterp/x86/zcmp.S
+++ b/runtime/interpreter/mterp/x86/zcmp.S
@@ -7,18 +7,13 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $$0, VREG_ADDRESS(rINST)        # compare (vA, 0)
-    movl    $$2, %eax                       # assume branch not taken
+    movl    $$2, rINST
     j${revcmp}   1f
-    movswl  2(rPC),%eax                     # fetch signed displacement
+    movswl  2(rPC), rINST                   # fetch signed displacement
 1:
-    addl    %eax, %eax                      # eax <- AA * 2
-    leal    (rPC, %eax), rPC
+    MTERP_PROFILE_BRANCH
+    addl    rINST, rINST                    # rINST <- BBBB * 2
+    leal    (rPC, rINST), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # BBBB * 2 <= 0 => suspend check
     GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/alt_stub.S b/runtime/interpreter/mterp/x86_64/alt_stub.S
new file mode 100644
index 0000000..6fcebbb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/alt_stub.S
@@ -0,0 +1,17 @@
+/*
+ * Inter-instruction transfer stub.  Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+    .extern MterpCheckBefore
+    EXPORT_PC
+    REFRESH_IBASE
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
+    jmp     .L_op_nop+(${opnum}*${handler_size_bytes})
diff --git a/runtime/interpreter/mterp/x86_64/bincmp.S b/runtime/interpreter/mterp/x86_64/bincmp.S
new file mode 100644
index 0000000..a16050b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/bincmp.S
@@ -0,0 +1,23 @@
+/*
+ * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+    /* if-cmp vA, vB, +CCCC */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # rcx <- A
+    GET_VREG %eax, %rcx                     # eax <- vA
+    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
+    movl    $$2, rINST                      # assume not taken
+    j${revcmp}   1f
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- CCCC * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # CCCC * 2 <= 0 => suspend check
+    GOTO_NEXT
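The "revcmp" scheme reads naturally in C: branch over the displacement load on the
reverse condition, so the fall-through path handles the taken branch. A minimal sketch
for if-le (if_le and the vreg values are illustrative, not ART code):

    #include <stdint.h>

    /* if-le vA, vB, +CCCC with revcmp "gt": keep displacement 2 (next insn)
     * when vA > vB, otherwise load the signed CCCC branch offset. */
    static const uint16_t* if_le(const uint16_t* pc, int32_t va, int32_t vb) {
        int16_t disp = 2;                /* assume branch not taken */
        if (!(va > vb)) {
            disp = (int16_t)pc[1];       /* movswq 2(rPC), rINSTq */
        }
        return pc + disp;                /* code units; the asm doubles to bytes */
    }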
diff --git a/runtime/interpreter/mterp/x86_64/bindiv.S b/runtime/interpreter/mterp/x86_64/bindiv.S
new file mode 100644
index 0000000..e10d1dc
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/bindiv.S
@@ -0,0 +1,34 @@
+%default {"result":"","second":"","wide":"","suffix":"","rem":"0","ext":"cdq"}
+/*
+ * 32-bit or 64-bit binary div/rem operation.  Handles the special case of op1 = -1.
+ */
+    /* div/rem vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    .if $wide
+    GET_WIDE_VREG %rax, %rax                # eax <- vBB
+    GET_WIDE_VREG $second, %rcx             # ecx <- vCC
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    GET_VREG $second, %rcx                  # ecx <- vCC
+    .endif
+    test${suffix}   $second, $second
+    jz      common_errDivideByZero
+    cmp${suffix}  $$-1, $second
+    je      2f
+    $ext                                    # rdx:rax <- sign-extended of rax
+    idiv${suffix}   $second
+1:
+    .if $wide
+    SET_WIDE_VREG $result, rINSTq           # vAA <- result
+    .else
+    SET_VREG $result, rINSTq                # vAA <- result
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if $rem
+    xor${suffix} $result, $result
+    .else
+    neg${suffix} $result
+    .endif
+    jmp     1b
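The op1 = -1 special case exists because x86 idiv raises #DE on INT_MIN / -1, whereas
Java defines that quotient as INT_MIN and the corresponding remainder as 0. A standalone
sketch of the required semantics (illustrative names, not ART source):

    #include <stdint.h>

    static int32_t java_div(int32_t a, int32_t b) {      /* b == 0 rejected earlier */
        if (b == -1) {
            return (int32_t)(0u - (uint32_t)a);          /* INT32_MIN negates to itself */
        }
        return a / b;
    }

    static int32_t java_rem(int32_t a, int32_t b) {
        if (b == -1) return 0;                           /* avoids the idiv fault */
        return a % b;
    }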
diff --git a/runtime/interpreter/mterp/x86_64/bindiv2addr.S b/runtime/interpreter/mterp/x86_64/bindiv2addr.S
new file mode 100644
index 0000000..8b9bc95
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/bindiv2addr.S
@@ -0,0 +1,35 @@
+%default {"result":"","second":"","wide":"","suffix":"","rem":"0","ext":"cdq"}
+/*
+ * 32-bit or 64-bit binary div/rem operation.  Handles the special case of op1 = -1.
+ */
+    /* div/rem/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $$4, %ecx                       # rcx <- B
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    .if $wide
+    GET_WIDE_VREG %rax, rINSTq              # eax <- vA
+    GET_WIDE_VREG $second, %rcx             # ecx <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vA
+    GET_VREG $second, %rcx                  # ecx <- vB
+    .endif
+    test${suffix}   $second, $second
+    jz      common_errDivideByZero
+    cmp${suffix}  $$-1, $second
+    je      2f
+    $ext                                    # rdx:rax <- sign-extended of rax
+    idiv${suffix}   $second
+1:
+    .if $wide
+    SET_WIDE_VREG $result, rINSTq           # vA <- result
+    .else
+    SET_VREG $result, rINSTq                # vA <- result
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+2:
+    .if $rem
+    xor${suffix} $result, $result
+    .else
+    neg${suffix} $result
+    .endif
+    jmp     1b
diff --git a/runtime/interpreter/mterp/x86_64/bindivLit16.S b/runtime/interpreter/mterp/x86_64/bindivLit16.S
new file mode 100644
index 0000000..80dbce2
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/bindivLit16.S
@@ -0,0 +1,27 @@
+%default {"result":"","rem":"0"}
+/*
+ * 32-bit binary div/rem operation.  Handles special case of op1=-1.
+ */
+    /* div/rem/lit16 vA, vB, #+CCCC */
+    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+    movl    rINST, %eax                     # rax <- 000000BA
+    sarl    $$4, %eax                       # eax <- B
+    GET_VREG %eax, %rax                     # eax <- vB
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    testl   %ecx, %ecx
+    jz      common_errDivideByZero
+    cmpl    $$-1, %ecx
+    je      2f
+    cdq                                     # rax <- sign-extended of eax
+    idivl   %ecx
+1:
+    SET_VREG $result, rINSTq                # vA <- result
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if $rem
+    xorl    $result, $result
+    .else
+    negl    $result
+    .endif
+    jmp     1b
diff --git a/runtime/interpreter/mterp/x86_64/bindivLit8.S b/runtime/interpreter/mterp/x86_64/bindivLit8.S
new file mode 100644
index 0000000..ab535f3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/bindivLit8.S
@@ -0,0 +1,25 @@
+%default {"result":"","rem":"0"}
+/*
+ * 32-bit div/rem "lit8" binary operation.  Handles special case of
+ * op0=minint & op1=-1
+ */
+    /* div/rem/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
+    GET_VREG  %eax, %rax                    # eax <- vBB
+    testl   %ecx, %ecx
+    je      common_errDivideByZero
+    cmpl    $$-1, %ecx
+    je      2f
+    cdq                                     # rax <- sign-extended of eax
+    idivl   %ecx
+1:
+    SET_VREG $result, rINSTq                # vAA <- result
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    .if $rem
+    xorl    $result, $result
+    .else
+    negl    $result
+    .endif
+    jmp     1b
diff --git a/runtime/interpreter/mterp/x86_64/binop.S b/runtime/interpreter/mterp/x86_64/binop.S
new file mode 100644
index 0000000..962dd61
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binop.S
@@ -0,0 +1,17 @@
+%default {"result":"%eax"}
+/*
+ * Generic 32-bit binary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ *      xor-int, shl-int, shr-int, ushr-int
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB
+    $instr                                  # ex: addl    (rFP,%rcx,4),%eax
+    SET_VREG $result, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
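The per-opcode files instantiate this template by filling in the instr slot; for
example, op_add_int.S later in this patch passes "addl    (rFP,%rcx,4), %eax".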
diff --git a/runtime/interpreter/mterp/x86_64/binop1.S b/runtime/interpreter/mterp/x86_64/binop1.S
new file mode 100644
index 0000000..bdd5732
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binop1.S
@@ -0,0 +1,19 @@
+%default {"wide":"0"}
+/*
+ * Generic binary operation in which both operands are loaded into
+ * registers (op0 in eax/rax, op1 in ecx); handles both the 32-bit and wide forms.
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %ecx, %rcx                     # ecx <- vCC
+    .if $wide
+    GET_WIDE_VREG %rax, %rax                # rax <- vBB
+    $instr                                  # ex: addl    %ecx,%eax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, %rax                     # eax <- vBB
+    $instr                                  # ex: addl    %ecx,%eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binop2addr.S b/runtime/interpreter/mterp/x86_64/binop2addr.S
new file mode 100644
index 0000000..4448a81
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binop2addr.S
@@ -0,0 +1,19 @@
+%default {"result":"%eax"}
+/*
+ * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    $instr                                  # for ex: addl   %eax,(rFP,%ecx,4)
+    CLEAR_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/binopLit16.S b/runtime/interpreter/mterp/x86_64/binopLit16.S
new file mode 100644
index 0000000..de43b53
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binopLit16.S
@@ -0,0 +1,19 @@
+%default {"result":"%eax"}
+/*
+ * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ *      and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+    /* binop/lit16 vA, vB, #+CCCC */
+    movl    rINST, %eax                     # rax <- 000000BA
+    sarl    $$4, %eax                       # eax <- B
+    GET_VREG %eax, %rax                     # eax <- vB
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
+    $instr                                  # for example: addl %ecx, %eax
+    SET_VREG $result, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binopLit8.S b/runtime/interpreter/mterp/x86_64/binopLit8.S
new file mode 100644
index 0000000..995002b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binopLit8.S
@@ -0,0 +1,18 @@
+%default {"result":"%eax"}
+/*
+ * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call.  (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ *      and-int/lit8, or-int/lit8, xor-int/lit8,
+ *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+    /* binop/lit8 vAA, vBB, #+CC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movsbl  3(rPC), %ecx                    # rcx <- ssssssCC
+    GET_VREG %eax, %rax                     # eax <- vBB
+    $instr                                  # ex: addl %ecx,%eax
+    SET_VREG $result, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binopWide.S b/runtime/interpreter/mterp/x86_64/binopWide.S
new file mode 100644
index 0000000..f92f18e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binopWide.S
@@ -0,0 +1,10 @@
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
+    $instr                                  # ex: addq   (rFP,%rcx,4),%rax
+    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binopWide2addr.S b/runtime/interpreter/mterp/x86_64/binopWide2addr.S
new file mode 100644
index 0000000..d9e6cfb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binopWide2addr.S
@@ -0,0 +1,11 @@
+/*
+ * Generic 64-bit binary operation.
+ */
+    /* binop/2addr vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    $instr                                  # for ex: addq   %rax,(rFP,%rcx,4)
+    CLEAR_WIDE_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/cvtfp_int.S b/runtime/interpreter/mterp/x86_64/cvtfp_int.S
new file mode 100644
index 0000000..1472bd2
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/cvtfp_int.S
@@ -0,0 +1,27 @@
+%default {"fp_suffix":"","i_suffix":"","max_const":"","result_reg":"","wide":""}
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint.  If it is less
+ * than minint, it should be clamped to minint.  If it is a NaN, the result
+ * should be zero.  Further, the rounding mode is to truncate.
+ */
+    /* float/double to int/long vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    movs${fp_suffix}   VREG_ADDRESS(rINSTq), %xmm0
+    mov${i_suffix}  ${max_const}, ${result_reg}
+    cvtsi2s${fp_suffix}${i_suffix} ${result_reg}, %xmm1
+    comis${fp_suffix}    %xmm1, %xmm0
+    jae     1f
+    jp      2f
+    cvtts${fp_suffix}2si${i_suffix}  %xmm0, ${result_reg}
+    jmp     1f
+2:
+    xor${i_suffix}    ${result_reg}, ${result_reg}
+1:
+    .if $wide
+    SET_WIDE_VREG ${result_reg}, %rcx
+    .else
+    SET_VREG ${result_reg}, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
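The compare/parity dance above implements exactly the Java rules just described. The
same semantics in standalone C for the float-to-int case (java_f2i is an illustrative
name, not ART code):

    #include <stdint.h>
    #include <math.h>

    static int32_t java_f2i(float x) {
        if (isnan(x)) return 0;                      /* NaN -> 0 */
        if (x >= 2147483648.0f) return INT32_MAX;    /* clamp to maxint */
        if (x < -2147483648.0f) return INT32_MIN;    /* clamp to minint */
        return (int32_t)x;                           /* C casts truncate, as required */
    }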
diff --git a/runtime/interpreter/mterp/x86_64/entry.S b/runtime/interpreter/mterp/x86_64/entry.S
new file mode 100644
index 0000000..69b2371
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/entry.S
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+    .text
+    .global SYMBOL(ExecuteMterpImpl)
+    FUNCTION_TYPE(ExecuteMterpImpl)
+
+/*
+ * On entry:
+ *  0  Thread* self
+ *  1  code_item
+ *  2  ShadowFrame
+ *  3  JValue* result_register
+ *
+ */
+
+SYMBOL(ExecuteMterpImpl):
+    .cfi_startproc
+    .cfi_def_cfa rsp, 8
+
+    /* Spill callee save regs */
+    PUSH %rbx
+    PUSH %rbp
+    PUSH %r12
+    PUSH %r13
+    PUSH %r14
+    PUSH %r15
+
+    /* Allocate frame */
+    subq    $$FRAME_SIZE, %rsp
+    .cfi_adjust_cfa_offset FRAME_SIZE
+
+    /* Remember the return register */
+    movq    IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
+
+    /* Remember the code_item */
+    movq    IN_ARG1, SHADOWFRAME_CODE_ITEM_OFFSET(IN_ARG2)
+
+    /* set up "named" registers */
+    movl    SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
+    leaq    SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
+    leaq    (rFP, %rax, 4), rREFS
+    movl    SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
+    leaq    CODEITEM_INSNS_OFFSET(IN_ARG1), rPC
+    leaq    (rPC, %rax, 2), rPC
+    EXPORT_PC
+
+    /* Starting ibase */
+    movq    IN_ARG0, rSELF
+    REFRESH_IBASE
+
+    /* start executing the instruction at rPC */
+    FETCH_INST
+    GOTO_NEXT
+    /* NOTE: no fallthrough */
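For reference, the four incoming arguments documented above correspond to a C-level
prototype along these lines (a sketch with opaque placeholder types; the real
declaration lives in the runtime headers):

    #include <stdbool.h>

    struct Thread; struct CodeItem; struct ShadowFrame; union JValue;

    bool ExecuteMterpImpl(struct Thread* self, const struct CodeItem* code_item,
                          struct ShadowFrame* shadow_frame, union JValue* result_register);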
diff --git a/runtime/interpreter/mterp/x86_64/fallback.S b/runtime/interpreter/mterp/x86_64/fallback.S
new file mode 100644
index 0000000..8d61166
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/fallback.S
@@ -0,0 +1,3 @@
+/* Transfer stub to alternate interpreter */
+    jmp     MterpFallback
+
diff --git a/runtime/interpreter/mterp/x86_64/footer.S b/runtime/interpreter/mterp/x86_64/footer.S
new file mode 100644
index 0000000..573256b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/footer.S
@@ -0,0 +1,181 @@
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align  2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogDivideByZeroException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errArrayIndex:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogArrayIndexException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errNegativeArraySize:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogNegativeArraySizeException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errNoSuchMethod:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogNoSuchMethodException)
+#endif
+    jmp     MterpCommonFallback
+
+common_errNullObject:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogNullObjectException)
+#endif
+    jmp     MterpCommonFallback
+
+common_exceptionThrown:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogExceptionThrownException)
+#endif
+    jmp     MterpCommonFallback
+
+MterpSuspendFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movl    THREAD_FLAGS_OFFSET(rSELF), OUT_32_ARG2
+    call    SYMBOL(MterpLogSuspendFallback)
+#endif
+    jmp     MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary.  If there is a pending
+ * exception, handle it.  Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+    cmpq    $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jz      MterpFallback
+    /* intentional fallthrough - handle pending exception. */
+
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here, or do we need to bail out to the caller?
+ *
+ */
+MterpException:
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpHandleException)
+    testb   %al, %al
+    jz      MterpExceptionReturn
+    movq    OFF_FP_CODE_ITEM(rFP), %rax
+    mov     OFF_FP_DEX_PC(rFP), %ecx
+    leaq    CODEITEM_INSNS_OFFSET(%rax), rPC
+    leaq    (rPC, %rcx, 2), rPC
+    movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
+    /* Do we need to switch interpreters? */
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    /* resume execution at catch block */
+    REFRESH_IBASE
+    FETCH_INST
+    GOTO_NEXT
+    /* NOTE: no fallthrough */
+
+/*
+ * Check for a suspend request.  Assumes rINST is already loaded and rPC has been
+ * advanced; we still need to fetch the opcode and branch to its handler.
+ */
+MterpCheckSuspendAndContinue:
+    REFRESH_IBASE
+    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+    jz      1f
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    GOTO_NEXT
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movl    rINST, OUT_32_ARG2
+    call    SYMBOL(MterpLogOSR)
+#endif
+    movl    $$1, %eax
+    jmp     MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+    EXPORT_PC
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    call    SYMBOL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+    xorl    %eax, %eax
+    jmp     MterpDone
+
+/*
+ * On entry:
+ *  uint32_t* rFP  (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+    movl    $$1, %eax
+    jmp     MterpDone
+MterpReturn:
+    movq    OFF_FP_RESULT_REGISTER(rFP), %rdx
+    movq    %rax, (%rdx)
+    movl    $$1, %eax
+MterpDone:
+    /* pop up frame */
+    addq    $$FRAME_SIZE, %rsp
+    .cfi_adjust_cfa_offset -FRAME_SIZE
+
+    /* Restore callee save register */
+    POP %r15
+    POP %r14
+    POP %r13
+    POP %r12
+    POP %rbp
+    POP %rbx
+    ret
+    .cfi_endproc
+    SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
diff --git a/runtime/interpreter/mterp/x86_64/fpcmp.S b/runtime/interpreter/mterp/x86_64/fpcmp.S
new file mode 100644
index 0000000..806bc2b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/fpcmp.S
@@ -0,0 +1,35 @@
+%default {"suff":"d","nanval":"pos"}
+/*
+ * Compare two floating-point values.  Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ *     if (x == y) {
+ *         return 0;
+ *     } else if (x < y) {
+ *         return -1;
+ *     } else if (x > y) {
+ *         return 1;
+ *     } else {
+ *         return nanval ? 1 : -1;
+ *     }
+ * }
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movs${suff} VREG_ADDRESS(%rax), %xmm0
+    xor     %eax, %eax
+    ucomis${suff} VREG_ADDRESS(%rcx), %xmm0
+    jp      .L${opcode}_nan_is_${nanval}
+    je      .L${opcode}_finish
+    jb      .L${opcode}_less
+.L${opcode}_nan_is_pos:
+    addb    $$1, %al
+    jmp     .L${opcode}_finish
+.L${opcode}_nan_is_neg:
+.L${opcode}_less:
+    movl    $$-1, %eax
+.L${opcode}_finish:
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
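The pseudocode in the header translates directly to runnable C; nan_is_pos selects
between the cmpg (pos) and cmpl (neg) flavors instantiated below:

    #include <stdbool.h>

    static int fp_compare(double x, double y, bool nan_is_pos) {
        if (x == y) return 0;
        if (x < y)  return -1;
        if (x > y)  return 1;
        return nan_is_pos ? 1 : -1;   /* unordered: at least one operand is NaN */
    }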
diff --git a/runtime/interpreter/mterp/x86_64/fpcvt.S b/runtime/interpreter/mterp/x86_64/fpcvt.S
new file mode 100644
index 0000000..657869e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/fpcvt.S
@@ -0,0 +1,17 @@
+%default {"source_suffix":"","dest_suffix":"","wide":""}
+/*
+ * Generic FP conversion operation; handles both 32-bit and 64-bit results.
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    cvts${source_suffix}2s${dest_suffix}    VREG_ADDRESS(rINSTq), %xmm0
+    .if $wide
+    movsd   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_WIDE_REF %rcx
+    .else
+    movss   %xmm0, VREG_ADDRESS(%rcx)
+    CLEAR_REF %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/header.S b/runtime/interpreter/mterp/x86_64/header.S
new file mode 100644
index 0000000..eb84ea1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/header.S
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+  Art assembly interpreter notes:
+
+  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+  handle invoke; allows higher-level code to create the frame & shadow frame).
+
+  Once that's working, support direct entry code & eliminate the shadow frame (and
+  excess locals allocation).
+
+  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
+  base of the vreg array within the shadow frame.  Access the other fields,
+  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
+  the shadow frame mechanism of double-storing object references - via rFP &
+  number_of_vregs_.
+
+ */
+
+/*
+x86_64 ABI general notes:
+
+Caller save set:
+   rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
+Callee save set:
+   rbx, rbp, r12-r15
+Return regs:
+   32-bit in eax
+   64-bit in rax
+   fp on xmm0
+
+First 8 fp parameters come in xmm0-xmm7.
+First 6 non-fp parameters come in rdi, rsi, rdx, rcx, r8, r9.
+Other parameters are passed on the stack, pushed right-to-left.  On entry to the
+target, the first stack param is at 8(%rsp).
+
+Stack must be 16-byte aligned to support SSE in native code.
+
+If we're not doing variable stack allocation (alloca), the frame pointer can be
+eliminated and all arg references adjusted to be rsp-relative.
+*/
+
+/*
+Mterp and x86_64 notes:
+
+Some key interpreter variables will be assigned to registers.
+
+  nick     reg   purpose
+  rSELF    rbp   pointer to self (Thread*).
+  rPC      r12   interpreted program counter, used for fetching instructions
+  rFP      r13   interpreted frame pointer, used for accessing locals and args
+  rINSTw   bx    first 16-bit code of current instruction
+  rINSTbl  bl    opcode portion of instruction word
+  rINSTbh  bh    high byte of inst word, usually contains src/tgt reg names
+  rIBASE   r14   base of instruction handler table
+  rREFS    r15   base of object references in shadow frame.
+
+Notes:
+   o High order 16 bits of ebx must be zero on entry to handler
+   o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
+   o eax and ecx are scratch, rINSTw/ebx sometimes scratch
+
+Macros are provided for common operations.  Each macro MUST emit only
+one instruction to make instruction-counting easier.  They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/*
+ * Handle Mac compiler specifics
+ */
+#if defined(__APPLE__)
+    #define MACRO_LITERAL(value) $$(value)
+    #define FUNCTION_TYPE(name)
+    #define SIZE(start,end)
+    // Mac OS' symbols have an _ prefix.
+    #define SYMBOL(name) _ ## name
+#else
+    #define MACRO_LITERAL(value) $$value
+    #define FUNCTION_TYPE(name) .type name, @function
+    #define SIZE(start,end) .size start, .-end
+    #define SYMBOL(name) name
+#endif
+
+.macro PUSH _reg
+    pushq \_reg
+    .cfi_adjust_cfa_offset 8
+    .cfi_rel_offset \_reg, 0
+.endm
+
+.macro POP _reg
+    popq \_reg
+    .cfi_adjust_cfa_offset -8
+    .cfi_restore \_reg
+.endm
+
+/* The frame must keep the stack 16-byte aligned.
+ * Account for 8 bytes of return address + 6 * 8 bytes of spills.
+ */
+#define FRAME_SIZE     8
+
+/* Incoming/outgoing argument registers while executing ExecuteMterpImpl */
+#define IN_ARG3        %rcx
+#define IN_ARG2        %rdx
+#define IN_ARG1        %rsi
+#define IN_ARG0        %rdi
+/* Out Args  */
+#define OUT_ARG3       %rcx
+#define OUT_ARG2       %rdx
+#define OUT_ARG1       %rsi
+#define OUT_ARG0       %rdi
+#define OUT_32_ARG3    %ecx
+#define OUT_32_ARG2    %edx
+#define OUT_32_ARG1    %esi
+#define OUT_32_ARG0    %edi
+#define OUT_FP_ARG1    %xmm1
+#define OUT_FP_ARG0    %xmm0
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rSELF    %rbp
+#define rPC      %r12
+#define rFP      %r13
+#define rINST    %ebx
+#define rINSTq   %rbx
+#define rINSTw   %bx
+#define rINSTbh  %bh
+#define rINSTbl  %bl
+#define rIBASE   %r14
+#define rREFS    %r15
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
+ * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/*
+ * Profile branch. rINST should contain the offset. %eax is scratch.
+ */
+.macro MTERP_PROFILE_BRANCH
+#ifdef MTERP_PROFILE_BRANCHES
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movl    rINST, OUT_32_ARG2
+    call    SYMBOL(MterpProfileBranch)
+    testb   %al, %al
+    jnz     MterpOnStackReplacement
+#endif
+.endm
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+    movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
+
+/*
+ * Refresh handler table.
+ * rIBASE is treated as caller-save here, so we must restore it after each call.
+ * It can also be clobbered as a side effect of some 64-bit operations (like imul),
+ * so we should restore it in such cases as well.
+ *
+ */
+.macro REFRESH_IBASE
+    movq    THREAD_CURRENT_IBASE_OFFSET(rSELF), rIBASE
+.endm
+
+/*
+ * Refresh rINST.
+ * At enter to handler rINST does not contain the opcode number.
+ * However some utilities require the full value, so this macro
+ * restores the opcode number.
+ */
+.macro REFRESH_INST _opnum
+    movb    rINSTbl, rINSTbh
+    movb    $$\_opnum, rINSTbl
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINSTw.  Does not advance rPC.
+ */
+.macro FETCH_INST
+    movzwq  (rPC), rINSTq
+.endm
+
+/*
+ * Remove the opcode from rINST, compute the address of the handler and jump to it.
+ */
+.macro GOTO_NEXT
+    movzx   rINSTbl,%eax
+    movzbl  rINSTbh,rINST
+    shll    MACRO_LITERAL(${handler_size_bits}), %eax
+    addq    rIBASE, %rax
+    jmp     *%rax
+.endm
+
+/*
+ * Advance rPC by instruction count.
+ */
+.macro ADVANCE_PC _count
+    leaq    2*\_count(rPC), rPC
+.endm
+
+/*
+ * Advance rPC by instruction count, fetch instruction and jump to handler.
+ */
+.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
+    ADVANCE_PC \_count
+    FETCH_INST
+    GOTO_NEXT
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
+#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
+
+.macro GET_VREG _reg _vreg
+    movl    (rFP,\_vreg,4), \_reg
+.endm
+
+/* Read wide value. */
+.macro GET_WIDE_VREG _reg _vreg
+    movq    (rFP,\_vreg,4), \_reg
+.endm
+
+.macro SET_VREG _reg _vreg
+    movl    \_reg, (rFP,\_vreg,4)
+    movl    MACRO_LITERAL(0), (rREFS,\_vreg,4)
+.endm
+
+/* Write wide value. reg is clobbered. */
+.macro SET_WIDE_VREG _reg _vreg
+    movq    \_reg, (rFP,\_vreg,4)
+    xorq    \_reg, \_reg
+    movq    \_reg, (rREFS,\_vreg,4)
+.endm
+
+.macro SET_VREG_OBJECT _reg _vreg
+    movl    \_reg, (rFP,\_vreg,4)
+    movl    \_reg, (rREFS,\_vreg,4)
+.endm
+
+.macro GET_VREG_HIGH _reg _vreg
+    movl    4(rFP,\_vreg,4), \_reg
+.endm
+
+.macro SET_VREG_HIGH _reg _vreg
+    movl    \_reg, 4(rFP,\_vreg,4)
+    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
+.endm
+
+.macro CLEAR_REF _vreg
+    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
+.endm
+
+.macro CLEAR_WIDE_REF _vreg
+    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
+    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
+.endm
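The FETCH_INST/GOTO_NEXT pair is classic token-threaded dispatch. A self-contained
miniature using the GCC/Clang labels-as-values extension (illustrative only, not ART
code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        static const uint16_t code[] = { 0x0001, 0x0001, 0x0000 };  /* inc, inc, halt */
        void* handlers[] = { &&op_halt, &&op_inc };                 /* the "ibase" */
        const uint16_t* pc = code;
        int acc = 0;
        goto *handlers[*pc & 0xff];         /* FETCH_INST + GOTO_NEXT */
    op_inc:
        acc++;
        pc++;                               /* ADVANCE_PC 1 */
        goto *handlers[*pc & 0xff];
    op_halt:
        printf("acc = %d\n", acc);          /* prints acc = 2 */
        return 0;
    }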
diff --git a/runtime/interpreter/mterp/x86_64/invoke.S b/runtime/interpreter/mterp/x86_64/invoke.S
new file mode 100644
index 0000000..f7e6155
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/invoke.S
@@ -0,0 +1,22 @@
+%default { "helper":"UndefinedInvokeHandler" }
+/*
+ * Generic invoke handler wrapper.
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    .extern $helper
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movq    rPC, OUT_ARG2
+    REFRESH_INST ${opnum}
+    movl    rINST, OUT_32_ARG3
+    call    SYMBOL($helper)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
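Each $helper plugged into this wrapper is assumed to follow the calling pattern visible
in the argument setup above (a sketch; UndefinedInvokeHandler is the template's own
default name):

    #include <stdbool.h>
    #include <stdint.h>

    struct Thread; struct ShadowFrame;

    /* A false return means an exception is pending (the jz MterpException path). */
    bool UndefinedInvokeHandler(struct Thread* self, struct ShadowFrame* shadow_frame,
                                uint16_t* dex_pc_ptr, uint32_t inst_data);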
diff --git a/runtime/interpreter/mterp/x86_64/op_add_double.S b/runtime/interpreter/mterp/x86_64/op_add_double.S
new file mode 100644
index 0000000..cb462cb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_double.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"adds","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_double_2addr.S
new file mode 100644
index 0000000..063bde3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_double_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"adds","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_float.S b/runtime/interpreter/mterp/x86_64/op_add_float.S
new file mode 100644
index 0000000..7753bf8
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_float.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"adds","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_float_2addr.S
new file mode 100644
index 0000000..6c8005b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_float_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"adds","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int.S b/runtime/interpreter/mterp/x86_64/op_add_int.S
new file mode 100644
index 0000000..e316be7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop.S" {"instr":"addl    (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_int_2addr.S
new file mode 100644
index 0000000..2ff8293
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binop2addr.S" {"instr":"addl    %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_add_int_lit16.S
new file mode 100644
index 0000000..bfeb7ca
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit16.S" {"instr":"addl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_add_int_lit8.S
new file mode 100644
index 0000000..8954844
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"addl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_long.S b/runtime/interpreter/mterp/x86_64/op_add_long.S
new file mode 100644
index 0000000..89131ff
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_long.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide.S" {"instr":"addq    (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_long_2addr.S
new file mode 100644
index 0000000..fed98bc
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide2addr.S" {"instr":"addq    %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_aget.S b/runtime/interpreter/mterp/x86_64/op_aget.S
new file mode 100644
index 0000000..58d4948
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget.S
@@ -0,0 +1,24 @@
+%default { "load":"movl", "shift":"4", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET", "wide":"0" }
+/*
+ * Array get, 32 or 64 bits.  vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if $wide
+    movq    $data_offset(%rax,%rcx,8), %rax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    $load   $data_offset(%rax,%rcx,$shift), %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
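Note how the single jae folds two checks into one: reinterpreted as unsigned, any
negative index compares above the array length. In C terms:

    #include <stdint.h>
    #include <stdbool.h>

    static bool index_in_bounds(int32_t index, int32_t length) {
        return (uint32_t)index < (uint32_t)length;   /* jae taken => out of bounds */
    }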
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_boolean.S b/runtime/interpreter/mterp/x86_64/op_aget_boolean.S
new file mode 100644
index 0000000..cf7bdb5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget_boolean.S
@@ -0,0 +1 @@
+%include "x86_64/op_aget.S" { "load":"movzbl", "shift":"1", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_byte.S b/runtime/interpreter/mterp/x86_64/op_aget_byte.S
new file mode 100644
index 0000000..1cbb569
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget_byte.S
@@ -0,0 +1 @@
+%include "x86_64/op_aget.S" { "load":"movsbl", "shift":"1", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_char.S b/runtime/interpreter/mterp/x86_64/op_aget_char.S
new file mode 100644
index 0000000..45c9085
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget_char.S
@@ -0,0 +1 @@
+%include "x86_64/op_aget.S" { "load":"movzwl", "shift":"2", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_object.S b/runtime/interpreter/mterp/x86_64/op_aget_object.S
new file mode 100644
index 0000000..8baedea
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget_object.S
@@ -0,0 +1,16 @@
+/*
+ * Array object get.  vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG OUT_32_ARG0, %rax              # OUT_32_ARG0 <- vBB (array object)
+    GET_VREG OUT_32_ARG1, %rcx              # OUT_32_ARG1 <- vCC (requested index)
+    EXPORT_PC
+    call    SYMBOL(artAGetObjectFromMterp)  # (array, index)
+    cmpq    $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException
+    SET_VREG_OBJECT %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_short.S b/runtime/interpreter/mterp/x86_64/op_aget_short.S
new file mode 100644
index 0000000..82c4a1d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget_short.S
@@ -0,0 +1 @@
+%include "x86_64/op_aget.S" { "load":"movswl", "shift":"2", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_wide.S b/runtime/interpreter/mterp/x86_64/op_aget_wide.S
new file mode 100644
index 0000000..4f2771b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget_wide.S
@@ -0,0 +1 @@
+%include "x86_64/op_aget.S" { "load":"movq", "shift":"8", "data_offset":"MIRROR_WIDE_ARRAY_DATA_OFFSET", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int.S b/runtime/interpreter/mterp/x86_64/op_and_int.S
new file mode 100644
index 0000000..4469889
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_and_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop.S" {"instr":"andl    (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_and_int_2addr.S
new file mode 100644
index 0000000..16315bb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_and_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binop2addr.S" {"instr":"andl    %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_and_int_lit16.S
new file mode 100644
index 0000000..63e851b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_and_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit16.S" {"instr":"andl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_and_int_lit8.S
new file mode 100644
index 0000000..da7a20f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_and_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"andl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_long.S b/runtime/interpreter/mterp/x86_64/op_and_long.S
new file mode 100644
index 0000000..ce1dd26
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_and_long.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide.S" {"instr":"andq    (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_and_long_2addr.S
new file mode 100644
index 0000000..d17ab8d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_and_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide2addr.S" {"instr":"andq    %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_aput.S b/runtime/interpreter/mterp/x86_64/op_aput.S
new file mode 100644
index 0000000..11500ad
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput.S
@@ -0,0 +1,23 @@
+%default { "reg":"rINST", "store":"movl", "shift":"4", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET", "wide":"0" }
+/*
+ * Array put, 32 or 64 bits.  vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %eax, %rax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %rcx                     # ecx <- vCC (requested index)
+    testl   %eax, %eax                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if $wide
+    GET_WIDE_VREG rINSTq, rINSTq
+    .else
+    GET_VREG rINST, rINSTq
+    .endif
+    $store    $reg, $data_offset(%rax,%rcx,$shift)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_boolean.S b/runtime/interpreter/mterp/x86_64/op_aput_boolean.S
new file mode 100644
index 0000000..7d77a86
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput_boolean.S
@@ -0,0 +1 @@
+%include "x86_64/op_aput.S" { "reg":"rINSTbl", "store":"movb", "shift":"1", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_byte.S b/runtime/interpreter/mterp/x86_64/op_aput_byte.S
new file mode 100644
index 0000000..7a1723e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput_byte.S
@@ -0,0 +1 @@
+%include "x86_64/op_aput.S" { "reg":"rINSTbl", "store":"movb", "shift":"1", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_char.S b/runtime/interpreter/mterp/x86_64/op_aput_char.S
new file mode 100644
index 0000000..f8f50a3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput_char.S
@@ -0,0 +1 @@
+%include "x86_64/op_aput.S" { "reg":"rINSTw", "store":"movw", "shift":"2", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_object.S b/runtime/interpreter/mterp/x86_64/op_aput_object.S
new file mode 100644
index 0000000..b1bae0f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput_object.S
@@ -0,0 +1,13 @@
+/*
+ * Store an object into an array.  vBB[vCC] <- vAA.
+ */
+    /* op vAA, vBB, vCC */
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    REFRESH_INST ${opnum}
+    movq    rINSTq, OUT_ARG2
+    call    SYMBOL(MterpAputObject)         # (shadow_frame, pc, inst_data)
+    testb   %al, %al
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_short.S b/runtime/interpreter/mterp/x86_64/op_aput_short.S
new file mode 100644
index 0000000..481fd68
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput_short.S
@@ -0,0 +1 @@
+%include "x86_64/op_aput.S" { "reg":"rINSTw", "store":"movw", "shift":"2", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_wide.S b/runtime/interpreter/mterp/x86_64/op_aput_wide.S
new file mode 100644
index 0000000..5bbd39b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput_wide.S
@@ -0,0 +1 @@
+%include "x86_64/op_aput.S" { "reg":"rINSTq", "store":"movq", "shift":"8", "data_offset":"MIRROR_WIDE_ARRAY_DATA_OFFSET", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_array_length.S b/runtime/interpreter/mterp/x86_64/op_array_length.S
new file mode 100644
index 0000000..e80d665
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_array_length.S
@@ -0,0 +1,12 @@
+/*
+ * Return the length of an array.
+ */
+    movl    rINST, %eax                     # eax <- BA
+    sarl    $$4, rINST                      # rINST <- B
+    GET_VREG %ecx, rINSTq                   # ecx <- vB (object ref)
+    testl   %ecx, %ecx                      # is null?
+    je      common_errNullObject
+    andb    $$0xf, %al                      # eax <- A
+    movl    MIRROR_ARRAY_LENGTH_OFFSET(%rcx), rINST
+    SET_VREG rINST, %rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_check_cast.S b/runtime/interpreter/mterp/x86_64/op_check_cast.S
new file mode 100644
index 0000000..f8fa7b2
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_check_cast.S
@@ -0,0 +1,13 @@
+/*
+ * Check to see if a cast from one class to another is allowed.
+ */
+    /* check-cast vAA, class@BBBB */
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # OUT_ARG0 <- BBBB
+    leaq    VREG_ADDRESS(rINSTq), OUT_ARG1
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpCheckCast)          # (index, &obj, method, self)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_cmp_long.S b/runtime/interpreter/mterp/x86_64/op_cmp_long.S
new file mode 100644
index 0000000..23ca3e5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_cmp_long.S
@@ -0,0 +1,17 @@
+/*
+ * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ */
+    /* cmp-long vAA, vBB, vCC */
+    movzbq  2(rPC), %rdx                    # edx <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_WIDE_VREG %rdx, %rdx                # rdx <- v[BB]
+    xorl    %eax, %eax
+    xorl    %edi, %edi
+    addb    $$1, %al
+    movl    $$-1, %esi
+    cmpq    VREG_ADDRESS(%rcx), %rdx
+    cmovl   %esi, %edi
+    cmovg   %eax, %edi
+    SET_VREG %edi, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
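The xor/cmov sequence is a branch-free three-way compare; the same result in one line
of C (illustrative):

    #include <stdint.h>

    static int32_t cmp_long(int64_t a, int64_t b) {
        return (a > b) - (a < b);   /* 1, 0, or -1, like the cmovg/cmovl pair */
    }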
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpg_double.S b/runtime/interpreter/mterp/x86_64/op_cmpg_double.S
new file mode 100644
index 0000000..7c0aa1b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_cmpg_double.S
@@ -0,0 +1 @@
+%include "x86_64/fpcmp.S" {"suff":"d","nanval":"pos"}
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpg_float.S b/runtime/interpreter/mterp/x86_64/op_cmpg_float.S
new file mode 100644
index 0000000..14e8472
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_cmpg_float.S
@@ -0,0 +1 @@
+%include "x86_64/fpcmp.S" {"suff":"s","nanval":"pos"}
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpl_double.S b/runtime/interpreter/mterp/x86_64/op_cmpl_double.S
new file mode 100644
index 0000000..1d4c424
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_cmpl_double.S
@@ -0,0 +1 @@
+%include "x86_64/fpcmp.S" {"suff":"d","nanval":"neg"}
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpl_float.S b/runtime/interpreter/mterp/x86_64/op_cmpl_float.S
new file mode 100644
index 0000000..97a12a6
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_cmpl_float.S
@@ -0,0 +1 @@
+%include "x86_64/fpcmp.S" {"suff":"s","nanval":"neg"}
diff --git a/runtime/interpreter/mterp/x86_64/op_const.S b/runtime/interpreter/mterp/x86_64/op_const.S
new file mode 100644
index 0000000..3cfafdb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const.S
@@ -0,0 +1,4 @@
+    /* const vAA, #+BBBBbbbb */
+    movl    2(rPC), %eax                    # grab all 32 bits at once
+    SET_VREG %eax, rINSTq                   # vAA<- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_const_16.S b/runtime/interpreter/mterp/x86_64/op_const_16.S
new file mode 100644
index 0000000..1a139c6
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_16.S
@@ -0,0 +1,4 @@
+    /* const/16 vAA, #+BBBB */
+    movswl  2(rPC), %ecx                    # ecx <- ssssBBBB
+    SET_VREG %ecx, rINSTq                   # vAA <- ssssBBBB
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_4.S b/runtime/interpreter/mterp/x86_64/op_const_4.S
new file mode 100644
index 0000000..23c4816
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_4.S
@@ -0,0 +1,7 @@
+    /* const/4 vA, #+B */
+    movsbl  rINSTbl, %eax                   # eax <- ssssssBx
+    movl    $$0xf, rINST
+    andl    %eax, rINST                     # rINST <- A
+    sarl    $$4, %eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
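After dispatch, rINST holds the instruction's high byte (literal B in the upper nibble,
register index A in the lower), so the handler is pure nibble surgery. A C sketch
(const4 is an illustrative name, not ART code):

    #include <stdint.h>

    static void const4(uint32_t* vregs, uint8_t inst_ab) {
        uint32_t a = inst_ab & 0x0fu;          /* destination vreg index (andl $0xf) */
        int32_t  b = (int8_t)inst_ab >> 4;     /* sign-extended literal (movsbl/sarl) */
        vregs[a] = (uint32_t)b;
    }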
diff --git a/runtime/interpreter/mterp/x86_64/op_const_class.S b/runtime/interpreter/mterp/x86_64/op_const_class.S
new file mode 100644
index 0000000..494920a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_class.S
@@ -0,0 +1,10 @@
+    /* const/class vAA, Class@BBBB */
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # OUT_ARG0 <- BBBB
+    movq    rINSTq, OUT_ARG1
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpConstClass)         # (index, tgt_reg, shadow_frame, self)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_high16.S b/runtime/interpreter/mterp/x86_64/op_const_high16.S
new file mode 100644
index 0000000..64e633c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_high16.S
@@ -0,0 +1,5 @@
+    /* const/high16 vAA, #+BBBB0000 */
+    movzwl  2(rPC), %eax                    # eax <- 0000BBBB
+    sall    $$16, %eax                      # eax <- BBBB0000
+    SET_VREG %eax, rINSTq                   # vAA <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_string.S b/runtime/interpreter/mterp/x86_64/op_const_string.S
new file mode 100644
index 0000000..7c199ec
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_string.S
@@ -0,0 +1,10 @@
+    /* const/string vAA, String@BBBB */
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # OUT_ARG0 <- BBBB
+    movq    rINSTq, OUT_ARG1
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_string_jumbo.S b/runtime/interpreter/mterp/x86_64/op_const_string_jumbo.S
new file mode 100644
index 0000000..ae03d20
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_string_jumbo.S
@@ -0,0 +1,10 @@
+    /* const/string vAA, String@BBBBBBBB */
+    EXPORT_PC
+    movl    2(rPC), OUT_32_ARG0             # OUT_32_ARG0 <- BBBBBBBB
+    movq    rINSTq, OUT_ARG1
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide.S b/runtime/interpreter/mterp/x86_64/op_const_wide.S
new file mode 100644
index 0000000..5615177
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_wide.S
@@ -0,0 +1,4 @@
+    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+    movq    2(rPC), %rax                    # rax <- HHHHhhhhBBBBbbbb
+    SET_WIDE_VREG %rax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide_16.S b/runtime/interpreter/mterp/x86_64/op_const_wide_16.S
new file mode 100644
index 0000000..593b624
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_wide_16.S
@@ -0,0 +1,4 @@
+    /* const-wide/16 vAA, #+BBBB */
+    movswq  2(rPC), %rax                    # rax <- ssssBBBB
+    SET_WIDE_VREG %rax, rINSTq              # store
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide_32.S b/runtime/interpreter/mterp/x86_64/op_const_wide_32.S
new file mode 100644
index 0000000..5ef3636
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_wide_32.S
@@ -0,0 +1,4 @@
+    /* const-wide/32 vAA, #+BBBBbbbb */
+    movslq  2(rPC), %rax                    # rax <- ssssssssBBBBbbbb
+    SET_WIDE_VREG %rax, rINSTq              # store
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide_high16.S b/runtime/interpreter/mterp/x86_64/op_const_wide_high16.S
new file mode 100644
index 0000000..b86b4e5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_wide_high16.S
@@ -0,0 +1,5 @@
+    /* const-wide/high16 vAA, #+BBBB000000000000 */
+    movzwq  2(rPC), %rax                    # rax <- 0000BBBB
+    salq    $$48, %rax                      # rax <- BBBB000000000000
+    SET_WIDE_VREG %rax, rINSTq              # vAA <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_div_double.S b/runtime/interpreter/mterp/x86_64/op_div_double.S
new file mode 100644
index 0000000..45c700c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_double.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"divs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_double_2addr.S
new file mode 100644
index 0000000..83f270e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_double_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"divs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_float.S b/runtime/interpreter/mterp/x86_64/op_div_float.S
new file mode 100644
index 0000000..aa90b24
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_float.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"divs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_float_2addr.S
new file mode 100644
index 0000000..f0f8f1a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_float_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"divs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int.S b/runtime/interpreter/mterp/x86_64/op_div_int.S
new file mode 100644
index 0000000..bba5a17
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_int.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv.S" {"result":"%eax","second":"%ecx","wide":"0","suffix":"l"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_int_2addr.S
new file mode 100644
index 0000000..fa4255d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv2addr.S" {"result":"%eax","second":"%ecx","wide":"0","suffix":"l"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_div_int_lit16.S
new file mode 100644
index 0000000..3fa1e09
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/bindivLit16.S" {"result":"%eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_div_int_lit8.S
new file mode 100644
index 0000000..859883e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/bindivLit8.S" {"result":"%eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_long.S b/runtime/interpreter/mterp/x86_64/op_div_long.S
new file mode 100644
index 0000000..a061a88
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_long.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv.S" {"result":"%rax","second":"%rcx","wide":"1","suffix":"q","ext":"cqo"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_long_2addr.S
new file mode 100644
index 0000000..8886e68
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv2addr.S" {"result":"%rax","second":"%rcx","wide":"1","suffix":"q","ext":"cqo"}
diff --git a/runtime/interpreter/mterp/x86_64/op_double_to_float.S b/runtime/interpreter/mterp/x86_64/op_double_to_float.S
new file mode 100644
index 0000000..cea1482
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_double_to_float.S
@@ -0,0 +1 @@
+%include "x86_64/fpcvt.S" {"source_suffix":"d","dest_suffix":"s","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_double_to_int.S b/runtime/interpreter/mterp/x86_64/op_double_to_int.S
new file mode 100644
index 0000000..a9965ed
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_double_to_int.S
@@ -0,0 +1 @@
+%include "x86_64/cvtfp_int.S" {"fp_suffix":"d","i_suffix":"l","max_const":"$0x7fffffff","result_reg":"%eax","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_double_to_long.S b/runtime/interpreter/mterp/x86_64/op_double_to_long.S
new file mode 100644
index 0000000..179e6a1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_double_to_long.S
@@ -0,0 +1 @@
+%include "x86_64/cvtfp_int.S" {"fp_suffix":"d","i_suffix":"q","max_const":"$0x7fffffffffffffff","result_reg":"%rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_fill_array_data.S b/runtime/interpreter/mterp/x86_64/op_fill_array_data.S
new file mode 100644
index 0000000..626bad4
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_fill_array_data.S
@@ -0,0 +1,9 @@
+    /* fill-array-data vAA, +BBBBBBBB */
+    EXPORT_PC
+    movl    2(rPC), %ecx                    # ecx <- BBBBbbbb
+    leaq    (rPC,%rcx,2), OUT_ARG1          # OUT_ARG1 <- PC + BBBBbbbb*2
+    GET_VREG OUT_32_ARG0, rINSTq            # OUT_32_ARG0 <- vAA (array object)
+    call    SYMBOL(MterpFillArrayData)      # (obj, payload)
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
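(Reference sketch, not part of the diff: what a helper like MterpFillArrayData has to do with the payload located above at PC + BBBBbbbb*2. The dex fill-array-data payload is ident 0x0300, a 16-bit element width, a 32-bit element count, then raw data; the function and parameter names are invented.)

    #include <stdint.h>
    #include <string.h>

    int fill_array_data(void *array_data, size_t capacity_bytes,
                        const uint16_t *payload) {
        uint16_t width = payload[1];            /* element width in bytes */
        uint32_t count;
        memcpy(&count, &payload[2], sizeof(count));
        size_t total = (size_t)width * count;
        if (array_data == NULL || total > capacity_bytes) {
            return 0;                           /* 0 -> the jz exception path above */
        }
        memcpy(array_data, &payload[4], total); /* bulk-copy the literal data */
        return 1;
    }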
diff --git a/runtime/interpreter/mterp/x86_64/op_filled_new_array.S b/runtime/interpreter/mterp/x86_64/op_filled_new_array.S
new file mode 100644
index 0000000..a7f7ddc
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_filled_new_array.S
@@ -0,0 +1,17 @@
+%default { "helper":"MterpFilledNewArray" }
+/*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+    .extern $helper
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    movq    rSELF, OUT_ARG2
+    call    SYMBOL($helper)
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_filled_new_array_range.S b/runtime/interpreter/mterp/x86_64/op_filled_new_array_range.S
new file mode 100644
index 0000000..4ca79a3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_filled_new_array_range.S
@@ -0,0 +1 @@
+%include "x86_64/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_float_to_double.S b/runtime/interpreter/mterp/x86_64/op_float_to_double.S
new file mode 100644
index 0000000..7855205
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_float_to_double.S
@@ -0,0 +1 @@
+%include "x86_64/fpcvt.S" {"source_suffix":"s","dest_suffix":"d","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_float_to_int.S b/runtime/interpreter/mterp/x86_64/op_float_to_int.S
new file mode 100644
index 0000000..cb90555
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_float_to_int.S
@@ -0,0 +1 @@
+%include "x86_64/cvtfp_int.S" {"fp_suffix":"s","i_suffix":"l","max_const":"$0x7fffffff","result_reg":"%eax","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_float_to_long.S b/runtime/interpreter/mterp/x86_64/op_float_to_long.S
new file mode 100644
index 0000000..96bb4ee
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_float_to_long.S
@@ -0,0 +1 @@
+%include "x86_64/cvtfp_int.S" {"fp_suffix":"s","i_suffix":"q","max_const":"$0x7fffffffffffffff","result_reg":"%rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_goto.S b/runtime/interpreter/mterp/x86_64/op_goto.S
new file mode 100644
index 0000000..c4fc976
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_goto.S
@@ -0,0 +1,14 @@
+/*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+    /* goto +AA */
+    movsbq  rINSTbl, rINSTq                 # rINSTq <- ssssssAA
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
+    GOTO_NEXT
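(Reference sketch, not part of the diff: the branch arithmetic in C. Offsets are counted in 16-bit code units, hence the doubling; the addq also sets the flags the jle consumes, so any non-positive offset — a backward branch or self-loop — polls for suspension. check_suspend is a stand-in name.)

    #include <stdint.h>

    void check_suspend(void);  /* stand-in for MterpCheckSuspendAndContinue */

    uint16_t *branch(uint16_t *pc, int32_t code_unit_offset) {
        if (code_unit_offset <= 0) {
            check_suspend();
        }
        return pc + code_unit_offset;  /* pointer arithmetic scales by 2 bytes */
    }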
diff --git a/runtime/interpreter/mterp/x86_64/op_goto_16.S b/runtime/interpreter/mterp/x86_64/op_goto_16.S
new file mode 100644
index 0000000..8cb9a5c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_goto_16.S
@@ -0,0 +1,14 @@
+/*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+    /* goto/16 +AAAA */
+    movswq  2(rPC), rINSTq                  # rINSTq <- ssssAAAA
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AAAA * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
+    GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/op_goto_32.S b/runtime/interpreter/mterp/x86_64/op_goto_32.S
new file mode 100644
index 0000000..4ecdacd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_goto_32.S
@@ -0,0 +1,17 @@
+/*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Because the jle below needs the flags from the doubling, we use an
+ * add (rather than a shift or lea) to convert from Dalvik offset to
+ * byte offset.
+ */
+    /* goto/32 +AAAAAAAA */
+    movslq  2(rPC), rINSTq                  # rINSTq <- AAAAAAAA
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AAAAAAAA * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
+    GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/op_if_eq.S b/runtime/interpreter/mterp/x86_64/op_if_eq.S
new file mode 100644
index 0000000..d56ce72
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_eq.S
@@ -0,0 +1 @@
+%include "x86_64/bincmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_eqz.S b/runtime/interpreter/mterp/x86_64/op_if_eqz.S
new file mode 100644
index 0000000..a0fc444
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_eqz.S
@@ -0,0 +1 @@
+%include "x86_64/zcmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_ge.S b/runtime/interpreter/mterp/x86_64/op_if_ge.S
new file mode 100644
index 0000000..a7832ef
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_ge.S
@@ -0,0 +1 @@
+%include "x86_64/bincmp.S" { "revcmp":"l" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_gez.S b/runtime/interpreter/mterp/x86_64/op_if_gez.S
new file mode 100644
index 0000000..f9af5db
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_gez.S
@@ -0,0 +1 @@
+%include "x86_64/zcmp.S" { "revcmp":"l" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_gt.S b/runtime/interpreter/mterp/x86_64/op_if_gt.S
new file mode 100644
index 0000000..70f2b9e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_gt.S
@@ -0,0 +1 @@
+%include "x86_64/bincmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_gtz.S b/runtime/interpreter/mterp/x86_64/op_if_gtz.S
new file mode 100644
index 0000000..2fb0d50
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_gtz.S
@@ -0,0 +1 @@
+%include "x86_64/zcmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_le.S b/runtime/interpreter/mterp/x86_64/op_if_le.S
new file mode 100644
index 0000000..321962a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_le.S
@@ -0,0 +1 @@
+%include "x86_64/bincmp.S" { "revcmp":"g" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_lez.S b/runtime/interpreter/mterp/x86_64/op_if_lez.S
new file mode 100644
index 0000000..d3dc334
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_lez.S
@@ -0,0 +1 @@
+%include "x86_64/zcmp.S" { "revcmp":"g" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_lt.S b/runtime/interpreter/mterp/x86_64/op_if_lt.S
new file mode 100644
index 0000000..f028005
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_lt.S
@@ -0,0 +1 @@
+%include "x86_64/bincmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_ltz.S b/runtime/interpreter/mterp/x86_64/op_if_ltz.S
new file mode 100644
index 0000000..383d73a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_ltz.S
@@ -0,0 +1 @@
+%include "x86_64/zcmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_ne.S b/runtime/interpreter/mterp/x86_64/op_if_ne.S
new file mode 100644
index 0000000..ac6e063
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_ne.S
@@ -0,0 +1 @@
+%include "x86_64/bincmp.S" { "revcmp":"e" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_nez.S b/runtime/interpreter/mterp/x86_64/op_if_nez.S
new file mode 100644
index 0000000..c96e4f3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_nez.S
@@ -0,0 +1 @@
+%include "x86_64/zcmp.S" { "revcmp":"e" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget.S b/runtime/interpreter/mterp/x86_64/op_iget.S
new file mode 100644
index 0000000..a0d0faf
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget.S
@@ -0,0 +1,27 @@
+%default { "is_object":"0", "helper":"artGet32InstanceFromCode", "wide":"0" }
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+    EXPORT_PC
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    movzwl  2(rPC), OUT_32_ARG0             # OUT_32_ARG0 <- field ref CCCC
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL($helper)
+    cmpq    $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException                  # bail out
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    .if $is_object
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+    .else
+    .if $wide
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
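(Reference sketch, not part of the diff: the generic getter's control flow in C, for the 32-bit case. The entrypoint prototype is approximated with void pointers, and GetPendingException is an invented accessor for the thread's exception slot.)

    #include <stdint.h>

    typedef struct Thread Thread;

    extern uint32_t artGet32InstanceFromCode(uint32_t field_idx, void *obj,
                                             void *referrer, Thread *self);
    extern void *GetPendingException(Thread *self);  /* invented accessor */

    int iget32(int32_t *vregs, int vreg_a, void *obj, uint32_t field_idx,
               void *referrer, Thread *self) {
        uint32_t value = artGet32InstanceFromCode(field_idx, obj, referrer, self);
        if (GetPendingException(self) != NULL) {
            return 0;  /* bail to the MterpException path */
        }
        vregs[vreg_a] = (int32_t)value;
        return 1;
    }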
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_boolean.S b/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
new file mode 100644
index 0000000..6ac5523
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_boolean_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_boolean_quick.S
new file mode 100644
index 0000000..07139c7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_boolean_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget_quick.S" { "load":"movsbl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_byte.S b/runtime/interpreter/mterp/x86_64/op_iget_byte.S
new file mode 100644
index 0000000..6a861b1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_byte.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget.S" { "helper":"artGetByteInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_byte_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_byte_quick.S
new file mode 100644
index 0000000..07139c7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_byte_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget_quick.S" { "load":"movsbl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_char.S b/runtime/interpreter/mterp/x86_64/op_iget_char.S
new file mode 100644
index 0000000..021a0f1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_char.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget.S" { "helper":"artGetCharInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_char_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_char_quick.S
new file mode 100644
index 0000000..8cb3be3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_char_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget_quick.S" { "load":"movzwl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_object.S b/runtime/interpreter/mterp/x86_64/op_iget_object.S
new file mode 100644
index 0000000..d92bc9c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_object.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_object_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_object_quick.S
new file mode 100644
index 0000000..964d20a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_object_quick.S
@@ -0,0 +1,14 @@
+    /* For: iget-object-quick */
+    /* op vA, vB, offset@CCCC */
+    .extern artIGetObjectFromMterp
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG0, %rcx              # vB (object we're operating on)
+    movzwl  2(rPC), OUT_32_ARG1             # OUT_32_ARG1 <- field byte offset
+    EXPORT_PC
+    callq   SYMBOL(artIGetObjectFromMterp)  # (obj, offset)
+    cmpq    $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException                  # bail out
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_quick.S
new file mode 100644
index 0000000..bfb7530
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_quick.S
@@ -0,0 +1,18 @@
+%default { "load":"movl", "wide":"0" }
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+    /* op vA, vB, offset@CCCC */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    movzwq  2(rPC), %rax                    # rax <- field byte offset
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    .if $wide
+    movq    (%rcx,%rax,1), %rax
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    ${load} (%rcx,%rax,1), %eax
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
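(Reference sketch, not part of the diff: a quickened get skips field resolution entirely, because the verifier-provided CCCC is already a byte offset into the object. memcpy models the raw load; throw_null_pointer is a stand-in for common_errNullObject.)

    #include <stdint.h>
    #include <string.h>

    void throw_null_pointer(void);  /* stand-in; does not return */

    int32_t iget_quick(const uint8_t *obj, uint16_t byte_offset) {
        if (obj == NULL) {
            throw_null_pointer();
        }
        int32_t value;
        memcpy(&value, obj + byte_offset, sizeof(value));  /* raw field load */
        return value;
    }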
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_short.S b/runtime/interpreter/mterp/x86_64/op_iget_short.S
new file mode 100644
index 0000000..f158bea
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_short.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget.S" { "helper":"artGetShortInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_short_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_short_quick.S
new file mode 100644
index 0000000..56ca858
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_short_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget_quick.S" { "load":"movswl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_wide.S b/runtime/interpreter/mterp/x86_64/op_iget_wide.S
new file mode 100644
index 0000000..74bb9ff
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_wide.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget.S" { "helper":"artGet64InstanceFromCode", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_wide_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_wide_quick.S
new file mode 100644
index 0000000..169d625
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_wide_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget_quick.S" { "load":"movswl", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_instance_of.S b/runtime/interpreter/mterp/x86_64/op_instance_of.S
new file mode 100644
index 0000000..6be37f9
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_instance_of.S
@@ -0,0 +1,21 @@
+/*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+    /* instance-of vA, vB, class@CCCC */
+    EXPORT_PC
+    movzwl  2(rPC), OUT_32_ARG0             # OUT_32_ARG0 <- CCCC
+    movl    rINST, %eax                     # eax <- BA
+    sarl    $$4, %eax                       # eax <- B
+    leaq    VREG_ADDRESS(%rax), OUT_ARG1    # Get object address
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpInstanceOf)         # (index, &obj, method, self)
+    movsbl  %al, %eax
+    cmpq    $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException
+    andb    $$0xf, rINSTbl                  # rINSTbl <- A
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
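(Reference sketch, not part of the diff: the subtype test MterpInstanceOf ultimately answers, simplified to the common case the comment above mentions — a superclass walk that ignores interfaces and arrays. A null reference is represented by passing a NULL obj_class.)

    typedef struct Class {
        const struct Class *super;  /* NULL at java.lang.Object */
    } Class;

    int instance_of(const Class *obj_class, const Class *target) {
        for (const Class *c = obj_class; c != NULL; c = c->super) {
            if (c == target) {
                return 1;  /* vA <- 1 */
            }
        }
        return 0;          /* null or unrelated class: vA <- 0 */
    }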
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_byte.S b/runtime/interpreter/mterp/x86_64/op_int_to_byte.S
new file mode 100644
index 0000000..f4e578f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_int_to_byte.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":"movsbl  %al, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_char.S b/runtime/interpreter/mterp/x86_64/op_int_to_char.S
new file mode 100644
index 0000000..c1bf17f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_int_to_char.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":"movzwl  %ax,%eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_double.S b/runtime/interpreter/mterp/x86_64/op_int_to_double.S
new file mode 100644
index 0000000..27ebf42
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_int_to_double.S
@@ -0,0 +1 @@
+%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"dl","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_float.S b/runtime/interpreter/mterp/x86_64/op_int_to_float.S
new file mode 100644
index 0000000..5a98d44
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_int_to_float.S
@@ -0,0 +1 @@
+%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"sl","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_long.S b/runtime/interpreter/mterp/x86_64/op_int_to_long.S
new file mode 100644
index 0000000..9281137
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_int_to_long.S
@@ -0,0 +1,8 @@
+    /* int to long vA, vB */
+    movzbq  rINSTbl, %rax                   # rax <- +A
+    sarl    $$4, %eax                       # eax <- B
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    movslq  VREG_ADDRESS(%rax), %rax
+    SET_WIDE_VREG %rax, rINSTq              # v[A] <- %rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_short.S b/runtime/interpreter/mterp/x86_64/op_int_to_short.S
new file mode 100644
index 0000000..6ae6b50
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_int_to_short.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":"movswl %ax, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_direct.S b/runtime/interpreter/mterp/x86_64/op_invoke_direct.S
new file mode 100644
index 0000000..9628589
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_direct.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_direct_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_direct_range.S
new file mode 100644
index 0000000..09ac881
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_direct_range.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_interface.S b/runtime/interpreter/mterp/x86_64/op_invoke_interface.S
new file mode 100644
index 0000000..76d9cd4
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_interface.S
@@ -0,0 +1,8 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeInterface" }
+/*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_interface_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_interface_range.S
new file mode 100644
index 0000000..785b43c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_interface_range.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_static.S b/runtime/interpreter/mterp/x86_64/op_invoke_static.S
new file mode 100644
index 0000000..dd8027d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_static.S
@@ -0,0 +1,2 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeStatic" }
+
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_static_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_static_range.S
new file mode 100644
index 0000000..ee26074
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_static_range.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_super.S b/runtime/interpreter/mterp/x86_64/op_invoke_super.S
new file mode 100644
index 0000000..d07f8d5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_super.S
@@ -0,0 +1,8 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeSuper" }
+/*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_super_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_super_range.S
new file mode 100644
index 0000000..7245cfd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_super_range.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual.S
new file mode 100644
index 0000000..19c708b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_virtual.S
@@ -0,0 +1,8 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtual" }
+/*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_quick.S
new file mode 100644
index 0000000..313bd05
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_quick.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range.S
new file mode 100644
index 0000000..424ad32
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range_quick.S
new file mode 100644
index 0000000..556f718
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range_quick.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput.S b/runtime/interpreter/mterp/x86_64/op_iput.S
new file mode 100644
index 0000000..6b7cb1c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput.S
@@ -0,0 +1,20 @@
+%default { "handler":"artSet32InstanceFromMterp" }
+/*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+    /* op vA, vB, field@CCCC */
+    .extern $handler
+    EXPORT_PC
+    movzwl  2(rPC), OUT_32_ARG0             # field ref <- 0000CCCC
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    GET_VREG OUT_32_ARG2, rINSTq            # fp[A]
+    movq    OFF_FP_METHOD(rFP), OUT_ARG3    # referrer
+    call    SYMBOL($handler)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_boolean.S b/runtime/interpreter/mterp/x86_64/op_iput_boolean.S
new file mode 100644
index 0000000..cb4b1cd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_boolean.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_boolean_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_boolean_quick.S
new file mode 100644
index 0000000..6bd060e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_boolean_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput_quick.S" { "reg":"rINSTbl", "store":"movb" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_byte.S b/runtime/interpreter/mterp/x86_64/op_iput_byte.S
new file mode 100644
index 0000000..cb4b1cd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_byte.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_byte_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_byte_quick.S
new file mode 100644
index 0000000..6bd060e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_byte_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput_quick.S" { "reg":"rINSTbl", "store":"movb" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_char.S b/runtime/interpreter/mterp/x86_64/op_iput_char.S
new file mode 100644
index 0000000..b4e147c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_char.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_char_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_char_quick.S
new file mode 100644
index 0000000..3da96d5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_char_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput_quick.S" { "reg":"rINSTw", "store":"movw" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_object.S b/runtime/interpreter/mterp/x86_64/op_iput_object.S
new file mode 100644
index 0000000..828712d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_object.S
@@ -0,0 +1,10 @@
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    REFRESH_INST ${opnum}
+    movl    rINST, OUT_32_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpIputObject)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_object_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_object_quick.S
new file mode 100644
index 0000000..b5b128a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_object_quick.S
@@ -0,0 +1,9 @@
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    REFRESH_INST ${opnum}
+    movl    rINST, OUT_32_ARG2
+    call    SYMBOL(MterpIputObjectQuick)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_quick.S
new file mode 100644
index 0000000..ecaf98e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_quick.S
@@ -0,0 +1,13 @@
+%default { "reg":"rINST", "store":"movl" }
+    /* For: iput-quick, iput-object-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    GET_VREG rINST, rINSTq                  # rINST <- v[A]
+    movzwq  2(rPC), %rax                    # rax <- field byte offset
+    ${store}    ${reg}, (%rcx,%rax,1)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_short.S b/runtime/interpreter/mterp/x86_64/op_iput_short.S
new file mode 100644
index 0000000..b4e147c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_short.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_short_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_short_quick.S
new file mode 100644
index 0000000..3da96d5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_short_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput_quick.S" { "reg":"rINSTw", "store":"movw" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_wide.S b/runtime/interpreter/mterp/x86_64/op_iput_wide.S
new file mode 100644
index 0000000..e59717b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_wide.S
@@ -0,0 +1,14 @@
+    /* iput-wide vA, vB, field@CCCC */
+    .extern artSet64InstanceFromMterp
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref CCCC
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG OUT_32_ARG1, %rcx              # the object pointer
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    leaq    VREG_ADDRESS(rINSTq), OUT_ARG2  # &fp[A]
+    movq    OFF_FP_METHOD(rFP), OUT_ARG3    # referrer
+    call    SYMBOL(artSet64InstanceFromMterp)
+    testb   %al, %al
+    jnz     MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_wide_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_wide_quick.S
new file mode 100644
index 0000000..473189d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_wide_quick.S
@@ -0,0 +1,12 @@
+    /* iput-wide-quick vA, vB, offset@CCCC */
+    movzbq    rINSTbl, %rcx                 # rcx <- BA
+    sarl      $$4, %ecx                     # ecx <- B
+    GET_VREG  %ecx, %rcx                    # vB (object we're operating on)
+    testl     %ecx, %ecx                    # is object null?
+    je        common_errNullObject
+    movzwq    2(rPC), %rax                  # rax <- field byte offset
+    leaq      (%rcx,%rax,1), %rcx           # rcx <- address of 64-bit target
+    andb      $$0xf, rINSTbl                # rINST <- A
+    GET_WIDE_VREG %rax, rINSTq              # rax <- fp[A]/fp[A+1]
+    movq      %rax, (%rcx)                  # obj.field <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_long_to_double.S b/runtime/interpreter/mterp/x86_64/op_long_to_double.S
new file mode 100644
index 0000000..7cdae32
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_long_to_double.S
@@ -0,0 +1 @@
+%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"dq","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_long_to_float.S b/runtime/interpreter/mterp/x86_64/op_long_to_float.S
new file mode 100644
index 0000000..7553348
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_long_to_float.S
@@ -0,0 +1 @@
+%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"sq","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_long_to_int.S b/runtime/interpreter/mterp/x86_64/op_long_to_int.S
new file mode 100644
index 0000000..7b50c8e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_long_to_int.S
@@ -0,0 +1,2 @@
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%include "x86_64/op_move.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_monitor_enter.S b/runtime/interpreter/mterp/x86_64/op_monitor_enter.S
new file mode 100644
index 0000000..411091f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_monitor_enter.S
@@ -0,0 +1,11 @@
+/*
+ * Synchronize on an object.
+ */
+    /* monitor-enter vAA */
+    EXPORT_PC
+    GET_VREG OUT_32_ARG0, rINSTq
+    movq    rSELF, OUT_ARG1
+    call    SYMBOL(artLockObjectFromCode)   # (object, self)
+    testq   %rax, %rax
+    jnz     MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_monitor_exit.S b/runtime/interpreter/mterp/x86_64/op_monitor_exit.S
new file mode 100644
index 0000000..72d9a23
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_monitor_exit.S
@@ -0,0 +1,15 @@
+/*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction.  See the Dalvik
+ * instruction spec.
+ */
+    /* monitor-exit vAA */
+    EXPORT_PC
+    GET_VREG OUT_32_ARG0, rINSTq
+    movq    rSELF, OUT_ARG1
+    call    SYMBOL(artUnlockObjectFromCode) # (object, self)
+    testq   %rax, %rax
+    jnz     MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move.S b/runtime/interpreter/mterp/x86_64/op_move.S
new file mode 100644
index 0000000..ccaac2c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move.S
@@ -0,0 +1,13 @@
+%default { "is_object":"0" }
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    movl    rINST, %eax                     # eax <- BA
+    andb    $$0xf, %al                      # eax <- A
+    shrl    $$4, rINST                      # rINST <- B
+    GET_VREG %edx, rINSTq
+    .if $is_object
+    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_16.S b/runtime/interpreter/mterp/x86_64/op_move_16.S
new file mode 100644
index 0000000..6a813eb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_16.S
@@ -0,0 +1,12 @@
+%default { "is_object":"0" }
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    movzwq  4(rPC), %rcx                    # ecx <- BBBB
+    movzwq  2(rPC), %rax                    # eax <- AAAA
+    GET_VREG %edx, %rcx
+    .if $is_object
+    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_move_exception.S b/runtime/interpreter/mterp/x86_64/op_move_exception.S
new file mode 100644
index 0000000..d0a14fd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_exception.S
@@ -0,0 +1,5 @@
+    /* move-exception vAA */
+    movl    THREAD_EXCEPTION_OFFSET(rSELF), %eax
+    SET_VREG_OBJECT %eax, rINSTq            # fp[AA] <- exception object
+    movl    $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_from16.S b/runtime/interpreter/mterp/x86_64/op_move_from16.S
new file mode 100644
index 0000000..150e9c2
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_from16.S
@@ -0,0 +1,11 @@
+%default { "is_object":"0" }
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    movzwq  2(rPC), %rax                    # eax <- BBBB
+    GET_VREG %edx, %rax                     # edx <- fp[BBBB]
+    .if $is_object
+    SET_VREG_OBJECT %edx, rINSTq            # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, rINSTq                   # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_move_object.S b/runtime/interpreter/mterp/x86_64/op_move_object.S
new file mode 100644
index 0000000..0d86649
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_object.S
@@ -0,0 +1 @@
+%include "x86_64/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_object_16.S b/runtime/interpreter/mterp/x86_64/op_move_object_16.S
new file mode 100644
index 0000000..32541ff
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_object_16.S
@@ -0,0 +1 @@
+%include "x86_64/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_object_from16.S b/runtime/interpreter/mterp/x86_64/op_move_object_from16.S
new file mode 100644
index 0000000..983e4ab
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_object_from16.S
@@ -0,0 +1 @@
+%include "x86_64/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_result.S b/runtime/interpreter/mterp/x86_64/op_move_result.S
new file mode 100644
index 0000000..8268344
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_result.S
@@ -0,0 +1,11 @@
+%default { "is_object":"0" }
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    movq    OFF_FP_RESULT_REGISTER(rFP), %rax    # get pointer to result JType.
+    movl    (%rax), %eax                    # eax <- result.i
+    .if $is_object
+    SET_VREG_OBJECT %eax, rINSTq            # fp[AA] <- result
+    .else
+    SET_VREG %eax, rINSTq                   # fp[AA] <- result
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
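(Reference sketch, not part of the diff: the frame caches a pointer to the last call's result, and move-result copies its 32-bit view into vAA. JValue mirrors, in spirit, the union ART uses for return values.)

    #include <stdint.h>

    typedef union JValue { int32_t i; int64_t j; void *l; } JValue;

    void move_result(int32_t *vregs, int vreg_aa, const JValue *result) {
        vregs[vreg_aa] = result->i;  /* move-result-wide would copy result->j */
    }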
diff --git a/runtime/interpreter/mterp/x86_64/op_move_result_object.S b/runtime/interpreter/mterp/x86_64/op_move_result_object.S
new file mode 100644
index 0000000..c5aac17
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_result_object.S
@@ -0,0 +1 @@
+%include "x86_64/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_result_wide.S b/runtime/interpreter/mterp/x86_64/op_move_result_wide.S
new file mode 100644
index 0000000..03de783
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_result_wide.S
@@ -0,0 +1,5 @@
+    /* move-result-wide vAA */
+    movq    OFF_FP_RESULT_REGISTER(rFP), %rax    # get pointer to result JType.
+    movq    (%rax), %rdx                         # Get wide
+    SET_WIDE_VREG %rdx, rINSTq                   # v[AA] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_wide.S b/runtime/interpreter/mterp/x86_64/op_move_wide.S
new file mode 100644
index 0000000..508f8cc
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_wide.S
@@ -0,0 +1,8 @@
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movl    rINST, %ecx                     # ecx <- BA
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rdx, rINSTq              # rdx <- v[B]
+    SET_WIDE_VREG %rdx, %rcx                # v[A] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_wide_16.S b/runtime/interpreter/mterp/x86_64/op_move_wide_16.S
new file mode 100644
index 0000000..ce371a9
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_wide_16.S
@@ -0,0 +1,7 @@
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movzwq  4(rPC), %rcx                    # ecx<- BBBB
+    movzwq  2(rPC), %rax                    # eax<- AAAA
+    GET_WIDE_VREG %rdx, %rcx                # rdx <- v[B]
+    SET_WIDE_VREG %rdx, %rax                # v[A] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_move_wide_from16.S b/runtime/interpreter/mterp/x86_64/op_move_wide_from16.S
new file mode 100644
index 0000000..0d6971a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_wide_from16.S
@@ -0,0 +1,6 @@
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movzwl  2(rPC), %ecx                    # ecx <- BBBB
+    GET_WIDE_VREG %rdx, %rcx                # rdx <- v[B]
+    SET_WIDE_VREG %rdx, rINSTq              # v[A] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_double.S b/runtime/interpreter/mterp/x86_64/op_mul_double.S
new file mode 100644
index 0000000..1f4bcb3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_double.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"muls","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_double_2addr.S
new file mode 100644
index 0000000..9850a28
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_double_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"muls","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_float.S b/runtime/interpreter/mterp/x86_64/op_mul_float.S
new file mode 100644
index 0000000..85960e9
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_float.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"muls","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_float_2addr.S
new file mode 100644
index 0000000..6d36b6a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_float_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"muls","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int.S b/runtime/interpreter/mterp/x86_64/op_mul_int.S
new file mode 100644
index 0000000..5f3923a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop.S" {"instr":"imull   (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_int_2addr.S
new file mode 100644
index 0000000..0b5af8a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_int_2addr.S
@@ -0,0 +1,8 @@
+    /* mul vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_VREG %eax, %rcx                     # eax <- vA
+    imull   (rFP,rINSTq,4), %eax
+    SET_VREG %eax, %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_mul_int_lit16.S
new file mode 100644
index 0000000..a4cfdbc
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit16.S" {"instr":"imull   %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_mul_int_lit8.S
new file mode 100644
index 0000000..89e9acb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"imull   %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_long.S b/runtime/interpreter/mterp/x86_64/op_mul_long.S
new file mode 100644
index 0000000..2b85370
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_long.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide.S" {"instr":"imulq   (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_long_2addr.S
new file mode 100644
index 0000000..167128b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_long_2addr.S
@@ -0,0 +1,8 @@
+    /* mul vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rax, %rcx                # rax <- vA
+    imulq   (rFP,rINSTq,4), %rax
+    SET_WIDE_VREG %rax, %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_double.S b/runtime/interpreter/mterp/x86_64/op_neg_double.S
new file mode 100644
index 0000000..2c14b09
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_neg_double.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"preinstr":"    movq    $0x8000000000000000, %rsi", "instr":"    xorq    %rsi, %rax", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_float.S b/runtime/interpreter/mterp/x86_64/op_neg_float.S
new file mode 100644
index 0000000..148b21e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_neg_float.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":"    xorl    $0x80000000, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_int.S b/runtime/interpreter/mterp/x86_64/op_neg_int.S
new file mode 100644
index 0000000..f90a937
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_neg_int.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":"    negl    %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_long.S b/runtime/interpreter/mterp/x86_64/op_neg_long.S
new file mode 100644
index 0000000..18fc3cc
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_neg_long.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":"    negq    %rax", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_new_array.S b/runtime/interpreter/mterp/x86_64/op_new_array.S
new file mode 100644
index 0000000..9831a0b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_new_array.S
@@ -0,0 +1,18 @@
+/*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+    /* new-array vA, vB, class@CCCC */
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    REFRESH_INST ${opnum}
+    movq    rINSTq, OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpNewArray)
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_new_instance.S b/runtime/interpreter/mterp/x86_64/op_new_instance.S
new file mode 100644
index 0000000..fc8c8cd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_new_instance.S
@@ -0,0 +1,13 @@
+/*
+ * Create a new instance of a class.
+ */
+    /* new-instance vAA, class@BBBB */
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rSELF, OUT_ARG1
+    REFRESH_INST ${opnum}
+    movq    rINSTq, OUT_ARG2
+    call    SYMBOL(MterpNewInstance)
+    testb   %al, %al                        # 0 means an exception is thrown
+    jz      MterpPossibleException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_nop.S b/runtime/interpreter/mterp/x86_64/op_nop.S
new file mode 100644
index 0000000..4cb68e3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_nop.S
@@ -0,0 +1 @@
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_not_int.S b/runtime/interpreter/mterp/x86_64/op_not_int.S
new file mode 100644
index 0000000..463d080
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_not_int.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":"    notl    %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_not_long.S b/runtime/interpreter/mterp/x86_64/op_not_long.S
new file mode 100644
index 0000000..c97bb9e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_not_long.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":"    notq    %rax", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int.S b/runtime/interpreter/mterp/x86_64/op_or_int.S
new file mode 100644
index 0000000..730310f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_or_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop.S" {"instr":"orl     (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_or_int_2addr.S
new file mode 100644
index 0000000..f722e4d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_or_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binop2addr.S" {"instr":"orl     %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_or_int_lit16.S
new file mode 100644
index 0000000..fee86c7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_or_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit16.S" {"instr":"orl     %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_or_int_lit8.S
new file mode 100644
index 0000000..81104c7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_or_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"orl     %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_long.S b/runtime/interpreter/mterp/x86_64/op_or_long.S
new file mode 100644
index 0000000..6c70a20
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_or_long.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide.S" {"instr":"orq     (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_or_long_2addr.S
new file mode 100644
index 0000000..546da1d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_or_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide2addr.S" {"instr":"orq     %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_packed_switch.S b/runtime/interpreter/mterp/x86_64/op_packed_switch.S
new file mode 100644
index 0000000..cb0acb7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_packed_switch.S
@@ -0,0 +1,22 @@
+%default { "func":"MterpDoPackedSwitch" }
+/*
+ * Handle a packed-switch or sparse-switch instruction.  In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+    /* op vAA, +BBBB */
+    movslq  2(rPC), OUT_ARG0                # OUT_ARG0 <- BBBBbbbb
+    leaq    (rPC,OUT_ARG0,2), OUT_ARG0      # OUT_ARG0 <- PC + BBBBbbbb*2
+    GET_VREG OUT_32_ARG1, rINSTq            # OUT_32_ARG1 <- vAA
+    call    SYMBOL($func)
+    movslq  %eax, rINSTq
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue
+    GOTO_NEXT
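(Reference sketch, not part of the diff: the bounds-checked lookup a helper like MterpDoPackedSwitch performs, assuming the dex packed-switch payload layout — ident 0x0100, 16-bit size, 32-bit first_key, then size 32-bit relative targets — and a 3-code-unit fall-through, the width of the switch instruction itself.)

    #include <stdint.h>
    #include <string.h>

    /* Returns a branch offset in 16-bit code units. */
    int32_t do_packed_switch(const uint16_t *payload, int32_t test_val) {
        uint16_t size = payload[1];
        int32_t first_key, target;
        memcpy(&first_key, &payload[2], sizeof(first_key));
        uint32_t index = (uint32_t)(test_val - first_key);
        if (index >= size) {
            return 3;  /* no case matched: fall through */
        }
        memcpy(&target, &payload[4 + 2 * index], sizeof(target));
        return target;
    }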
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_double.S b/runtime/interpreter/mterp/x86_64/op_rem_double.S
new file mode 100644
index 0000000..00aed78
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_double.S
@@ -0,0 +1,14 @@
+    /* rem_double vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    movzbq  2(rPC), %rax                    # rax <- BB
+    fldl    VREG_ADDRESS(%rcx)              # %st(1) <- fp[vCC] (divisor)
+    fldl    VREG_ADDRESS(%rax)              # %st(0) <- fp[vBB] (dividend)
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstpl   VREG_ADDRESS(rINSTq)            # fp[vAA] <- %st
+    CLEAR_WIDE_REF rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
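(Reference sketch, not part of the diff: the fprem loop above spins until the FPU's C2 flag — copied into PF by fstsw/sahf, hence the jp — reports the partial remainder is complete. The net effect is C's fmod, i.e. the remainder of truncated division.)

    #include <math.h>

    double rem_double(double dividend, double divisor) {
        return fmod(dividend, divisor);
    }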
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_double_2addr.S
new file mode 100644
index 0000000..9768266
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_double_2addr.S
@@ -0,0 +1,15 @@
+    /* rem_double/2addr vA, vB */
+    movzbq  rINSTbl, %rcx                   # ecx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    fldl    VREG_ADDRESS(rINSTq)            # vB to fp stack
+    andb    $$0xf, %cl                      # ecx <- A
+    fldl    VREG_ADDRESS(%rcx)              # vA to fp stack
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstpl   VREG_ADDRESS(%rcx)              # %st to vA
+    CLEAR_WIDE_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_float.S b/runtime/interpreter/mterp/x86_64/op_rem_float.S
new file mode 100644
index 0000000..5af28ac
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_float.S
@@ -0,0 +1,14 @@
+    /* rem_float vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    movzbq  2(rPC), %rax                    # rax <- BB
+    flds    VREG_ADDRESS(%rcx)              # vCC (divisor) to fp stack
+    flds    VREG_ADDRESS(%rax)              # vBB (dividend) to fp stack
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstps   VREG_ADDRESS(rINSTq)            # %st to vAA
+    CLEAR_REF rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_float_2addr.S
new file mode 100644
index 0000000..e9282a8
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_float_2addr.S
@@ -0,0 +1,15 @@
+    /* rem_float/2addr vA, vB */
+    movzbq  rINSTbl, %rcx                   # ecx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    flds    VREG_ADDRESS(rINSTq)            # vB to fp stack
+    andb    $$0xf, %cl                      # ecx <- A
+    flds    VREG_ADDRESS(%rcx)              # vA to fp stack
+1:
+    fprem
+    fstsw   %ax
+    sahf
+    jp      1b
+    fstp    %st(1)
+    fstps   VREG_ADDRESS(%rcx)              # %st to vA
+    CLEAR_REF %rcx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int.S b/runtime/interpreter/mterp/x86_64/op_rem_int.S
new file mode 100644
index 0000000..fd77d7c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_int.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv.S" {"result":"%edx","second":"%ecx","wide":"0","suffix":"l","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_int_2addr.S
new file mode 100644
index 0000000..25ffbf7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv2addr.S" {"result":"%edx","second":"%ecx","wide":"0","suffix":"l","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_rem_int_lit16.S
new file mode 100644
index 0000000..21cc370
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/bindivLit16.S" {"result":"%edx","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_rem_int_lit8.S
new file mode 100644
index 0000000..2eb0150
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/bindivLit8.S" {"result":"%edx","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_long.S b/runtime/interpreter/mterp/x86_64/op_rem_long.S
new file mode 100644
index 0000000..efa7215
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_long.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv.S" {"result":"%rdx","second":"%rcx","wide":"1","suffix":"q","ext":"cqo","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_long_2addr.S
new file mode 100644
index 0000000..ce0dd86
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv2addr.S" {"result":"%rdx","second":"%rcx","wide":"1","suffix":"q","rem":"1","ext":"cqo"}
diff --git a/runtime/interpreter/mterp/x86_64/op_return.S b/runtime/interpreter/mterp/x86_64/op_return.S
new file mode 100644
index 0000000..14f4f8a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_return.S
@@ -0,0 +1,15 @@
+/*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+    /* op vAA */
+    .extern MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
+    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+    jz      1f
+    movq    rSELF, OUT_ARG0
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    GET_VREG %eax, rINSTq                   # eax <- vAA
+    jmp     MterpReturn
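
MterpThreadFenceForConstructor runs on every return because testing whether the returning method is a constructor would cost more than the fence itself; for constructors it provides the store-store ordering the JMM needs so final fields are visible before the object's reference escapes. A conservative portable stand-in (a release fence; the actual ART primitive may be weaker):

    #include <atomic>

    // Ensure all prior stores (the <init> field writes) are ordered before
    // later stores (publishing the new object's reference).
    inline void ThreadFenceForConstructor() {
      std::atomic_thread_fence(std::memory_order_release);
    }
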
diff --git a/runtime/interpreter/mterp/x86_64/op_return_object.S b/runtime/interpreter/mterp/x86_64/op_return_object.S
new file mode 100644
index 0000000..1ae69a5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_return_object.S
@@ -0,0 +1 @@
+%include "x86_64/op_return.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_return_void.S b/runtime/interpreter/mterp/x86_64/op_return_void.S
new file mode 100644
index 0000000..46a5753
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_return_void.S
@@ -0,0 +1,9 @@
+    .extern MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
+    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+    jz      1f
+    movq    rSELF, OUT_ARG0
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    xorq    %rax, %rax
+    jmp     MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
new file mode 100644
index 0000000..92e3506
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
@@ -0,0 +1,7 @@
+    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+    jz      1f
+    movq    rSELF, OUT_ARG0
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    xorq    %rax, %rax
+    jmp     MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_return_wide.S b/runtime/interpreter/mterp/x86_64/op_return_wide.S
new file mode 100644
index 0000000..f2d6e04
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_return_wide.S
@@ -0,0 +1,13 @@
+/*
+ * Return a 64-bit value.
+ */
+    /* return-wide vAA */
+    .extern MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
+    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+    jz      1f
+    movq    rSELF, OUT_ARG0
+    call    SYMBOL(MterpSuspendCheck)
+1:
+    GET_WIDE_VREG %rax, rINSTq              # rax <- v[AA]
+    jmp     MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_rsub_int.S b/runtime/interpreter/mterp/x86_64/op_rsub_int.S
new file mode 100644
index 0000000..2dd2002
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rsub_int.S
@@ -0,0 +1,2 @@
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%include "x86_64/binopLit16.S" {"instr":"subl    %eax, %ecx","result":"%ecx"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rsub_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_rsub_int_lit8.S
new file mode 100644
index 0000000..64d0d8a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rsub_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"subl    %eax, %ecx" , "result":"%ecx"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget.S b/runtime/interpreter/mterp/x86_64/op_sget.S
new file mode 100644
index 0000000..38d9a5e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget.S
@@ -0,0 +1,25 @@
+%default { "is_object":"0", "helper":"artGet32StaticFromCode", "wide":"0" }
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+    /* op vAA, field@BBBB */
+    .extern $helper
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    movq    OFF_FP_METHOD(rFP), OUT_ARG1    # referrer
+    movq    rSELF, OUT_ARG2                 # self
+    call    SYMBOL($helper)
+    cmpl    $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+    jnz     MterpException
+    .if $is_object
+    SET_VREG_OBJECT %eax, rINSTq            # fp[AA] <- value
+    .else
+    .if $wide
+    SET_WIDE_VREG %rax, rINSTq              # fp[AA] <- value
+    .else
+    SET_VREG %eax, rINSTq                   # fp[AA] <- value
+    .endif
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
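
The `.if $is_object` / `.if $wide` selection produces one store flavor per generated variant. The three SET_VREG* macros differ only in how they treat the shadow frame's parallel reference slots, roughly as sketched below (the parallel-array layout is the assumption here; names are illustrative):

    #include <cstdint>

    enum class Kind { kInt32, kWide, kObject };

    // Sketch of SET_VREG / SET_WIDE_VREG / SET_VREG_OBJECT on parallel arrays.
    template <Kind kKind>
    void SetVReg(uint32_t* vregs, uint32_t* refs, int i, uint64_t raw) {
      if (kKind == Kind::kWide) {
        vregs[i] = static_cast<uint32_t>(raw);            // low half
        vregs[i + 1] = static_cast<uint32_t>(raw >> 32);  // high half
        refs[i] = 0;
        refs[i + 1] = 0;  // wide values are never references
      } else if (kKind == Kind::kObject) {
        vregs[i] = static_cast<uint32_t>(raw);
        refs[i] = static_cast<uint32_t>(raw);  // keep the GC's view in sync
      } else {
        vregs[i] = static_cast<uint32_t>(raw);
        refs[i] = 0;  // clear any stale reference bits
      }
    }
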
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
new file mode 100644
index 0000000..7d358da
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
@@ -0,0 +1 @@
+%include "x86_64/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_byte.S b/runtime/interpreter/mterp/x86_64/op_sget_byte.S
new file mode 100644
index 0000000..79d9ff4
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget_byte.S
@@ -0,0 +1 @@
+%include "x86_64/op_sget.S" {"helper":"artGetByteStaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_char.S b/runtime/interpreter/mterp/x86_64/op_sget_char.S
new file mode 100644
index 0000000..4488610
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget_char.S
@@ -0,0 +1 @@
+%include "x86_64/op_sget.S" {"helper":"artGetCharStaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_object.S b/runtime/interpreter/mterp/x86_64/op_sget_object.S
new file mode 100644
index 0000000..09b627e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget_object.S
@@ -0,0 +1 @@
+%include "x86_64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_short.S b/runtime/interpreter/mterp/x86_64/op_sget_short.S
new file mode 100644
index 0000000..47ac238
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget_short.S
@@ -0,0 +1 @@
+%include "x86_64/op_sget.S" {"helper":"artGetShortStaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_wide.S b/runtime/interpreter/mterp/x86_64/op_sget_wide.S
new file mode 100644
index 0000000..aa22343
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget_wide.S
@@ -0,0 +1 @@
+%include "x86_64/op_sget.S" {"helper":"artGet64StaticFromCode", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_int.S b/runtime/interpreter/mterp/x86_64/op_shl_int.S
new file mode 100644
index 0000000..fa1edb7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shl_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop1.S" {"instr":"sall    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_shl_int_2addr.S
new file mode 100644
index 0000000..dd96279
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shl_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/shop2addr.S" {"instr":"sall    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_shl_int_lit8.S
new file mode 100644
index 0000000..39b23ae
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shl_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"sall    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_long.S b/runtime/interpreter/mterp/x86_64/op_shl_long.S
new file mode 100644
index 0000000..fdc7cb6
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shl_long.S
@@ -0,0 +1 @@
+%include "x86_64/binop1.S" {"instr":"salq    %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_shl_long_2addr.S
new file mode 100644
index 0000000..546633f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shl_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/shop2addr.S" {"instr":"salq    %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_int.S b/runtime/interpreter/mterp/x86_64/op_shr_int.S
new file mode 100644
index 0000000..fc289f4
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shr_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop1.S" {"instr":"sarl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_shr_int_2addr.S
new file mode 100644
index 0000000..0e5bca7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shr_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/shop2addr.S" {"instr":"sarl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_shr_int_lit8.S
new file mode 100644
index 0000000..3cc9307
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shr_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"sarl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_long.S b/runtime/interpreter/mterp/x86_64/op_shr_long.S
new file mode 100644
index 0000000..25028d3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shr_long.S
@@ -0,0 +1 @@
+%include "x86_64/binop1.S" {"instr":"sarq    %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_shr_long_2addr.S
new file mode 100644
index 0000000..3738413
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shr_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/shop2addr.S" {"instr":"sarq    %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sparse_switch.S b/runtime/interpreter/mterp/x86_64/op_sparse_switch.S
new file mode 100644
index 0000000..0eaa514
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sparse_switch.S
@@ -0,0 +1 @@
+%include "x86_64/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/x86_64/op_sput.S b/runtime/interpreter/mterp/x86_64/op_sput.S
new file mode 100644
index 0000000..e92b032
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput.S
@@ -0,0 +1,17 @@
+%default { "helper":"artSet32StaticFromCode"}
+/*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+    /* op vAA, field@BBBB */
+    .extern $helper
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    GET_VREG OUT_32_ARG1, rINSTq            # OUT_32_ARG1 <- vAA
+    movq    OFF_FP_METHOD(rFP), OUT_ARG2    # referrer
+    movq    rSELF, OUT_ARG3                 # self
+    call    SYMBOL($helper)
+    testb   %al, %al
+    jnz     MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
new file mode 100644
index 0000000..8718915
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
@@ -0,0 +1 @@
+%include "x86_64/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_byte.S b/runtime/interpreter/mterp/x86_64/op_sput_byte.S
new file mode 100644
index 0000000..8718915
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput_byte.S
@@ -0,0 +1 @@
+%include "x86_64/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_char.S b/runtime/interpreter/mterp/x86_64/op_sput_char.S
new file mode 100644
index 0000000..2fe9d14
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput_char.S
@@ -0,0 +1 @@
+%include "x86_64/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_object.S b/runtime/interpreter/mterp/x86_64/op_sput_object.S
new file mode 100644
index 0000000..eb5a376
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput_object.S
@@ -0,0 +1,10 @@
+    EXPORT_PC
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+    movq    rPC, OUT_ARG1
+    REFRESH_INST ${opnum}
+    movq    rINSTq, OUT_ARG2
+    movq    rSELF, OUT_ARG3
+    call    SYMBOL(MterpSputObject)
+    testb   %al, %al
+    jz      MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_short.S b/runtime/interpreter/mterp/x86_64/op_sput_short.S
new file mode 100644
index 0000000..2fe9d14
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput_short.S
@@ -0,0 +1 @@
+%include "x86_64/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_wide.S b/runtime/interpreter/mterp/x86_64/op_sput_wide.S
new file mode 100644
index 0000000..c4bc269
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput_wide.S
@@ -0,0 +1,15 @@
+/*
+ * SPUT_WIDE handler wrapper.
+ * for: sput-wide
+ */
+    /* sput-wide vAA, field@BBBB */
+    .extern artSet64IndirectStaticFromMterp
+    EXPORT_PC
+    movzwq  2(rPC), OUT_ARG0                # field ref BBBB
+    movq    OFF_FP_METHOD(rFP), OUT_ARG1    # referrer
+    leaq    VREG_ADDRESS(rINSTq), OUT_ARG2  # &fp[AA]
+    movq    rSELF, OUT_ARG3                 # self
+    call    SYMBOL(artSet64IndirectStaticFromMterp)
+    testb   %al, %al
+    jnz     MterpException
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_double.S b/runtime/interpreter/mterp/x86_64/op_sub_double.S
new file mode 100644
index 0000000..952667e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_double.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"subs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_double_2addr.S
new file mode 100644
index 0000000..0bd5dbb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_double_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"subs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_float.S b/runtime/interpreter/mterp/x86_64/op_sub_float.S
new file mode 100644
index 0000000..ea0ae14
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_float.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"subs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_float_2addr.S
new file mode 100644
index 0000000..9dd1780
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_float_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"subs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_int.S b/runtime/interpreter/mterp/x86_64/op_sub_int.S
new file mode 100644
index 0000000..560394f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop.S" {"instr":"subl    (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_int_2addr.S
new file mode 100644
index 0000000..6f50f78
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binop2addr.S" {"instr":"subl    %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_long.S b/runtime/interpreter/mterp/x86_64/op_sub_long.S
new file mode 100644
index 0000000..7fa54e7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_long.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide.S" {"instr":"subq    (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_long_2addr.S
new file mode 100644
index 0000000..c18be10
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide2addr.S" {"instr":"subq    %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_throw.S b/runtime/interpreter/mterp/x86_64/op_throw.S
new file mode 100644
index 0000000..22ed990
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_throw.S
@@ -0,0 +1,10 @@
+/*
+ * Throw an exception object in the current thread.
+ */
+    /* throw vAA */
+    EXPORT_PC
+    GET_VREG %eax, rINSTq                   # eax<- vAA (exception object)
+    testl   %eax, %eax                      # null check on the full 32-bit reference
+    jz      common_errNullObject
+    movq    %rax, THREAD_EXCEPTION_OFFSET(rSELF)
+    jmp     MterpException
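
In C++ terms the handler is a null check plus a store into the thread-local exception slot; the real unwinding happens in MterpException. A sketch under assumed field names:

    struct Thread { void* exception;  /* other fields elided */ };

    // Returns true if the exception was installed, false for the NPE path
    // (common_errNullObject in the assembly above).
    bool HandleThrow(Thread* self, void* exception_obj) {
      if (exception_obj == nullptr) {
        return false;
      }
      self->exception = exception_obj;
      return true;
    }
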
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_3e.S b/runtime/interpreter/mterp/x86_64/op_unused_3e.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_3e.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_3f.S b/runtime/interpreter/mterp/x86_64/op_unused_3f.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_3f.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_40.S b/runtime/interpreter/mterp/x86_64/op_unused_40.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_40.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_41.S b/runtime/interpreter/mterp/x86_64/op_unused_41.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_41.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_42.S b/runtime/interpreter/mterp/x86_64/op_unused_42.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_42.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_43.S b/runtime/interpreter/mterp/x86_64/op_unused_43.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_43.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_79.S b/runtime/interpreter/mterp/x86_64/op_unused_79.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_79.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_7a.S b/runtime/interpreter/mterp/x86_64/op_unused_7a.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_7a.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f4.S b/runtime/interpreter/mterp/x86_64/op_unused_f4.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f4.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fa.S b/runtime/interpreter/mterp/x86_64/op_unused_fa.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_fa.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fb.S b/runtime/interpreter/mterp/x86_64/op_unused_fb.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_fb.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fc.S b/runtime/interpreter/mterp/x86_64/op_unused_fc.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_fc.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fd.S b/runtime/interpreter/mterp/x86_64/op_unused_fd.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_fd.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fe.S b/runtime/interpreter/mterp/x86_64/op_unused_fe.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_fe.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_ff.S b/runtime/interpreter/mterp/x86_64/op_unused_ff.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_ff.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_int.S b/runtime/interpreter/mterp/x86_64/op_ushr_int.S
new file mode 100644
index 0000000..dd91086
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_ushr_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop1.S" {"instr":"shrl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_ushr_int_2addr.S
new file mode 100644
index 0000000..d38aedd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_ushr_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/shop2addr.S" {"instr":"shrl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_ushr_int_lit8.S
new file mode 100644
index 0000000..f7ff8ab
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_ushr_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"shrl    %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_long.S b/runtime/interpreter/mterp/x86_64/op_ushr_long.S
new file mode 100644
index 0000000..7c6daca
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_ushr_long.S
@@ -0,0 +1 @@
+%include "x86_64/binop1.S" {"instr":"shrq    %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_ushr_long_2addr.S
new file mode 100644
index 0000000..cd6a22c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_ushr_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/shop2addr.S" {"instr":"shrq    %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int.S b/runtime/interpreter/mterp/x86_64/op_xor_int.S
new file mode 100644
index 0000000..b295d74
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_xor_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop.S" {"instr":"xorl    (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_xor_int_2addr.S
new file mode 100644
index 0000000..879bfc0
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_xor_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binop2addr.S" {"instr":"xorl    %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_xor_int_lit16.S
new file mode 100644
index 0000000..5d375a1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_xor_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit16.S" {"instr":"xorl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_xor_int_lit8.S
new file mode 100644
index 0000000..54cce9c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_xor_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"xorl    %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_long.S b/runtime/interpreter/mterp/x86_64/op_xor_long.S
new file mode 100644
index 0000000..52b44e2
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_xor_long.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide.S" {"instr":"xorq    (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_xor_long_2addr.S
new file mode 100644
index 0000000..d75c4ba
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_xor_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide2addr.S" {"instr":"xorq    %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/shop2addr.S b/runtime/interpreter/mterp/x86_64/shop2addr.S
new file mode 100644
index 0000000..6b06d00
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/shop2addr.S
@@ -0,0 +1,19 @@
+%default {"wide":"0"}
+/*
+ * Generic 32-bit/64-bit "shift/2addr" operation.
+ */
+    /* shift/2addr vA, vB */
+    movl    rINST, %ecx                     # ecx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # ecx <- vB
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    .if $wide
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vA
+    $instr                                  # ex: sarq %cl, %rax
+    SET_WIDE_VREG %rax, rINSTq
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vA
+    $instr                                  # ex: sarl %cl, %eax
+    SET_VREG %eax, rINSTq
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
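
There is no explicit masking of the shift count here: Dalvik defines int shifts to use only the low 5 bits of the count (6 bits for longs), and x86 variable shifts mask %cl the same way, so the hardware semantics line up for free. A C++ sketch of that assumed equivalence:

    #include <cstdint>

    int32_t ShlInt(int32_t value, int32_t count) {
      // Dalvik and x86 both use count mod 32; shift as unsigned to avoid UB.
      return static_cast<int32_t>(static_cast<uint32_t>(value) << (count & 0x1f));
    }

    int64_t ShlLong(int64_t value, int32_t count) {
      // 64-bit shifts use count mod 64.
      return static_cast<int64_t>(static_cast<uint64_t>(value) << (count & 0x3f));
    }
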
diff --git a/runtime/interpreter/mterp/x86_64/sseBinop.S b/runtime/interpreter/mterp/x86_64/sseBinop.S
new file mode 100644
index 0000000..09d3364
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/sseBinop.S
@@ -0,0 +1,9 @@
+%default {"instr":"","suff":""}
+    movzbq  2(rPC), %rcx                    # ecx <- BB
+    movzbq  3(rPC), %rax                    # eax <- CC
+    movs${suff}   VREG_ADDRESS(%rcx), %xmm0       # %xmm0 <- 1st src
+    ${instr}${suff} VREG_ADDRESS(%rax), %xmm0
+    movs${suff}   %xmm0, VREG_ADDRESS(rINSTq)     # vAA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movs${suff}   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/sseBinop2Addr.S b/runtime/interpreter/mterp/x86_64/sseBinop2Addr.S
new file mode 100644
index 0000000..084166b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/sseBinop2Addr.S
@@ -0,0 +1,10 @@
+%default {"instr":"","suff":""}
+    movl    rINST, %ecx                     # ecx <- A+
+    andl    $$0xf, %ecx                     # ecx <- A
+    movs${suff} VREG_ADDRESS(%rcx), %xmm0        # %xmm0 <- 1st src
+    sarl    $$4, rINST                      # rINST <- B
+    ${instr}${suff} VREG_ADDRESS(rINSTq), %xmm0
+    movs${suff} %xmm0, VREG_ADDRESS(%rcx)   # vA <- %xmm0
+    pxor    %xmm0, %xmm0
+    movs${suff} %xmm0, VREG_REF_ADDRESS(%rcx)    # clear the destination's ref slot
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/unop.S b/runtime/interpreter/mterp/x86_64/unop.S
new file mode 100644
index 0000000..1777123
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/unop.S
@@ -0,0 +1,22 @@
+%default {"preinstr":"", "instr":"", "wide":"0"}
+/*
+ * Generic 32/64-bit unary operation.  Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+    /* unop vA, vB */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    .if ${wide}
+    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
+    .else
+    GET_VREG %eax, rINSTq                   # eax <- vB
+    .endif
+    andb    $$0xf, %cl                      # ecx <- A
+$preinstr
+$instr
+    .if ${wide}
+    SET_WIDE_VREG %rax, %rcx
+    .else
+    SET_VREG %eax, %rcx
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/unused.S b/runtime/interpreter/mterp/x86_64/unused.S
new file mode 100644
index 0000000..c95ef94
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/unused.S
@@ -0,0 +1,4 @@
+/*
+ * Bail to the reference interpreter, which will throw the appropriate error.
+ */
+    jmp     MterpFallback
diff --git a/runtime/interpreter/mterp/x86_64/zcmp.S b/runtime/interpreter/mterp/x86_64/zcmp.S
new file mode 100644
index 0000000..0051407
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/zcmp.S
@@ -0,0 +1,19 @@
+/*
+ * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+    /* if-cmp vAA, +BBBB */
+    cmpl    $$0, VREG_ADDRESS(rINSTq)       # compare (vAA, 0)
+    movl    $$2, rINST                      # assume branch not taken
+    j${revcmp}   1f
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
+1:
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- offset * 2
+    leaq    (rPC, rINSTq), rPC
+    FETCH_INST
+    jle     MterpCheckSuspendAndContinue    # offset * 2 <= 0 => suspend check
+    GOTO_NEXT
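
The `jle MterpCheckSuspendAndContinue` relies on the flags left by the preceding addq: doubling the signed code-unit offset leaves SF/ZF describing its sign, so any non-positive offset (a backward branch or a branch to self) goes through the suspend poll before dispatching. The predicate, spelled out:

    #include <cstdint>

    // Poll for thread suspension only on non-forward branches; hot forward
    // branches skip straight to the next instruction.
    bool NeedsSuspendCheck(int32_t code_unit_offset) {
      return code_unit_offset <= 0;
    }
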
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 0e175b8..b21f1ec 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -22,11 +22,13 @@
 #include "ScopedLocalRef.h"
 
 #include "art_method-inl.h"
+#include "base/casts.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "class_linker.h"
 #include "common_throws.h"
 #include "entrypoints/entrypoint_utils-inl.h"
+#include "gc/reference_processor.h"
 #include "handle_scope-inl.h"
 #include "interpreter/interpreter_common.h"
 #include "mirror/array-inl.h"
@@ -261,6 +263,25 @@
   }
 }
 
+// This is required for Enum(Set) code, as that uses reflection to inspect enum classes.
+void UnstartedRuntime::UnstartedClassGetDeclaredMethod(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  // Special managed code cut-out to allow method lookup in an unstarted runtime.
+  mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+  if (klass == nullptr) {
+    ThrowNullPointerExceptionForMethodAccess(shadow_frame->GetMethod(), InvokeType::kVirtual);
+    return;
+  }
+  mirror::String* name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
+  mirror::ObjectArray<mirror::Class>* args =
+      shadow_frame->GetVRegReference(arg_offset + 2)->AsObjectArray<mirror::Class>();
+  if (Runtime::Current()->IsActiveTransaction()) {
+    result->SetL(mirror::Class::GetDeclaredMethodInternal<true>(self, klass, name, args));
+  } else {
+    result->SetL(mirror::Class::GetDeclaredMethodInternal<false>(self, klass, name, args));
+  }
+}
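
The IsActiveTransaction() fork into <true>/<false> instantiations is a recurring ART pattern: writes made during a compile-time transaction must record prior state so the transaction can be rolled back, and making the flag a template parameter keeps the non-transactional path free of checks. A standalone miniature of the idea (hypothetical types, not ART's):

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct Obj { uint32_t field; };
    static std::vector<std::pair<Obj*, uint32_t>> g_rollback_log;

    template <bool kTransactionActive>
    void WriteField(Obj* obj, uint32_t new_value) {
      if (kTransactionActive) {
        g_rollback_log.emplace_back(obj, obj->field);  // remember old value
      }
      obj->field = new_value;
    }

    // Call sites mirror the runtime check above:
    //   active ? WriteField<true>(o, v) : WriteField<false>(o, v);
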
+
 void UnstartedRuntime::UnstartedClassGetEnclosingClass(
     Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
   StackHandleScope<1> hs(self);
@@ -860,6 +881,155 @@
   result->SetL(string->ToCharArray(self));
 }
 
+// This allows statically initializing ConcurrentHashMap and SynchronousQueue.
+void UnstartedRuntime::UnstartedReferenceGetReferent(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  mirror::Reference* const ref = down_cast<mirror::Reference*>(
+      shadow_frame->GetVRegReference(arg_offset));
+  if (ref == nullptr) {
+    AbortTransactionOrFail(self, "Reference.getReferent() with null object");
+    return;
+  }
+  mirror::Object* const referent =
+      Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(self, ref);
+  result->SetL(referent);
+}
+
+// This allows statically initializing ConcurrentHashMap and SynchronousQueue. We return a
+// somewhat conservative upper bound and restrict the callers to SynchronousQueue and
+// ConcurrentHashMap, where we can predict the behavior reasonably well.
+// Note: this is required (instead of lazy initialization) as these classes are used in the static
+//       initialization of other classes, so will *use* the value.
+void UnstartedRuntime::UnstartedRuntimeAvailableProcessors(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
+  std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod()));
+  if (caller == "void java.util.concurrent.SynchronousQueue.<clinit>()") {
+    // SynchronousQueue really only distinguishes between the single- and multi-processor
+    // cases. Return 8 as a conservative upper approximation.
+    result->SetI(8);
+  } else if (caller == "void java.util.concurrent.ConcurrentHashMap.<clinit>()") {
+    // ConcurrentHashMap uses it for striding. 8 still seems an OK general value, as it's likely
+    // a good upper bound.
+    // TODO: Consider resetting in the zygote?
+    result->SetI(8);
+  } else {
+    // Not supported.
+    AbortTransactionOrFail(self, "Accessing availableProcessors not allowed");
+  }
+}
+
+// This allows accessing ConcurrentHashMap/SynchronousQueue.
+
+void UnstartedRuntime::UnstartedUnsafeCompareAndSwapLong(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  // Argument 0 is the Unsafe instance, skip.
+  mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+    return;
+  }
+  int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+  int64_t expected_value = shadow_frame->GetVRegLong(arg_offset + 4);
+  int64_t new_value = shadow_frame->GetVRegLong(arg_offset + 6);
+
+  // Must use non-transactional mode.
+  if (kUseReadBarrier) {
+    // Need to make sure the reference stored in the field is a to-space one before attempting the
+    // CAS or the CAS could fail incorrectly.
+    mirror::HeapReference<mirror::Object>* field_addr =
+        reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
+            reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
+    ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /*kAlwaysUpdateField*/true>(
+        obj,
+        MemberOffset(offset),
+        field_addr);
+  }
+  bool success;
+  // Check whether we're in a transaction, call accordingly.
+  if (Runtime::Current()->IsActiveTransaction()) {
+    success = obj->CasFieldStrongSequentiallyConsistent64<true>(MemberOffset(offset),
+                                                                expected_value,
+                                                                new_value);
+  } else {
+    success = obj->CasFieldStrongSequentiallyConsistent64<false>(MemberOffset(offset),
+                                                                 expected_value,
+                                                                 new_value);
+  }
+  result->SetZ(success ? 1 : 0);
+}
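
Under a concurrent-copying collector the field could still hold a from-space reference, so a CAS against a to-space expected value would fail spuriously; the read barrier normalizes the field first. After that, the operation is an ordinary strong CAS, roughly:

    #include <atomic>
    #include <cstdint>

    // Plain sequentially consistent strong CAS; returns whether the swap
    // happened (the Z result the interpreter stores).
    bool CasLong(std::atomic<int64_t>* addr, int64_t expected, int64_t desired) {
      return addr->compare_exchange_strong(expected, desired,
                                           std::memory_order_seq_cst);
    }
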
+
+void UnstartedRuntime::UnstartedUnsafeCompareAndSwapObject(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  // Argument 0 is the Unsafe instance, skip.
+  mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+    return;
+  }
+  int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+  mirror::Object* expected_value = shadow_frame->GetVRegReference(arg_offset + 4);
+  mirror::Object* new_value = shadow_frame->GetVRegReference(arg_offset + 5);
+
+  // Must use non-transactional mode.
+  if (kUseReadBarrier) {
+    // Need to make sure the reference stored in the field is a to-space one before attempting the
+    // CAS or the CAS could fail incorrectly.
+    mirror::HeapReference<mirror::Object>* field_addr =
+        reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
+            reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
+    ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /*kAlwaysUpdateField*/true>(
+        obj,
+        MemberOffset(offset),
+        field_addr);
+  }
+  bool success;
+  // Check whether we're in a transaction, call accordingly.
+  if (Runtime::Current()->IsActiveTransaction()) {
+    success = obj->CasFieldStrongSequentiallyConsistentObject<true>(MemberOffset(offset),
+                                                                    expected_value,
+                                                                    new_value);
+  } else {
+    success = obj->CasFieldStrongSequentiallyConsistentObject<false>(MemberOffset(offset),
+                                                                     expected_value,
+                                                                     new_value);
+  }
+  result->SetZ(success ? 1 : 0);
+}
+
+void UnstartedRuntime::UnstartedUnsafeGetObjectVolatile(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  // Argument 0 is the Unsafe instance, skip.
+  mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+    return;
+  }
+  int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+  mirror::Object* value = obj->GetFieldObjectVolatile<mirror::Object>(MemberOffset(offset));
+  result->SetL(value);
+}
+
+void UnstartedRuntime::UnstartedUnsafePutOrderedObject(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  // Argument 0 is the Unsafe instance, skip.
+  mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+    return;
+  }
+  int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+  mirror::Object* new_value = shadow_frame->GetVRegReference(arg_offset + 4);
+  QuasiAtomic::ThreadFenceRelease();
+  if (Runtime::Current()->IsActiveTransaction()) {
+    obj->SetFieldObject<true>(MemberOffset(offset), new_value);
+  } else {
+    obj->SetFieldObject<false>(MemberOffset(offset), new_value);
+  }
+}
+
+
 void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(
     Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
     uint32_t* args, JValue* result) {
@@ -906,6 +1076,17 @@
   result->SetD(exp(value.GetD()));
 }
 
+void UnstartedRuntime::UnstartedJNIAtomicLongVMSupportsCS8(
+    Thread* self ATTRIBUTE_UNUSED,
+    ArtMethod* method ATTRIBUTE_UNUSED,
+    mirror::Object* receiver ATTRIBUTE_UNUSED,
+    uint32_t* args ATTRIBUTE_UNUSED,
+    JValue* result) {
+  result->SetZ(QuasiAtomic::LongAtomicsUseMutexes(Runtime::Current()->GetInstructionSet())
+                   ? 0
+                   : 1);
+}
+
 void UnstartedRuntime::UnstartedJNIClassGetNameNative(
     Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
     uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
@@ -913,6 +1094,13 @@
   result->SetL(mirror::Class::ComputeName(hs.NewHandle(receiver->AsClass())));
 }
 
+void UnstartedRuntime::UnstartedJNIDoubleLongBitsToDouble(
+    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
+  uint64_t long_input = args[0] | (static_cast<uint64_t>(args[1]) << 32);
+  result->SetD(bit_cast<double>(long_input));
+}
+
 void UnstartedRuntime::UnstartedJNIFloatFloatToRawIntBits(
     Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
     mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
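
One detail worth calling out in UnstartedJNIDoubleLongBitsToDouble above: JNI-style unstarted handlers receive arguments as an array of 32-bit slots, so the long is reassembled from two words (low word first) before the bit cast. A standalone equivalent:

    #include <cstdint>
    #include <cstring>

    double LongBitsToDouble(uint32_t lo, uint32_t hi) {
      uint64_t bits = lo | (static_cast<uint64_t>(hi) << 32);
      double result;
      std::memcpy(&result, &bits, sizeof(result));  // safe bit_cast
      return result;
    }
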
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index 6d4d711..29f2197 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -24,6 +24,7 @@
   V(ClassClassForName, "java.lang.Class java.lang.Class.classForName(java.lang.String, boolean, java.lang.ClassLoader)") \
   V(ClassNewInstance, "java.lang.Object java.lang.Class.newInstance()") \
   V(ClassGetDeclaredField, "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") \
+  V(ClassGetDeclaredMethod, "java.lang.reflect.Method java.lang.Class.getDeclaredMethodInternal(java.lang.String, java.lang.Class[])") \
   V(ClassGetEnclosingClass, "java.lang.Class java.lang.Class.getEnclosingClass()") \
   V(VmClassLoaderFindLoadedClass, "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") \
   V(VoidLookupType, "java.lang.Class java.lang.Void.lookupType()") \
@@ -40,6 +41,8 @@
   V(MemoryPeekInt, "int libcore.io.Memory.peekIntNative(long)") \
   V(MemoryPeekLong, "long libcore.io.Memory.peekLongNative(long)") \
   V(MemoryPeekByteArray, "void libcore.io.Memory.peekByteArray(long, byte[], int, int)") \
+  V(ReferenceGetReferent, "java.lang.Object java.lang.ref.Reference.getReferent()") \
+  V(RuntimeAvailableProcessors, "int java.lang.Runtime.availableProcessors()") \
   V(SecurityGetSecurityPropertiesReader, "java.io.Reader java.security.Security.getSecurityPropertiesReader()") \
   V(StringGetCharsNoCheck, "void java.lang.String.getCharsNoCheck(int, int, char[], int)") \
   V(StringCharAt, "char java.lang.String.charAt(int)") \
@@ -47,7 +50,11 @@
   V(StringFactoryNewStringFromChars, "java.lang.String java.lang.StringFactory.newStringFromChars(int, int, char[])") \
   V(StringFactoryNewStringFromString, "java.lang.String java.lang.StringFactory.newStringFromString(java.lang.String)") \
   V(StringFastSubstring, "java.lang.String java.lang.String.fastSubstring(int, int)") \
-  V(StringToCharArray, "char[] java.lang.String.toCharArray()")
+  V(StringToCharArray, "char[] java.lang.String.toCharArray()") \
+  V(UnsafeCompareAndSwapLong, "boolean sun.misc.Unsafe.compareAndSwapLong(java.lang.Object, long, long, long)") \
+  V(UnsafeCompareAndSwapObject, "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") \
+  V(UnsafeGetObjectVolatile, "java.lang.Object sun.misc.Unsafe.getObjectVolatile(java.lang.Object, long)") \
+  V(UnsafePutOrderedObject, "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)")
 
 // Methods that are native.
 #define UNSTARTED_RUNTIME_JNI_LIST(V)           \
@@ -56,7 +63,9 @@
   V(VMStackGetStackClass2, "java.lang.Class dalvik.system.VMStack.getStackClass2()") \
   V(MathLog, "double java.lang.Math.log(double)") \
   V(MathExp, "double java.lang.Math.exp(double)") \
+  V(AtomicLongVMSupportsCS8, "boolean java.util.concurrent.atomic.AtomicLong.VMSupportsCS8()") \
   V(ClassGetNameNative, "java.lang.String java.lang.Class.getNameNative()") \
+  V(DoubleLongBitsToDouble, "double java.lang.Double.longBitsToDouble(long)") \
   V(FloatFloatToRawIntBits, "int java.lang.Float.floatToRawIntBits(float)") \
   V(FloatIntBitsToFloat, "float java.lang.Float.intBitsToFloat(int)") \
   V(ObjectInternalClone, "java.lang.Object java.lang.Object.internalClone()") \
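
These lists are X-macros: each V(Name, "descriptor") entry expands once per consumer, so a single addition here declares the handler, registers it in the dispatch table, and so on. The pattern in miniature:

    #define DEMO_LIST(V) \
      V(Foo, "int Foo()") \
      V(Bar, "void Bar()")

    // Consumer 1: declarations.
    #define DECLARE(name, desc) void Unstarted##name();
    DEMO_LIST(DECLARE)
    #undef DECLARE

    // Consumer 2: a descriptor table.
    static const char* const kDescriptors[] = {
    #define DESC(name, desc) desc,
        DEMO_LIST(DESC)
    #undef DESC
    };
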
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 5c44193..191c0c7 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -716,9 +716,11 @@
   libraries_.get()->UnloadNativeLibraries();
 }
 
-bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject class_loader,
-                                  bool is_shared_namespace, jstring library_path,
-                                  jstring permitted_path, std::string* error_msg) {
+bool JavaVMExt::LoadNativeLibrary(JNIEnv* env,
+                                  const std::string& path,
+                                  jobject class_loader,
+                                  jstring library_path,
+                                  std::string* error_msg) {
   error_msg->clear();
 
   // See if we've already loaded this library.  If we have, and the class loader
@@ -777,9 +779,12 @@
 
   Locks::mutator_lock_->AssertNotHeld(self);
   const char* path_str = path.empty() ? nullptr : path.c_str();
-  void* handle = android::OpenNativeLibrary(env, runtime_->GetTargetSdkVersion(), path_str,
-                                            class_loader, is_shared_namespace, library_path,
-                                            permitted_path);
+  void* handle = android::OpenNativeLibrary(env,
+                                            runtime_->GetTargetSdkVersion(),
+                                            path_str,
+                                            class_loader,
+                                            library_path);
+
   bool needs_native_bridge = false;
   if (handle == nullptr) {
     if (android::NativeBridgeIsSupported(path_str)) {
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 8cae1e5..3d055cd 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -85,8 +85,10 @@
    * Returns 'true' on success. On failure, sets 'error_msg' to a
    * human-readable description of the error.
    */
-  bool LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject class_loader,
-                         bool is_shared_namespace, jstring library_path, jstring permitted_path,
+  bool LoadNativeLibrary(JNIEnv* env,
+                         const std::string& path,
+                         jobject class_loader,
+                         jstring library_path,
                          std::string* error_msg);
 
   // Unload native libraries with cleared class loaders.
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index f1f4a03..6278ef0 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -690,6 +690,19 @@
 }
 
 /*
+ * Invoke a static method on an interface.
+ */
+static JdwpError IT_InvokeMethod(JdwpState* state, Request* request,
+                                 ExpandBuf* pReply ATTRIBUTE_UNUSED)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  RefTypeId class_id = request->ReadRefTypeId();
+  ObjectId thread_id = request->ReadThreadId();
+  MethodId method_id = request->ReadMethodId();
+
+  return RequestInvoke(state, request, thread_id, 0, class_id, method_id, false);
+}
+
+/*
  * Return line number information for the method, if present.
  */
 static JdwpError M_LineTable(JdwpState*, Request* request, ExpandBuf* pReply)
@@ -1481,6 +1494,7 @@
   { 4,    1,  AT_newInstance,   "ArrayType.NewInstance" },
 
   /* InterfaceType command set (5) */
+  { 5,    1,  IT_InvokeMethod,  "InterfaceType.InvokeMethod" },
 
   /* Method command set (6) */
   { 6,    1,  M_LineTable,                "Method.LineTable" },
@@ -1579,6 +1593,8 @@
     return command == kJDWPClassTypeInvokeMethodCmd || command == kJDWPClassTypeNewInstanceCmd;
   } else if (command_set == kJDWPObjectReferenceCmdSet) {
     return command == kJDWPObjectReferenceInvokeCmd;
+  } else if (command_set == kJDWPInterfaceTypeCmdSet) {
+    return command == kJDWPInterfaceTypeInvokeMethodCmd;
   } else {
     return false;
   }
diff --git a/runtime/jdwp/jdwp_priv.h b/runtime/jdwp/jdwp_priv.h
index 29314f6..4e1bda8 100644
--- a/runtime/jdwp/jdwp_priv.h
+++ b/runtime/jdwp/jdwp_priv.h
@@ -45,6 +45,8 @@
 static constexpr uint8_t kJDWPClassTypeCmdSet = 3U;
 static constexpr uint8_t kJDWPClassTypeInvokeMethodCmd = 3U;
 static constexpr uint8_t kJDWPClassTypeNewInstanceCmd = 4U;
+static constexpr uint8_t kJDWPInterfaceTypeCmdSet = 5U;
+static constexpr uint8_t kJDWPInterfaceTypeInvokeMethodCmd = 1U;
 static constexpr uint8_t kJDWPObjectReferenceCmdSet = 9U;
 static constexpr uint8_t kJDWPObjectReferenceInvokeCmd = 6U;
 
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index bdc7ee2..91b006a 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -63,7 +63,8 @@
      << "JIT data cache size=" << PrettySize(code_cache_->DataCacheSize()) << "\n"
      << "JIT current capacity=" << PrettySize(code_cache_->GetCurrentCapacity()) << "\n"
      << "JIT number of compiled code=" << code_cache_->NumberOfCompiledCode() << "\n"
-     << "JIT total number of compilations=" << code_cache_->NumberOfCompilations() << "\n";
+     << "JIT total number of compilations=" << code_cache_->NumberOfCompilations() << "\n"
+     << "JIT total number of osr compilations=" << code_cache_->NumberOfOsrCompilations() << "\n";
   cumulative_timings_.Dump(os);
 }
 
@@ -212,6 +213,10 @@
   return false;
 }
 
+bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
+  return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
+}
+
 Jit::~Jit() {
   DCHECK(!save_profiling_info_ || !ProfileSaver::IsStarted());
   if (dump_info_on_shutdown_) {
@@ -369,8 +374,7 @@
           continue;
         }
 
-        DCHECK(location == DexRegisterLocation::Kind::kInStack)
-            << DexRegisterLocation::PrettyDescriptor(location);
+        DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);
 
         int32_t vreg_value = shadow_frame->GetVReg(vreg);
         int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg,
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 109ca3d..3f54192 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -84,8 +84,12 @@
   // into the specified class linker to the jit debug interface,
   void DumpTypeInfoForLoadedTypes(ClassLinker* linker);
 
+  // Return whether we should try to JIT-compile methods as soon as they are invoked.
   bool JitAtFirstUse();
 
+  // Return whether we can invoke JIT code for `method`.
+  bool CanInvokeCompiledCode(ArtMethod* method);
+
   // If an OSR compiled version is available for `method`,
   // and `dex_pc + dex_pc_offset` is an entry point of that compiled
   // version, this method will jump to the compiled code, let it run,
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index d5a9d66..e0380bd 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -123,10 +123,13 @@
       current_capacity_(initial_code_capacity + initial_data_capacity),
       code_end_(initial_code_capacity),
       data_end_(initial_data_capacity),
-      has_done_one_collection_(false),
+      last_collection_increased_code_cache_(false),
       last_update_time_ns_(0),
       garbage_collect_code_(garbage_collect_code),
-      number_of_compilations_(0) {
+      used_memory_for_data_(0),
+      used_memory_for_code_(0),
+      number_of_compilations_(0),
+      number_of_osr_compilations_(0) {
 
   DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
   code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
@@ -336,6 +339,7 @@
     MutexLock mu(self, lock_);
     method_code_map_.Put(code_ptr, method);
     if (osr) {
+      number_of_osr_compilations_++;
       osr_code_map_.Put(method, code_ptr);
     } else {
       Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
@@ -364,6 +368,11 @@
   return number_of_compilations_;
 }
 
+size_t JitCodeCache::NumberOfOsrCompilations() {
+  MutexLock mu(Thread::Current(), lock_);
+  return number_of_osr_compilations_;
+}
+
 size_t JitCodeCache::CodeCacheSize() {
   MutexLock mu(Thread::Current(), lock_);
   return CodeCacheSizeLocked();
@@ -524,8 +533,42 @@
   return true;
 }
 
+void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
+  Barrier barrier(0);
+  size_t threads_running_checkpoint = 0;
+  MarkCodeClosure closure(this, &barrier);
+  threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+  // Now that we have run our checkpoint, move to a suspended state and wait
+  // for other threads to run the checkpoint.
+  ScopedThreadSuspension sts(self, kSuspended);
+  if (threads_running_checkpoint != 0) {
+    barrier.Increment(self, threads_running_checkpoint);
+  }
+}
+
+bool JitCodeCache::ShouldDoFullCollection() {
+  if (current_capacity_ == max_capacity_) {
+    // Always do a full collection when the code cache is full.
+    return true;
+  } else if (current_capacity_ < kReservedCapacity) {
+    // Always do partial collection when the code cache size is below the reserved
+    // capacity.
+    return false;
+  } else if (last_collection_increased_code_cache_) {
+    // This time do a full collection.
+    return true;
+  } else {
+    // This time do a partial collection.
+    return false;
+  }
+}
+
 void JitCodeCache::GarbageCollectCache(Thread* self) {
-  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+  if (!garbage_collect_code_) {
+    MutexLock mu(self, lock_);
+    IncreaseCodeCacheCapacity();
+    return;
+  }
 
   // Wait for an existing collection, or let everyone know we are starting one.
   {
@@ -534,45 +577,113 @@
     if (WaitForPotentialCollectionToComplete(self)) {
       return;
     } else {
-      collection_in_progress_ = true;
-    }
-  }
-
-  // Check if we just need to grow the capacity. If we don't, allocate the bitmap while
-  // we hold the lock.
-  {
-    MutexLock mu(self, lock_);
-    if (!garbage_collect_code_ || current_capacity_ < kReservedCapacity) {
-      IncreaseCodeCacheCapacity();
-      NotifyCollectionDone(self);
-      return;
-    } else if (has_done_one_collection_ && IncreaseCodeCacheCapacity()) {
-      has_done_one_collection_ = false;
-      NotifyCollectionDone(self);
-      return;
-    } else {
       live_bitmap_.reset(CodeCacheBitmap::Create(
           "code-cache-bitmap",
           reinterpret_cast<uintptr_t>(code_map_->Begin()),
           reinterpret_cast<uintptr_t>(code_map_->Begin() + current_capacity_ / 2)));
+      collection_in_progress_ = true;
     }
   }
 
+  bool do_full_collection = false;
+  {
+    MutexLock mu(self, lock_);
+    do_full_collection = ShouldDoFullCollection();
+  }
+
   if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
-    LOG(INFO) << "Clearing code cache, code="
+    LOG(INFO) << "Do "
+              << (do_full_collection ? "full" : "partial")
+              << " code cache collection, code="
               << PrettySize(CodeCacheSize())
               << ", data=" << PrettySize(DataCacheSize());
   }
-  // Walk over all compiled methods and set the entry points of these
-  // methods to interpreter.
+
+  DoCollection(self, /* collect_profiling_info */ do_full_collection);
+
+  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+    LOG(INFO) << "After code cache collection, code="
+              << PrettySize(CodeCacheSize())
+              << ", data=" << PrettySize(DataCacheSize());
+  }
+
   {
     MutexLock mu(self, lock_);
-    for (auto& it : method_code_map_) {
-      instrumentation->UpdateMethodsCode(it.second, GetQuickToInterpreterBridge());
+
+    // Increase the code cache only when we do partial collections.
+    // TODO: base this strategy on how full the code cache is?
+    if (do_full_collection) {
+      last_collection_increased_code_cache_ = false;
+    } else {
+      last_collection_increased_code_cache_ = true;
+      IncreaseCodeCacheCapacity();
     }
-    for (ProfilingInfo* info : profiling_infos_) {
-      if (!info->IsMethodBeingCompiled()) {
-        info->GetMethod()->SetProfilingInfo(nullptr);
+
+    bool next_collection_will_be_full = ShouldDoFullCollection();
+
+    // Start polling the liveness of compiled code to prepare for the next full collection.
+    // We avoid doing this if exit stubs are installed to not mess with the instrumentation.
+    // TODO(ngeoffray): Clean up instrumentation and code cache interactions.
+    if (!Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled() &&
+        next_collection_will_be_full) {
+      // Save the entry point of methods we have compiled, and update the entry
+      // point of those methods to the interpreter. If the method is invoked, the
+      // interpreter will update its entry point to the compiled code and call it.
+      for (ProfilingInfo* info : profiling_infos_) {
+        const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+        if (ContainsPc(entry_point)) {
+          info->SetSavedEntryPoint(entry_point);
+          info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+        }
+      }
+
+      DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
+    }
+    live_bitmap_.reset(nullptr);
+    NotifyCollectionDone(self);
+  }
+}
+
+void JitCodeCache::RemoveUnusedAndUnmarkedCode(Thread* self) {
+  MutexLock mu(self, lock_);
+  ScopedCodeCacheWrite scc(code_map_.get());
+  // Iterate over all compiled code and remove entries that are not marked and not
+  // the entrypoint of their corresponding ArtMethod.
+  for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+    const void* code_ptr = it->first;
+    ArtMethod* method = it->second;
+    uintptr_t allocation = FromCodeToAllocation(code_ptr);
+    const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+    const void* entrypoint = method->GetEntryPointFromQuickCompiledCode();
+    if ((entrypoint == method_header->GetEntryPoint()) || GetLiveBitmap()->Test(allocation)) {
+      ++it;
+    } else {
+      if (entrypoint == GetQuickToInterpreterBridge()) {
+        method->ClearCounter();
+      }
+      FreeCode(code_ptr, method);
+      it = method_code_map_.erase(it);
+    }
+  }
+}
+
+void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
+  {
+    MutexLock mu(self, lock_);
+    if (collect_profiling_info) {
+      // Clear the profiling info of methods that do not have compiled code as entrypoint.
+      // Also remove the saved entry point from the ProfilingInfo objects.
+      for (ProfilingInfo* info : profiling_infos_) {
+        const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+        if (!ContainsPc(ptr) && !info->IsMethodBeingCompiled()) {
+          info->GetMethod()->SetProfilingInfo(nullptr);
+        }
+        info->SetSavedEntryPoint(nullptr);
+      }
+    } else if (kIsDebugBuild) {
+      // Sanity check that the profiling infos do not have a dangling entry point.
+      for (ProfilingInfo* info : profiling_infos_) {
+        DCHECK(info->GetSavedEntryPoint() == nullptr);
       }
     }
 
@@ -582,65 +693,60 @@
   }
 
   // Run a checkpoint on all threads to mark the JIT compiled code they are running.
-  {
-    Barrier barrier(0);
-    size_t threads_running_checkpoint = 0;
-    MarkCodeClosure closure(this, &barrier);
-    threads_running_checkpoint =
-        Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
-    // Now that we have run our checkpoint, move to a suspended state and wait
-    // for other threads to run the checkpoint.
-    ScopedThreadSuspension sts(self, kSuspended);
-    if (threads_running_checkpoint != 0) {
-      barrier.Increment(self, threads_running_checkpoint);
-    }
-  }
+  MarkCompiledCodeOnThreadStacks(self);
 
-  {
+  // Remove compiled code that is not the entrypoint of their method and not in the call
+  // stack.
+  RemoveUnusedAndUnmarkedCode(self);
+
+  if (collect_profiling_info) {
     MutexLock mu(self, lock_);
-    // Free unused compiled code, and restore the entry point of used compiled code.
-    {
-      ScopedCodeCacheWrite scc(code_map_.get());
-      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
-        const void* code_ptr = it->first;
-        ArtMethod* method = it->second;
-        uintptr_t allocation = FromCodeToAllocation(code_ptr);
-        const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-        if (GetLiveBitmap()->Test(allocation)) {
-          instrumentation->UpdateMethodsCode(method, method_header->GetEntryPoint());
-          ++it;
-        } else {
-          method->ClearCounter();
-          DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
-          FreeCode(code_ptr, method);
-          it = method_code_map_.erase(it);
-        }
-      }
-    }
-
-    // Free all profiling infos of methods that were not being compiled.
+    // Free all profiling infos of methods that are neither compiled nor being compiled.
     auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
       [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
-        if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
+        const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+        // We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope
+        // that the compiled code would not get revived. As mutator threads run concurrently,
+        // they may have revived the compiled code, and now we are in the situation where
+        // a method has compiled code but no ProfilingInfo.
+        // We make sure compiled methods have a ProfilingInfo object. It is needed for
+        // code cache collection.
+        if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
+          // We clear the inline caches, as the classes in them might be stale.
+          info->ClearInlineCaches();
+          // Do a fence to make sure the clearing is seen before attaching to the method.
+          QuasiAtomic::ThreadFenceRelease();
+          info->GetMethod()->SetProfilingInfo(info);
+        } else if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) != info) {
+          // No need for this ProfilingInfo object anymore.
           FreeData(reinterpret_cast<uint8_t*>(info));
           return true;
         }
         return false;
       });
     profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());
-
-    live_bitmap_.reset(nullptr);
-    has_done_one_collection_ = true;
-    NotifyCollectionDone(self);
-  }
-
-  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
-    LOG(INFO) << "After clearing code cache, code="
-              << PrettySize(CodeCacheSize())
-              << ", data=" << PrettySize(DataCacheSize());
+    DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
   }
 }
 
+bool JitCodeCache::CheckLiveCompiledCodeHasProfilingInfo() {
+  // Check that methods we have compiled do have a ProfilingInfo object. We would
+  // have memory leaks of compiled code otherwise.
+  for (const auto& it : method_code_map_) {
+    ArtMethod* method = it.second;
+    if (method->GetProfilingInfo(sizeof(void*)) == nullptr) {
+      const void* code_ptr = it.first;
+      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+      if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
+        // If the code is not dead, then we have a problem. Note that this can even
+        // happen just after a collection, as mutator threads are running in parallel
+        // and could deoptimize existing compiled code.
+        return false;
+      }
+    }
+  }
+  return true;
+}
 
 OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
   static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
@@ -683,23 +789,38 @@
 ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                               ArtMethod* method,
                                               const std::vector<uint32_t>& entries,
-                                              bool retry_allocation) {
-  ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);
+                                              bool retry_allocation)
+    // No thread safety analysis as we are using TryLock/Unlock explicitly.
+    NO_THREAD_SAFETY_ANALYSIS {
+  ProfilingInfo* info = nullptr;
+  if (!retry_allocation) {
+    // If we are allocating for the interpreter, just try to lock, to avoid
+    // lock contention with the JIT.
+    if (lock_.ExclusiveTryLock(self)) {
+      info = AddProfilingInfoInternal(self, method, entries);
+      lock_.ExclusiveUnlock(self);
+    }
+  } else {
+    {
+      MutexLock mu(self, lock_);
+      info = AddProfilingInfoInternal(self, method, entries);
+    }
 
-  if (info == nullptr && retry_allocation) {
-    GarbageCollectCache(self);
-    info = AddProfilingInfoInternal(self, method, entries);
+    if (info == nullptr) {
+      GarbageCollectCache(self);
+      MutexLock mu(self, lock_);
+      info = AddProfilingInfoInternal(self, method, entries);
+    }
   }
   return info;
 }
 
-ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
+ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNUSED,
                                                       ArtMethod* method,
                                                       const std::vector<uint32_t>& entries) {
   size_t profile_info_size = RoundUp(
       sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(),
       sizeof(void*));
-  MutexLock mu(self, lock_);
 
   // Check whether some other thread has concurrently created it.
   ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
@@ -781,6 +902,13 @@
 
 void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
                                              const OatQuickMethodHeader* header) {
+  ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
+  if ((profiling_info != nullptr) &&
+      (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
+    // Prevent future uses of the compiled code.
+    profiling_info->SetSavedEntryPoint(nullptr);
+  }
+
   if (method->GetEntryPointFromQuickCompiledCode() == header->GetEntryPoint()) {
     // The entrypoint is the one to invalidate, so we just update
     // it to the interpreter entry point and clear the counter to get the method
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 74ce7b5..aa1b139 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -73,6 +73,7 @@
 
   // Number of compilations done throughout the lifetime of the JIT.
   size_t NumberOfCompilations() REQUIRES(!lock_);
+  size_t NumberOfOsrCompilations() REQUIRES(!lock_);
 
   bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
       SHARED_REQUIRES(Locks::mutator_lock_)
@@ -123,6 +124,11 @@
     return live_bitmap_.get();
   }
 
+  // Return whether we should do a full collection given the current state of the cache.
+  bool ShouldDoFullCollection()
+      REQUIRES(lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   // Perform a collection on the code cache.
   void GarbageCollectCache(Thread* self)
       REQUIRES(!lock_)
@@ -207,7 +213,7 @@
   ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                           ArtMethod* method,
                                           const std::vector<uint32_t>& entries)
-      REQUIRES(!lock_)
+      REQUIRES(lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // If a collection is in progress, wait for it to finish. Return
@@ -234,6 +240,22 @@
   // Set the footprint limit of the code cache.
   void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);
 
+  void DoCollection(Thread* self, bool collect_profiling_info)
+      REQUIRES(!lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  void RemoveUnusedAndUnmarkedCode(Thread* self)
+      REQUIRES(!lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  void MarkCompiledCodeOnThreadStacks(Thread* self)
+      REQUIRES(!lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  bool CheckLiveCompiledCodeHasProfilingInfo()
+      REQUIRES(lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   // Lock for guarding allocations, collections, and the method_code_map_.
   Mutex lock_;
   // Condition to wait on during collection.
@@ -269,8 +291,8 @@
   // The current footprint in bytes of the data portion of the code cache.
   size_t data_end_ GUARDED_BY(lock_);
 
-  // Whether a collection has already been done on the current capacity.
-  bool has_done_one_collection_ GUARDED_BY(lock_);
+  // Whether the last collection round increased the code cache.
+  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);
 
   // Last time the code_cache was updated.
   // It is atomic to avoid locking when reading it.
@@ -292,6 +314,7 @@
 
   // Number of compilations done throughout the lifetime of the JIT.
   size_t number_of_compilations_ GUARDED_BY(lock_);
+  size_t number_of_osr_compilations_ GUARDED_BY(lock_);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
 };
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index a4e40ad..d751e5a 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -25,6 +25,9 @@
 namespace art {
 namespace jit {
 
+// At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
+static constexpr int kJitPoolThreadPthreadPriority = 9;
+
 class JitCompileTask FINAL : public Task {
  public:
   enum TaskKind {
@@ -92,6 +95,7 @@
   // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
   // is not null when we instrument.
   thread_pool_.reset(new ThreadPool("Jit thread pool", 1));
+  thread_pool_->SetPthreadPriority(kJitPoolThreadPthreadPriority);
   thread_pool_->StartWorkers(Thread::Current());
   {
     // Add Jit interpreter instrumentation, tells the interpreter when
@@ -183,7 +187,18 @@
     return;
   }
 
-  instrumentation_cache_->AddSamples(thread, method, 1);
+  ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
+  // Update the entrypoint if the ProfilingInfo has one. The interpreter will call it
+  // instead of interpreting the method.
+  // We avoid doing this if exit stubs are installed to not mess with the instrumentation.
+  // TODO(ngeoffray): Clean up instrumentation and code cache interactions.
+  if ((profiling_info != nullptr) &&
+      (profiling_info->GetSavedEntryPoint() != nullptr) &&
+      !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) {
+    method->SetEntryPointFromQuickCompiledCode(profiling_info->GetSavedEntryPoint());
+  } else {
+    instrumentation_cache_->AddSamples(thread, method, 1);
+  }
 }
 
 void JitInstrumentationListener::Branch(Thread* thread,
diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc
index 747b112..67c9b5f 100644
--- a/runtime/jit/offline_profiling_info.cc
+++ b/runtime/jit/offline_profiling_info.cc
@@ -48,9 +48,11 @@
   }
 }
 
-bool ProfileCompilationInfo::SaveProfilingInfo(const std::string& filename,
-                                               const std::vector<ArtMethod*>& methods) {
-  if (methods.empty()) {
+bool ProfileCompilationInfo::SaveProfilingInfo(
+    const std::string& filename,
+    const std::vector<ArtMethod*>& methods,
+    const std::set<DexCacheResolvedClasses>& resolved_classes) {
+  if (methods.empty() && resolved_classes.empty()) {
     VLOG(profiler) << "No info to save to " << filename;
     return true;
   }
@@ -71,14 +73,17 @@
   }
   {
     ScopedObjectAccess soa(Thread::Current());
-    for (auto it = methods.begin(); it != methods.end(); it++) {
-      const DexFile* dex_file = (*it)->GetDexFile();
-      if (!info.AddData(GetProfileDexFileKey(dex_file->GetLocation()),
-                        dex_file->GetLocationChecksum(),
-                        (*it)->GetDexMethodIndex())) {
+    for (ArtMethod* method : methods) {
+      const DexFile* dex_file = method->GetDexFile();
+      if (!info.AddMethodIndex(GetProfileDexFileKey(dex_file->GetLocation()),
+                               dex_file->GetLocationChecksum(),
+                               method->GetDexMethodIndex())) {
         return false;
       }
     }
+    for (const DexCacheResolvedClasses& dex_cache : resolved_classes) {
+      info.AddResolvedClasses(dex_cache);
+    }
   }
 
   if (!flock.GetFile()->ClearContent()) {
@@ -116,13 +121,14 @@
 
 static constexpr const char kFieldSeparator = ',';
 static constexpr const char kLineSeparator = '\n';
+static constexpr const char* kClassesMarker = "classes";
 
 /**
  * Serialization format:
- *    dex_location1,dex_location_checksum1,method_id11,method_id12...
- *    dex_location2,dex_location_checksum2,method_id21,method_id22...
+ *    dex_location1,dex_location_checksum1,method_id11,method_id12...,classes,class_id1,class_id2...
+ *    dex_location2,dex_location_checksum2,method_id21,method_id22...,classes,class_id1,class_id2...
  * e.g.
- *    app.apk,131232145,11,23,454,54
+ *    app.apk,131232145,11,23,454,54,classes,1,2,4,1234
  *    app.apk:classes5.dex,218490184,39,13,49,1
  **/
 bool ProfileCompilationInfo::Save(int fd) {
@@ -133,11 +139,20 @@
   for (const auto& it : info_) {
     const std::string& dex_location = it.first;
     const DexFileData& dex_data = it.second;
+    if (dex_data.method_set.empty() && dex_data.class_set.empty()) {
+      continue;
+    }
 
     os << dex_location << kFieldSeparator << dex_data.checksum;
     for (auto method_it : dex_data.method_set) {
       os << kFieldSeparator << method_it;
     }
+    if (!dex_data.class_set.empty()) {
+      os << kFieldSeparator << kClassesMarker;
+      for (auto class_id : dex_data.class_set) {
+        os << kFieldSeparator << class_id;
+      }
+    }
     os << kLineSeparator;
   }
 
@@ -168,18 +183,50 @@
   }
 }
 
-bool ProfileCompilationInfo::AddData(const std::string& dex_location,
-                                     uint32_t checksum,
-                                     uint16_t method_idx) {
+ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData(
+    const std::string& dex_location,
+    uint32_t checksum) {
   auto info_it = info_.find(dex_location);
   if (info_it == info_.end()) {
     info_it = info_.Put(dex_location, DexFileData(checksum));
   }
   if (info_it->second.checksum != checksum) {
     LOG(WARNING) << "Checksum mismatch for dex " << dex_location;
+    return nullptr;
+  }
+  return &info_it->second;
+}
+
+bool ProfileCompilationInfo::AddResolvedClasses(const DexCacheResolvedClasses& classes) {
+  const std::string dex_location = GetProfileDexFileKey(classes.GetDexLocation());
+  const uint32_t checksum = classes.GetLocationChecksum();
+  DexFileData* const data = GetOrAddDexFileData(dex_location, checksum);
+  if (data == nullptr) {
     return false;
   }
-  info_it->second.method_set.insert(method_idx);
+  data->class_set.insert(classes.GetClasses().begin(), classes.GetClasses().end());
+  return true;
+}
+
+bool ProfileCompilationInfo::AddMethodIndex(const std::string& dex_location,
+                                            uint32_t checksum,
+                                            uint16_t method_idx) {
+  DexFileData* const data = GetOrAddDexFileData(dex_location, checksum);
+  if (data == nullptr) {
+    return false;
+  }
+  data->method_set.insert(method_idx);
+  return true;
+}
+
+bool ProfileCompilationInfo::AddClassIndex(const std::string& dex_location,
+                                           uint32_t checksum,
+                                           uint16_t class_idx) {
+  DexFileData* const data = GetOrAddDexFileData(dex_location, checksum);
+  if (data == nullptr) {
+    return false;
+  }
+  data->class_set.insert(class_idx);
   return true;
 }
 
@@ -198,12 +245,29 @@
   }
 
   for (size_t i = 2; i < parts.size(); i++) {
+    if (parts[i] == kClassesMarker) {
+      // All of the remaining indices are class def indexes.
+      for (++i; i < parts.size(); ++i) {
+        uint32_t class_def_idx;
+        if (!ParseInt(parts[i].c_str(), &class_def_idx)) {
+          LOG(WARNING) << "Cannot parse class_def_idx " << parts[i];
+          return false;
+        } else if (class_def_idx >= std::numeric_limits<uint16_t>::max()) {
+          LOG(WARNING) << "Class def idx " << class_def_idx << " is larger than uint16_t max";
+          return false;
+        }
+        if (!AddClassIndex(dex_location, checksum, class_def_idx)) {
+          return false;
+        }
+      }
+      break;
+    }
     uint32_t method_idx;
     if (!ParseInt(parts[i].c_str(), &method_idx)) {
       LOG(WARNING) << "Cannot parse method_idx " << parts[i];
       return false;
     }
-    if (!AddData(dex_location, checksum, method_idx)) {
+    if (!AddMethodIndex(dex_location, checksum, method_idx)) {
       return false;
     }
   }
@@ -280,6 +345,8 @@
     }
     info_it->second.method_set.insert(other_dex_data.method_set.begin(),
                                       other_dex_data.method_set.end());
+    info_it->second.class_set.insert(other_dex_data.class_set.begin(),
+                                     other_dex_data.class_set.end());
   }
   return true;
 }
@@ -347,4 +414,16 @@
   return info_.Equals(other.info_);
 }
 
+std::set<DexCacheResolvedClasses> ProfileCompilationInfo::GetResolvedClasses() const {
+  std::set<DexCacheResolvedClasses> ret;
+  for (auto&& pair : info_) {
+    const std::string& profile_key = pair.first;
+    const DexFileData& data = pair.second;
+    DexCacheResolvedClasses classes(profile_key, data.checksum);
+    classes.AddClasses(data.class_set.begin(), data.class_set.end());
+    ret.insert(classes);
+  }
+  return ret;
+}
+
 }  // namespace art
diff --git a/runtime/jit/offline_profiling_info.h b/runtime/jit/offline_profiling_info.h
index edc591c..ee7ce27 100644
--- a/runtime/jit/offline_profiling_info.h
+++ b/runtime/jit/offline_profiling_info.h
@@ -21,6 +21,7 @@
 #include <vector>
 
 #include "atomic.h"
+#include "dex_cache_resolved_classes.h"
 #include "dex_file.h"
 #include "method_reference.h"
 #include "safe_map.h"
@@ -28,6 +29,7 @@
 namespace art {
 
 class ArtMethod;
+class DexCacheProfileData;
 
 // TODO: rename file.
 /**
@@ -43,7 +45,8 @@
   // Note that the saving proceeds only if the file can be locked for exclusive access.
   // If not (the locking is not blocking), the function does not save and returns false.
   static bool SaveProfilingInfo(const std::string& filename,
-                                const std::vector<ArtMethod*>& methods);
+                                const std::vector<ArtMethod*>& methods,
+                                const std::set<DexCacheResolvedClasses>& resolved_classes);
 
   // Loads profile information from the given file descriptor.
   bool Load(int fd);
@@ -68,14 +71,17 @@
   bool Equals(const ProfileCompilationInfo& other);
   static std::string GetProfileDexFileKey(const std::string& dex_location);
 
- private:
-  bool AddData(const std::string& dex_location, uint32_t checksum, uint16_t method_idx);
-  bool ProcessLine(const std::string& line);
+  // Returns the resolved classes recorded for each dex file in the profile.
+  // Note that the dex location in each entry is actually the profile key; the caller
+  // needs to call back into the profile info to map it back to the real dex location.
+  std::set<DexCacheResolvedClasses> GetResolvedClasses() const;
 
+ private:
   struct DexFileData {
     explicit DexFileData(uint32_t location_checksum) : checksum(location_checksum) {}
     uint32_t checksum;
     std::set<uint16_t> method_set;
+    std::set<uint16_t> class_set;
 
     bool operator==(const DexFileData& other) const {
       return checksum == other.checksum && method_set == other.method_set;
@@ -84,6 +90,13 @@
 
   using DexFileToProfileInfoMap = SafeMap<const std::string, DexFileData>;
 
+  DexFileData* GetOrAddDexFileData(const std::string& dex_location, uint32_t checksum);
+  bool AddMethodIndex(const std::string& dex_location, uint32_t checksum, uint16_t method_idx);
+  bool AddClassIndex(const std::string& dex_location, uint32_t checksum, uint16_t class_idx);
+  bool AddResolvedClasses(const DexCacheResolvedClasses& classes)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  bool ProcessLine(const std::string& line);
+
   friend class ProfileCompilationInfoTest;
   friend class CompilerDriverProfileTest;
   friend class ProfileAssistantTest;
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index 482ea06..fdd8c6e 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -53,7 +53,7 @@
                uint32_t checksum,
                uint16_t method_index,
                ProfileCompilationInfo* info) {
-    return info->AddData(dex_location, checksum, method_index);
+    return info->AddMethodIndex(dex_location, checksum, method_index);
   }
 
   uint32_t GetFd(const ScratchFile& file) {
@@ -73,8 +73,11 @@
   ASSERT_NE(class_loader, nullptr);
 
   // Save virtual methods from Main.
+  std::set<DexCacheResolvedClasses> resolved_classes;
   std::vector<ArtMethod*> main_methods = GetVirtualMethods(class_loader, "LMain;");
-  ASSERT_TRUE(ProfileCompilationInfo::SaveProfilingInfo(profile.GetFilename(), main_methods));
+  ASSERT_TRUE(ProfileCompilationInfo::SaveProfilingInfo(profile.GetFilename(),
+                                                        main_methods,
+                                                        resolved_classes));
 
   // Check that what we saved is in the profile.
   ProfileCompilationInfo info1;
@@ -89,7 +92,9 @@
 
   // Save virtual methods from Second.
   std::vector<ArtMethod*> second_methods = GetVirtualMethods(class_loader, "LSecond;");
-  ASSERT_TRUE(ProfileCompilationInfo::SaveProfilingInfo(profile.GetFilename(), second_methods));
+  ASSERT_TRUE(ProfileCompilationInfo::SaveProfilingInfo(profile.GetFilename(),
+                                                        second_methods,
+                                                        resolved_classes));
 
   // Check that what we saved is in the profile (methods from Main and Second).
   ProfileCompilationInfo info2;
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index b1a5a4b..ab26f6f 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -32,6 +32,7 @@
 static constexpr const uint64_t kRandomDelayMaxMs = 20 * 1000;  // 20 seconds
 static constexpr const uint64_t kMaxBackoffMs = 5 * 60 * 1000;  // 5 minutes
 static constexpr const uint64_t kSavePeriodMs = 10 * 1000;  // 10 seconds
+static constexpr const uint64_t kInitialDelayMs = 2 * 1000;  // 2 seconds
 static constexpr const double kBackoffCoef = 1.5;
 
 static constexpr const uint32_t kMinimumNrOrMethodsToSave = 10;
@@ -45,6 +46,7 @@
     : jit_code_cache_(jit_code_cache),
       code_cache_last_update_time_ns_(0),
       shutting_down_(false),
+      first_profile_(true),
       wait_lock_("ProfileSaver wait lock"),
       period_condition_("ProfileSaver period condition", wait_lock_) {
   AddTrackedLocations(output_filename, code_paths);
@@ -56,13 +58,18 @@
 
   uint64_t save_period_ms = kSavePeriodMs;
   VLOG(profiler) << "Save profiling information every " << save_period_ms << " ms";
-  while (true) {
-    if (ShuttingDown(self)) {
-      break;
-    }
 
-    uint64_t random_sleep_delay_ms = rand() % kRandomDelayMaxMs;
-    uint64_t sleep_time_ms = save_period_ms + random_sleep_delay_ms;
+  bool first_iteration = true;
+  while (!ShuttingDown(self)) {
+    uint64_t sleep_time_ms;
+    if (first_iteration) {
+      // Sleep for a shorter time on the first iteration, since we want to record
+      // loaded classes shortly after app launch.
+      sleep_time_ms = kInitialDelayMs;
+    } else {
+      const uint64_t random_sleep_delay_ms = rand() % kRandomDelayMaxMs;
+      sleep_time_ms = save_period_ms + random_sleep_delay_ms;
+    }
     {
       MutexLock mu(self, wait_lock_);
       period_condition_.TimedWait(self, sleep_time_ms, 0);
@@ -81,13 +88,14 @@
       // Reset the period to the initial value as it's highly likely to JIT again.
       save_period_ms = kSavePeriodMs;
     }
+    first_iteration = false;
   }
 }
 
 bool ProfileSaver::ProcessProfilingInfo() {
   uint64_t last_update_time_ns = jit_code_cache_->GetLastUpdateTimeNs();
-  if (last_update_time_ns - code_cache_last_update_time_ns_
-      < kMinimumTimeBetweenCodeCacheUpdatesNs) {
+  if (!first_profile_ && last_update_time_ns - code_cache_last_update_time_ns_
+          < kMinimumTimeBetweenCodeCacheUpdatesNs) {
     VLOG(profiler) << "Not enough time has passed since the last code cache update."
         << "Last update: " << last_update_time_ns
         << " Last save: " << code_cache_last_update_time_ns_;
@@ -113,19 +121,27 @@
       ScopedObjectAccess soa(Thread::Current());
       jit_code_cache_->GetCompiledArtMethods(locations, methods);
     }
-    if (methods.size() < kMinimumNrOrMethodsToSave) {
+    // Always save on the first iteration, to capture the loaded classes profile.
+    if (methods.size() < kMinimumNrOrMethodsToSave && !first_profile_) {
       VLOG(profiler) << "Not enough information to save to: " << filename
           <<" Nr of methods: " << methods.size();
       return false;
     }
 
-    if (!ProfileCompilationInfo::SaveProfilingInfo(filename, methods)) {
+    std::set<DexCacheResolvedClasses> resolved_classes;
+    if (first_profile_) {
+      ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+      resolved_classes = class_linker->GetResolvedClasses(/* ignore boot classes */ true);
+    }
+
+    if (!ProfileCompilationInfo::SaveProfilingInfo(filename, methods, resolved_classes)) {
       LOG(WARNING) << "Could not save profiling info to " << filename;
       return false;
     }
 
     VLOG(profiler) << "Profile process time: " << PrettyDuration(NanoTime() - start);
   }
+  first_profile_ = false;
   return true;
 }
 
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
index 3342790..21017c1 100644
--- a/runtime/jit/profile_saver.h
+++ b/runtime/jit/profile_saver.h
@@ -74,6 +74,7 @@
       GUARDED_BY(Locks::profiler_lock_);
   uint64_t code_cache_last_update_time_ns_;
   bool shutting_down_ GUARDED_BY(Locks::profiler_lock_);
+  bool first_profile_ = true;
 
   // Save period condition support.
   Mutex wait_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index ab72373..a8c056c 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -126,12 +126,25 @@
     is_method_being_compiled_ = value;
   }
 
+  void SetSavedEntryPoint(const void* entry_point) {
+    saved_entry_point_ = entry_point;
+  }
+
+  const void* GetSavedEntryPoint() const {
+    return saved_entry_point_;
+  }
+
+  void ClearInlineCaches() {
+    memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
+  }
+
  private:
   ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
       : number_of_inline_caches_(entries.size()),
         method_(method),
-        is_method_being_compiled_(false) {
-    memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
+        is_method_being_compiled_(false),
+        saved_entry_point_(nullptr) {
+    ClearInlineCaches();
     for (size_t i = 0; i < number_of_inline_caches_; ++i) {
       cache_[i].dex_pc_ = entries[i];
     }
@@ -148,6 +161,10 @@
   // TODO: Make the JIT code cache lock global.
   bool is_method_being_compiled_;
 
+  // Entry point of the corresponding ArtMethod, saved while the JIT code cache
+  // is polling the liveness of compiled code.
+  const void* saved_entry_point_;
+
   // Dynamically allocated array of size `number_of_inline_caches_`.
   InlineCache cache_[0];
 
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index c908b39..11156c6 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -230,17 +230,16 @@
     PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
   }
 
-  // We call this here so that we can try and generate a full error
-  // message with the overlapping mapping. There's no guarantee that
-  // that there will be an overlap though, since
-  // - The kernel is not *required* to honor expected_ptr unless MAP_FIXED is
-  //   true, even if there is no overlap
-  // - There might have been an overlap at the point of mmap, but the
-  //   overlapping region has since been unmapped.
-  std::string error_detail;
-  CheckNonOverlapping(expected, limit, &error_detail);
-
   if (error_msg != nullptr) {
+    // We call this here so that we can try and generate a full error
+    // message with the overlapping mapping. There's no guarantee
+    // that there will be an overlap though, since
+    // - The kernel is not *required* to honor expected_ptr unless MAP_FIXED is
+    //   true, even if there is no overlap
+    // - There might have been an overlap at the point of mmap, but the
+    //   overlapping region has since been unmapped.
+    std::string error_detail;
+    CheckNonOverlapping(expected, limit, &error_detail);
     std::ostringstream os;
     os <<  StringPrintf("Failed to mmap at expected address, mapped at "
                         "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
@@ -338,11 +337,18 @@
   saved_errno = errno;
 
   if (actual == MAP_FAILED) {
-    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
+    if (error_msg != nullptr) {
+      PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
 
-    *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. See process "
-                              "maps in the log.", expected_ptr, page_aligned_byte_count, prot,
-                              flags, fd.get(), strerror(saved_errno));
+      *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
+                                    "See process maps in the log.",
+                                expected_ptr,
+                                page_aligned_byte_count,
+                                prot,
+                                flags,
+                                fd.get(),
+                                strerror(saved_errno));
+    }
     return nullptr;
   }
   std::ostringstream check_map_request_error_msg;
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index 81c855e..e703b78 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -164,6 +164,19 @@
   ASSERT_TRUE(error_msg.empty());
 }
 
+TEST_F(MemMapTest, MapAnonymousFailNullError) {
+  CommonInit();
+  // Test that we don't crash with a null error_str when mapping at an invalid location.
+  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousInvalid",
+                                                    reinterpret_cast<uint8_t*>(kPageSize),
+                                                    0x20000,
+                                                    PROT_READ | PROT_WRITE,
+                                                    false,
+                                                    false,
+                                                    nullptr));
+  ASSERT_EQ(nullptr, map.get());
+}
+
 #ifdef __LP64__
 TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
   CommonInit();
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc
index 91a9870..5a07dee 100644
--- a/runtime/mirror/abstract_method.cc
+++ b/runtime/mirror/abstract_method.cc
@@ -21,25 +21,36 @@
 namespace art {
 namespace mirror {
 
+template <bool kTransactionActive>
 bool AbstractMethod::CreateFromArtMethod(ArtMethod* method) {
-  auto* interface_method = method->GetInterfaceMethodIfProxy(sizeof(void*));
-  SetArtMethod(method);
-  SetFieldObject<false>(DeclaringClassOffset(), method->GetDeclaringClass());
-  SetFieldObject<false>(
+  auto* interface_method = method->GetInterfaceMethodIfProxy(
+      kTransactionActive
+          ? Runtime::Current()->GetClassLinker()->GetImagePointerSize()
+          : sizeof(void*));
+  SetArtMethod<kTransactionActive>(method);
+  SetFieldObject<kTransactionActive>(DeclaringClassOffset(), method->GetDeclaringClass());
+  SetFieldObject<kTransactionActive>(
       DeclaringClassOfOverriddenMethodOffset(), interface_method->GetDeclaringClass());
-  SetField32<false>(AccessFlagsOffset(), method->GetAccessFlags());
-  SetField32<false>(DexMethodIndexOffset(), method->GetDexMethodIndex());
+  SetField32<kTransactionActive>(AccessFlagsOffset(), method->GetAccessFlags());
+  SetField32<kTransactionActive>(DexMethodIndexOffset(), method->GetDexMethodIndex());
   return true;
 }
 
+template bool AbstractMethod::CreateFromArtMethod<false>(ArtMethod* method);
+template bool AbstractMethod::CreateFromArtMethod<true>(ArtMethod* method);
+
 ArtMethod* AbstractMethod::GetArtMethod() {
   return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset()));
 }
 
+template <bool kTransactionActive>
 void AbstractMethod::SetArtMethod(ArtMethod* method) {
-  SetField64<false>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method));
+  SetField64<kTransactionActive>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method));
 }
 
+template void AbstractMethod::SetArtMethod<false>(ArtMethod* method);
+template void AbstractMethod::SetArtMethod<true>(ArtMethod* method);
+
 mirror::Class* AbstractMethod::GetDeclaringClass() {
   return GetFieldObject<mirror::Class>(DeclaringClassOffset());
 }
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index dc084be..a39f94d 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -34,11 +34,13 @@
 class MANAGED AbstractMethod : public AccessibleObject {
  public:
   // Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
+  template <bool kTransactionActive = false>
   bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
   ArtMethod* GetArtMethod() SHARED_REQUIRES(Locks::mutator_lock_);
   // Only used by the image writer.
+  template <bool kTransactionActive = false>
   void SetArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 3f806d3..19584ed 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -502,7 +502,7 @@
   if (method->IsDirect()) {
     return method;
   }
-  if (method->GetDeclaringClass()->IsInterface() && !method->IsMiranda()) {
+  if (method->GetDeclaringClass()->IsInterface() && !method->IsCopied()) {
     return FindVirtualMethodForInterface(method, pointer_size);
   }
   return FindVirtualMethodForVirtual(method, pointer_size);
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 9190e44..7900eac 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -1054,5 +1054,89 @@
   return (type_id == nullptr) ? DexFile::kDexNoIndex : dex_file.GetIndexForTypeId(*type_id);
 }
 
+template <bool kTransactionActive>
+mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
+                                                 mirror::Class* klass,
+                                                 mirror::String* name,
+                                                 mirror::ObjectArray<mirror::Class>* args) {
+  // Covariant return types permit the class to define multiple
+  // methods with the same name and parameter types. Prefer to
+  // return a non-synthetic method in such situations. We may
+  // still return a synthetic method to handle situations like
+  // escalated visibility. We never return miranda methods that
+  // were synthesized by the runtime.
+  constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
+  StackHandleScope<3> hs(self);
+  auto h_method_name = hs.NewHandle(name);
+  if (UNLIKELY(h_method_name.Get() == nullptr)) {
+    ThrowNullPointerException("name == null");
+    return nullptr;
+  }
+  auto h_args = hs.NewHandle(args);
+  Handle<mirror::Class> h_klass = hs.NewHandle(klass);
+  ArtMethod* result = nullptr;
+  const size_t pointer_size = kTransactionActive
+                                  ? Runtime::Current()->GetClassLinker()->GetImagePointerSize()
+                                  : sizeof(void*);
+  for (auto& m : h_klass->GetDeclaredVirtualMethods(pointer_size)) {
+    auto* np_method = m.GetInterfaceMethodIfProxy(pointer_size);
+    // May cause thread suspension.
+    mirror::String* np_name = np_method->GetNameAsString(self);
+    if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
+      if (UNLIKELY(self->IsExceptionPending())) {
+        return nullptr;
+      }
+      continue;
+    }
+    auto modifiers = m.GetAccessFlags();
+    if ((modifiers & kSkipModifiers) == 0) {
+      return mirror::Method::CreateFromArtMethod<kTransactionActive>(self, &m);
+    }
+    if ((modifiers & kAccMiranda) == 0) {
+      result = &m;  // Remember as potential result if it's not a miranda method.
+    }
+  }
+  if (result == nullptr) {
+    for (auto& m : h_klass->GetDirectMethods(pointer_size)) {
+      auto modifiers = m.GetAccessFlags();
+      if ((modifiers & kAccConstructor) != 0) {
+        continue;
+      }
+      auto* np_method = m.GetInterfaceMethodIfProxy(pointer_size);
+      // May cause thread suspension.
+      mirror::String* np_name = np_method->GetNameAsString(self);
+      if (np_name == nullptr) {
+        self->AssertPendingException();
+        return nullptr;
+      }
+      if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
+        if (UNLIKELY(self->IsExceptionPending())) {
+          return nullptr;
+        }
+        continue;
+      }
+      if ((modifiers & kSkipModifiers) == 0) {
+        return mirror::Method::CreateFromArtMethod<kTransactionActive>(self, &m);
+      }
+      // Direct methods cannot be miranda methods, so this potential result must be synthetic.
+      result = &m;
+    }
+  }
+  return result != nullptr
+      ? mirror::Method::CreateFromArtMethod<kTransactionActive>(self, result)
+      : nullptr;
+}
+
+template
+mirror::Method* Class::GetDeclaredMethodInternal<false>(Thread* self,
+                                                        mirror::Class* klass,
+                                                        mirror::String* name,
+                                                        mirror::ObjectArray<mirror::Class>* args);
+template
+mirror::Method* Class::GetDeclaredMethodInternal<true>(Thread* self,
+                                                       mirror::Class* klass,
+                                                       mirror::String* name,
+                                                       mirror::ObjectArray<mirror::Class>* args);
+
 }  // namespace mirror
 }  // namespace art
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 6e3463c..7082c88 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -55,6 +55,7 @@
 class Constructor;
 class DexCache;
 class IfTable;
+class Method;
 
 // C++ mirror of java.lang.Class
 class MANAGED Class FINAL : public Object {
@@ -759,6 +760,13 @@
         size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  template <bool kTransactionActive = false>
+  static Method* GetDeclaredMethodInternal(Thread* self,
+                                           mirror::Class* klass,
+                                           mirror::String* name,
+                                           mirror::ObjectArray<mirror::Class>* args)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
index 85c52e9..97973e6 100644
--- a/runtime/mirror/method.cc
+++ b/runtime/mirror/method.cc
@@ -51,15 +51,19 @@
   array_class_ = GcRoot<Class>(nullptr);
 }
 
+template <bool kTransactionActive>
 Method* Method::CreateFromArtMethod(Thread* self, ArtMethod* method) {
   DCHECK(!method->IsConstructor()) << PrettyMethod(method);
   auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self));
   if (LIKELY(ret != nullptr)) {
-    static_cast<AbstractMethod*>(ret)->CreateFromArtMethod(method);
+    static_cast<AbstractMethod*>(ret)->CreateFromArtMethod<kTransactionActive>(method);
   }
   return ret;
 }
 
+template Method* Method::CreateFromArtMethod<false>(Thread* self, ArtMethod* method);
+template Method* Method::CreateFromArtMethod<true>(Thread* self, ArtMethod* method);
+
 void Method::VisitRoots(RootVisitor* visitor) {
   static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
   array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index 0c28e4f..12a72fe 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -28,6 +28,7 @@
 // C++ mirror of java.lang.reflect.Method.
 class MANAGED Method : public AbstractMethod {
  public:
+  template <bool kTransactionActive = false>
   static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index ed4c5fc..c31b22e 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -50,6 +50,11 @@
 // Used by a class to denote that the verifier has attempted to check it at least once.
 static constexpr uint32_t kAccVerificationAttempted = 0x00080000;  // class (runtime)
 static constexpr uint32_t kAccFastNative =            0x00080000;  // method (dex only)
+// This is set by the class linker during LinkInterfaceMethods. It is used by a method to represent
+// that it was copied from its declaring class into another class. All methods marked kAccMiranda
+// and kAccDefaultConflict will have this bit set. Any kAccDefault method contained in the methods_
+// array of a concrete class will also have this bit set.
+static constexpr uint32_t kAccCopied =                0x00100000;  // method (runtime)
 static constexpr uint32_t kAccMiranda =               0x00200000;  // method (dex only)
 static constexpr uint32_t kAccDefault =               0x00400000;  // method (runtime)
 // This is set by the class linker during LinkInterfaceMethods. Prior to that point we do not know
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index b5d859b..bf24de5 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -371,70 +371,13 @@
 
 static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
                                                jobject name, jobjectArray args) {
-  // Covariant return types permit the class to define multiple
-  // methods with the same name and parameter types. Prefer to
-  // return a non-synthetic method in such situations. We may
-  // still return a synthetic method to handle situations like
-  // escalated visibility. We never return miranda methods that
-  // were synthesized by the runtime.
-  constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
   ScopedFastNativeObjectAccess soa(env);
-  StackHandleScope<3> hs(soa.Self());
-  auto h_method_name = hs.NewHandle(soa.Decode<mirror::String*>(name));
-  if (UNLIKELY(h_method_name.Get() == nullptr)) {
-    ThrowNullPointerException("name == null");
-    return nullptr;
-  }
-  auto h_args = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
-  Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
-  ArtMethod* result = nullptr;
-  for (auto& m : h_klass->GetDeclaredVirtualMethods(sizeof(void*))) {
-    auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*));
-    // May cause thread suspension.
-    mirror::String* np_name = np_method->GetNameAsString(soa.Self());
-    if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
-      if (UNLIKELY(soa.Self()->IsExceptionPending())) {
-        return nullptr;
-      }
-      continue;
-    }
-    auto modifiers = m.GetAccessFlags();
-    if ((modifiers & kSkipModifiers) == 0) {
-      return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), &m));
-    }
-    if ((modifiers & kAccMiranda) == 0) {
-      result = &m;  // Remember as potential result if it's not a miranda method.
-    }
-  }
-  if (result == nullptr) {
-    for (auto& m : h_klass->GetDirectMethods(sizeof(void*))) {
-      auto modifiers = m.GetAccessFlags();
-      if ((modifiers & kAccConstructor) != 0) {
-        continue;
-      }
-      auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*));
-      // May cause thread suspension.
-      mirror::String* np_name = np_method->GetNameAsString(soa.Self());
-      if (np_name == nullptr) {
-        soa.Self()->AssertPendingException();
-        return nullptr;
-      }
-      if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
-        if (UNLIKELY(soa.Self()->IsExceptionPending())) {
-          return nullptr;
-        }
-        continue;
-      }
-      if ((modifiers & kSkipModifiers) == 0) {
-        return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), &m));
-      }
-      // Direct methods cannot be miranda methods, so this potential result must be synthetic.
-      result = &m;
-    }
-  }
-  return result != nullptr ?
-      soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), result)) :
-      nullptr;
+  mirror::Method* result = mirror::Class::GetDeclaredMethodInternal(
+      soa.Self(),
+      DecodeClass(soa, javaThis),
+      soa.Decode<mirror::String*>(name),
+      soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
+  return soa.AddLocalReference<jobject>(result);
 }
 
 static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaThis,
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index c177f19..df794e1 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -67,9 +67,11 @@
 #endif
 }
 
-static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader,
-                                  jboolean isSharedNamespace, jstring javaLibrarySearchPath,
-                                  jstring javaLibraryPermittedPath) {
+static jstring Runtime_nativeLoad(JNIEnv* env,
+                                  jclass,
+                                  jstring javaFilename,
+                                  jobject javaLoader,
+                                  jstring javaLibrarySearchPath) {
   ScopedUtfChars filename(env, javaFilename);
   if (filename.c_str() == nullptr) {
     return nullptr;
@@ -79,7 +81,9 @@
 
   // Starting with N nativeLoad uses classloader local
   // linker namespace instead of global LD_LIBRARY_PATH
-  // (23 is Marshmallow)
+  // (23 is Marshmallow). This call is here to preserve
+  // backwards compatibility for apps targeting SDK
+  // version <= 23.
   if (target_sdk_version == 0) {
     SetLdLibraryPath(env, javaLibrarySearchPath);
   }
@@ -90,9 +94,7 @@
     bool success = vm->LoadNativeLibrary(env,
                                          filename.c_str(),
                                          javaLoader,
-                                         isSharedNamespace == JNI_TRUE,
                                          javaLibrarySearchPath,
-                                         javaLibraryPermittedPath,
                                          &error_msg);
     if (success) {
       return nullptr;
@@ -121,7 +123,7 @@
   NATIVE_METHOD(Runtime, gc, "()V"),
   NATIVE_METHOD(Runtime, maxMemory, "!()J"),
   NATIVE_METHOD(Runtime, nativeExit, "(I)V"),
-  NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;ZLjava/lang/String;Ljava/lang/String;)Ljava/lang/String;"),
+  NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;)Ljava/lang/String;"),
   NATIVE_METHOD(Runtime, totalMemory, "!()J"),
 };
 
diff --git a/runtime/native/java_lang_StringFactory.cc b/runtime/native/java_lang_StringFactory.cc
index 34d6a37..5a219ef 100644
--- a/runtime/native/java_lang_StringFactory.cc
+++ b/runtime/native/java_lang_StringFactory.cc
@@ -50,8 +50,10 @@
   return soa.AddLocalReference<jstring>(result);
 }
 
+// The char array passed as `java_data` must not be a null reference.
 static jstring StringFactory_newStringFromChars(JNIEnv* env, jclass, jint offset,
                                                 jint char_count, jcharArray java_data) {
+  DCHECK(java_data != nullptr);
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::CharArray> char_array(hs.NewHandle(soa.Decode<mirror::CharArray*>(java_data)));
diff --git a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
index 04f0ba0..4d2ea67 100644
--- a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
+++ b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
@@ -16,13 +16,14 @@
 
 #include "java_util_concurrent_atomic_AtomicLong.h"
 
+#include "arch/instruction_set.h"
 #include "atomic.h"
 #include "jni_internal.h"
 
 namespace art {
 
 static jboolean AtomicLong_VMSupportsCS8(JNIEnv*, jclass) {
-  return QuasiAtomic::LongAtomicsUseMutexes() ? JNI_FALSE : JNI_TRUE;
+  return QuasiAtomic::LongAtomicsUseMutexes(kRuntimeISA) ? JNI_FALSE : JNI_TRUE;
 }
 
 static JNINativeMethod gMethods[] = {
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 910163c..fb91a8c 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -228,6 +228,10 @@
     return End() - Begin();
   }
 
+  bool Contains(const void* p) const {
+    return p >= Begin() && p < End();
+  }
+
   size_t BssSize() const {
     return BssEnd() - BssBegin();
   }
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 18cf81a..ea26d58 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -16,6 +16,8 @@
 
 #include "oat_file_manager.h"
 
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include <cutils/trace.h>
 #include <memory>
 #include <queue>
 #include <vector>
@@ -386,13 +388,15 @@
             ScopedSuspendAll ssa("Add image space");
             runtime->GetHeap()->AddSpace(image_space.get());
           }
-          added_image_space = true;
-          if (runtime->GetClassLinker()->AddImageSpace(image_space.get(),
-                                                       h_loader,
-                                                       dex_elements,
-                                                       dex_location,
-                                                       /*out*/&dex_files,
-                                                       /*out*/&temp_error_msg)) {
+          ATRACE_BEGIN(StringPrintf("Adding image space for location %s", dex_location).c_str());
+          added_image_space = runtime->GetClassLinker()->AddImageSpace(image_space.get(),
+                                                                       h_loader,
+                                                                       dex_elements,
+                                                                       dex_location,
+                                                                       /*out*/&dex_files,
+                                                                       /*out*/&temp_error_msg);
+          ATRACE_END();
+          if (added_image_space) {
             // Successfully added image space to heap, release the map so that it does not get
             // freed.
             image_space.release();
@@ -407,7 +411,6 @@
               ScopedSuspendAll ssa("Remove image space");
               runtime->GetHeap()->RemoveSpace(image_space.get());
             }
-            added_image_space = false;
             // Non-fatal, don't update error_msg.
           }
         }
diff --git a/runtime/openjdkjvm/OpenjdkJvm.cc b/runtime/openjdkjvm/OpenjdkJvm.cc
index 725067a..d377457 100644
--- a/runtime/openjdkjvm/OpenjdkJvm.cc
+++ b/runtime/openjdkjvm/OpenjdkJvm.cc
@@ -329,9 +329,10 @@
 }
 
 
-JNIEXPORT jstring JVM_NativeLoad(JNIEnv* env, jstring javaFilename, jobject javaLoader,
-                                 jboolean isSharedNamespace, jstring javaLibrarySearchPath,
-                                 jstring javaLibraryPermittedPath) {
+JNIEXPORT jstring JVM_NativeLoad(JNIEnv* env,
+                                 jstring javaFilename,
+                                 jobject javaLoader,
+                                 jstring javaLibrarySearchPath) {
   ScopedUtfChars filename(env, javaFilename);
   if (filename.c_str() == NULL) {
     return NULL;
@@ -354,9 +355,7 @@
     bool success = vm->LoadNativeLibrary(env,
                                          filename.c_str(),
                                          javaLoader,
-                                         isSharedNamespace == JNI_TRUE,
                                          javaLibrarySearchPath,
-                                         javaLibraryPermittedPath,
                                          &error_msg);
     if (success) {
       return nullptr;
diff --git a/runtime/primitive.h b/runtime/primitive.h
index ca42c47..2454a21 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -46,6 +46,7 @@
     kPrimFloat,
     kPrimDouble,
     kPrimVoid,
+    kPrimLast = kPrimVoid
   };
 
   static Type GetType(char type) {
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 0b09a70..7e84b40 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -37,6 +37,8 @@
 enum InlineMethodOpcode : uint16_t {
   kIntrinsicDoubleCvt,
   kIntrinsicFloatCvt,
+  kIntrinsicFloat2Int,
+  kIntrinsicDouble2Long,
   kIntrinsicFloatIsInfinite,
   kIntrinsicDoubleIsInfinite,
   kIntrinsicFloatIsNaN,
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 2dfa860..6317f5e 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -204,8 +204,7 @@
       return VRegKind::kDoubleHiVReg;
 
     default:
-      LOG(FATAL) << "Unexpected vreg location "
-                 << DexRegisterLocation::PrettyDescriptor(kind);
+      LOG(FATAL) << "Unexpected vreg location " << kind;
       UNREACHABLE();
   }
 }
@@ -456,12 +455,11 @@
         }
         default: {
           LOG(FATAL)
-              << "Unexpected location kind"
-              << DexRegisterLocation::PrettyDescriptor(
-                    vreg_map.GetLocationInternalKind(vreg,
-                                                     number_of_vregs,
-                                                     code_info,
-                                                     encoding));
+              << "Unexpected location kind "
+              << vreg_map.GetLocationInternalKind(vreg,
+                                                  number_of_vregs,
+                                                  code_info,
+                                                  encoding);
           UNREACHABLE();
         }
       }
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index eb5455a..8c813b4 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -316,6 +316,7 @@
   linear_alloc_.reset();
   low_4gb_arena_pool_.reset();
   arena_pool_.reset();
+  jit_arena_pool_.reset();
   MemMap::Shutdown();
   ATRACE_END();
 
@@ -1019,10 +1020,13 @@
   // Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
   // can't be trimmed as easily.
   const bool use_malloc = IsAotCompiler();
-  arena_pool_.reset(new ArenaPool(use_malloc, false));
+  arena_pool_.reset(new ArenaPool(use_malloc, /* low_4gb */ false));
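+  // Keep the JIT metadata pool MemMap-backed (use_malloc == false) so its memory can be trimmed.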
+  jit_arena_pool_.reset(
+      new ArenaPool(/* use_malloc */ false, /* low_4gb */ false, "CompilerMetadata"));
+
   if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
     // 4gb, no malloc. Explanation in header.
-    low_4gb_arena_pool_.reset(new ArenaPool(false, true));
+    low_4gb_arena_pool_.reset(new ArenaPool(/* use_malloc */ false, /* low_4gb */ true));
   }
   linear_alloc_.reset(CreateLinearAlloc());
 
@@ -1276,9 +1280,7 @@
   // libcore can't because it's the library that implements System.loadLibrary!
   {
     std::string error_msg;
-    if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr,
-                                     /* is_shared_namespace */ false,
-                                     nullptr, nullptr, &error_msg)) {
+    if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, nullptr, &error_msg)) {
       LOG(FATAL) << "LoadNativeLibrary failed for \"libjavacore.so\": " << error_msg;
     }
   }
@@ -1287,9 +1289,7 @@
                                                 ? "libopenjdkd.so"
                                                 : "libopenjdk.so";
     std::string error_msg;
-    if (!java_vm_->LoadNativeLibrary(env, kOpenJdkLibrary, nullptr,
-                                     /* is_shared_namespace */ false,
-                                     nullptr, nullptr, &error_msg)) {
+    if (!java_vm_->LoadNativeLibrary(env, kOpenJdkLibrary, nullptr, nullptr, &error_msg)) {
       LOG(FATAL) << "LoadNativeLibrary failed for \"" << kOpenJdkLibrary << "\": " << error_msg;
     }
   }
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 8aac4ce..83e77d2 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -561,6 +561,9 @@
   ArenaPool* GetArenaPool() {
     return arena_pool_.get();
   }
+  ArenaPool* GetJitArenaPool() {
+    return jit_arena_pool_.get();
+  }
   const ArenaPool* GetArenaPool() const {
     return arena_pool_.get();
   }
@@ -669,6 +672,7 @@
 
   gc::Heap* heap_;
 
+  std::unique_ptr<ArenaPool> jit_arena_pool_;
   std::unique_ptr<ArenaPool> arena_pool_;
   // Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
   // compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image
diff --git a/runtime/safe_map.h b/runtime/safe_map.h
index a8b48ee..0e5b503 100644
--- a/runtime/safe_map.h
+++ b/runtime/safe_map.h
@@ -99,16 +99,16 @@
   }
 
   // Used to insert a new mapping at a known position for better performance.
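+  // The position hint is forwarded to std::map::emplace_hint; the DCHECKs validate it in debug builds.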
-  iterator PutBefore(iterator pos, const K& k, const V& v) {
+  iterator PutBefore(const_iterator pos, const K& k, const V& v) {
     // Check that we're using the correct position and the key is not in the map.
     DCHECK(pos == map_.end() || map_.key_comp()(k, pos->first));
-    DCHECK(pos == map_.begin() || map_.key_comp()((--iterator(pos))->first, k));
+    DCHECK(pos == map_.begin() || map_.key_comp()((--const_iterator(pos))->first, k));
     return map_.emplace_hint(pos, k, v);
   }
-  iterator PutBefore(iterator pos, const K& k, V&& v) {
+  iterator PutBefore(const_iterator pos, const K& k, V&& v) {
     // Check that we're using the correct position and the key is not in the map.
     DCHECK(pos == map_.end() || map_.key_comp()(k, pos->first));
-    DCHECK(pos == map_.begin() || map_.key_comp()((--iterator(pos))->first, k));
+    DCHECK(pos == map_.begin() || map_.key_comp()((--const_iterator(pos))->first, k));
     return map_.emplace_hint(pos, k, std::move(v));
   }
 
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 5faff93..ee5da8e 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -352,12 +352,11 @@
       return false;
     default:
       LOG(FATAL)
-          << "Unexpected location kind"
-          << DexRegisterLocation::PrettyDescriptor(
-                dex_register_map.GetLocationInternalKind(vreg,
-                                                         number_of_dex_registers,
-                                                         code_info,
-                                                         encoding));
+          << "Unexpected location kind "
+          << dex_register_map.GetLocationInternalKind(vreg,
+                                                      number_of_dex_registers,
+                                                      code_info,
+                                                      encoding);
       UNREACHABLE();
   }
 }
@@ -740,7 +739,7 @@
       // Check class linker linear allocs.
       mirror::Class* klass = method->GetDeclaringClass();
       LinearAlloc* const class_linear_alloc = (klass != nullptr)
-          ? ClassLinker::GetAllocatorForClassLoader(klass->GetClassLoader())
+          ? runtime->GetClassLinker()->GetAllocatorForClassLoader(klass->GetClassLoader())
           : linear_alloc;
       if (!class_linear_alloc->Contains(method)) {
         // Check image space.
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 5544507..3093436 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -27,6 +27,31 @@
 constexpr uint32_t StackMap::kNoDexRegisterMap;
 constexpr uint32_t StackMap::kNoInlineInfo;
 
+std::ostream& operator<<(std::ostream& stream, const DexRegisterLocation::Kind& kind) {
+  using Kind = DexRegisterLocation::Kind;
+  switch (kind) {
+    case Kind::kNone:
+      return stream << "none";
+    case Kind::kInStack:
+      return stream << "in stack";
+    case Kind::kInRegister:
+      return stream << "in register";
+    case Kind::kInRegisterHigh:
+      return stream << "in register high";
+    case Kind::kInFpuRegister:
+      return stream << "in fpu register";
+    case Kind::kInFpuRegisterHigh:
+      return stream << "in fpu register high";
+    case Kind::kConstant:
+      return stream << "as constant";
+    case Kind::kInStackLargeOffset:
+      return stream << "in stack (large offset)";
+    case Kind::kConstantLargeValue:
+      return stream << "as constant (large value)";
+  }
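+  // Fallback for an unexpected enumerator value: print the raw numeric kind.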
+  return stream << "Kind<" << static_cast<uint32_t>(kind) << ">";
+}
+
 DexRegisterLocation::Kind DexRegisterMap::GetLocationInternalKind(
     uint16_t dex_register_number,
     uint16_t number_of_dex_registers,
@@ -97,7 +122,7 @@
                                 const std::string& prefix = "v",
                                 const std::string& suffix = "") {
   os << prefix << dex_register_num << ": "
-     << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind())
+     << location.GetInternalKind()
      << " (" << location.GetValue() << ")" << suffix << '\n';
 }
 
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 97eb805..dbf23aa 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -110,30 +110,6 @@
       sizeof(Kind) == 1u,
       "art::DexRegisterLocation::Kind has a size different from one byte.");
 
-  static const char* PrettyDescriptor(Kind kind) {
-    switch (kind) {
-      case Kind::kNone:
-        return "none";
-      case Kind::kInStack:
-        return "in stack";
-      case Kind::kInRegister:
-        return "in register";
-      case Kind::kInRegisterHigh:
-        return "in register high";
-      case Kind::kInFpuRegister:
-        return "in fpu register";
-      case Kind::kInFpuRegisterHigh:
-        return "in fpu register high";
-      case Kind::kConstant:
-        return "as constant";
-      case Kind::kInStackLargeOffset:
-        return "in stack (large offset)";
-      case Kind::kConstantLargeValue:
-        return "as constant (large value)";
-    }
-    UNREACHABLE();
-  }
-
   static bool IsShortLocationKind(Kind kind) {
     switch (kind) {
       case Kind::kInStack:
@@ -149,7 +125,7 @@
         return false;
 
       case Kind::kNone:
-        LOG(FATAL) << "Unexpected location kind " << PrettyDescriptor(kind);
+        LOG(FATAL) << "Unexpected location kind";
     }
     UNREACHABLE();
   }
@@ -215,6 +191,8 @@
   friend class DexRegisterLocationHashFn;
 };
 
+std::ostream& operator<<(std::ostream& stream, const DexRegisterLocation::Kind& kind);
+
 /**
  * Store information on unique Dex register locations used in a method.
  * The information is of the form:
@@ -349,7 +327,7 @@
       case DexRegisterLocation::Kind::kConstantLargeValue:
       case DexRegisterLocation::Kind::kInStackLargeOffset:
       case DexRegisterLocation::Kind::kNone:
-        LOG(FATAL) << "Unexpected location kind " << DexRegisterLocation::PrettyDescriptor(kind);
+        LOG(FATAL) << "Unexpected location kind " << kind;
     }
     UNREACHABLE();
   }
@@ -373,7 +351,7 @@
       case DexRegisterLocation::Kind::kConstantLargeValue:
       case DexRegisterLocation::Kind::kInStackLargeOffset:
       case DexRegisterLocation::Kind::kNone:
-        LOG(FATAL) << "Unexpected location kind " << DexRegisterLocation::PrettyDescriptor(kind);
+        LOG(FATAL) << "Unexpected location kind " << kind;
     }
     UNREACHABLE();
   }
@@ -515,8 +493,7 @@
                       const StackMapEncoding& enc) const {
     DexRegisterLocation location =
         GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
-    DCHECK(location.GetKind() == DexRegisterLocation::Kind::kConstant)
-        << DexRegisterLocation::PrettyDescriptor(location.GetKind());
+    DCHECK_EQ(location.GetKind(), DexRegisterLocation::Kind::kConstant);
     return location.GetValue();
   }
 
@@ -530,7 +507,7 @@
            location.GetInternalKind() == DexRegisterLocation::Kind::kInRegisterHigh ||
            location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister ||
            location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegisterHigh)
-        << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
+        << location.GetInternalKind();
     return location.GetValue();
   }
 
diff --git a/runtime/thread.h b/runtime/thread.h
index 97c47e1..234750c 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1324,8 +1324,8 @@
       instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
       stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
       frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
-      last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
-      thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
+      last_no_thread_suspension_cause(nullptr), thread_local_objects(0),
+      thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
       mterp_current_ibase(nullptr), mterp_default_ibase(nullptr), mterp_alt_ibase(nullptr),
       thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
       nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr),
@@ -1440,10 +1440,12 @@
     QuickEntryPoints quick_entrypoints;
 
     // Thread-local allocation pointer.
+    size_t thread_local_objects;
     uint8_t* thread_local_start;
+    // thread_local_pos and thread_local_end must be consecutive so a single ldrd can load both,
+    // and they are kept 8-byte aligned for potentially better performance.
     uint8_t* thread_local_pos;
     uint8_t* thread_local_end;
-    size_t thread_local_objects;
 
     // Mterp jump table bases.
     void* mterp_current_ibase;
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 5a4dfb8..2fba805 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -16,6 +16,11 @@
 
 #include "thread_pool.h"
 
+#include <pthread.h>
+
+#include <sys/time.h>
+#include <sys/resource.h>
+
 #include "base/bit_utils.h"
 #include "base/casts.h"
 #include "base/logging.h"
@@ -53,6 +58,19 @@
   CHECK_PTHREAD_CALL(pthread_join, (pthread_, nullptr), "thread pool worker shutdown");
 }
 
+void ThreadPoolWorker::SetPthreadPriority(int priority) {
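+  // PRIO_MIN and PRIO_MAX bound the nice values understood by setpriority(2).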
+  CHECK_GE(priority, PRIO_MIN);
+  CHECK_LE(priority, PRIO_MAX);
+#if defined(__ANDROID__)
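+  // pthread_gettid_np() maps the pthread handle to the kernel tid that setpriority() expects.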
+  int result = setpriority(PRIO_PROCESS, pthread_gettid_np(pthread_), priority);
+  if (result != 0) {
+    PLOG(ERROR) << "Failed to setpriority to: " << priority;
+  }
+#else
+  UNUSED(priority);
+#endif
+}
+
 void ThreadPoolWorker::Run() {
   Thread* self = Thread::Current();
   Task* task = nullptr;
@@ -214,4 +232,10 @@
   return tasks_.size();
 }
 
+void ThreadPool::SetPthreadPriority(int priority) {
+  for (ThreadPoolWorker* worker : threads_) {
+    worker->SetPthreadPriority(priority);
+  }
+}
+
 }  // namespace art
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 6cd4ad3..b6c6f02 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -59,6 +59,9 @@
 
   virtual ~ThreadPoolWorker();
 
+  // Set the "nice" priority for this worker.
+  void SetPthreadPriority(int priority);
+
  protected:
   ThreadPoolWorker(ThreadPool* thread_pool, const std::string& name, size_t stack_size);
   static void* Callback(void* arg) REQUIRES(!Locks::mutator_lock_);
@@ -111,6 +114,9 @@
   // thread count of the thread pool.
   void SetMaxActiveWorkers(size_t threads) REQUIRES(!task_queue_lock_);
 
+  // Set the "nice" priority for threads in the pool.
+  void SetPthreadPriority(int priority);
+
  protected:
   // get a task to run, blocks if there are no tasks left
   virtual Task* GetTask(Thread* self) REQUIRES(!task_queue_lock_);
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index cfa8329..d288943 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -385,7 +385,7 @@
   ScopedLocalRef<jclass> java_lang_Runtime(env, env->FindClass("java/lang/Runtime"));
   java_lang_Runtime_nativeLoad =
       CacheMethod(env, java_lang_Runtime.get(), true, "nativeLoad",
-                  "(Ljava/lang/String;Ljava/lang/ClassLoader;ZLjava/lang/String;Ljava/lang/String;)"
+                  "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;)"
                       "Ljava/lang/String;");
 }
 
diff --git a/test/004-JniTest/build b/test/004-JniTest/build
new file mode 100755
index 0000000..e8e9f31
--- /dev/null
+++ b/test/004-JniTest/build
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make us exit on a failure.
+set -e
+
+# Hard-wired use of experimental jack.
+# TODO: fix this temporary work-around for lambdas, see b/19467889
+export USE_JACK=true
+# export JACK_SERVER=false
+# export JACK_REPOSITORY="${ANDROID_BUILD_TOP}/prebuilts/sdk/tools/jacks"
+
+# e.g. /foo/bar/jack-3.10.ALPHA.jar -> 3.10.ALPHA
+# export JACK_VERSION="$(find "$JACK_REPOSITORY" -name '*ALPHA*' | sed 's/.*jack-//g' | sed 's/[.]jar//g')"
+./default-build "$@" --experimental lambdas
diff --git a/test/004-JniTest/expected.txt b/test/004-JniTest/expected.txt
index 86ab37e..f7e404d 100644
--- a/test/004-JniTest/expected.txt
+++ b/test/004-JniTest/expected.txt
@@ -28,3 +28,33 @@
 RUNNING sub object, sub class, sub nonstatic
 Subclass.nonstaticMethod
 PASSED sub object, sub class, sub nonstatic
+Calling method ConcreteClass->JniCallNonOverridenDefaultMethod on object of type ConcreteClass
+DefaultInterface.JniCallNonOverridenDefaultMethod
+Calling method ConcreteClass->JniCallOverridenDefaultMethod on object of type ConcreteClass
+ConcreteClass.JniCallOverridenDefaultMethod
+Calling method ConcreteClass->JniCallOverridenDefaultMethodWithSuper on object of type ConcreteClass
+ConcreteClass.JniCallOverridenDefaultMethodWithSuper
+DefaultInterface.JniCallOverridenDefaultMethod
+Calling method ConcreteClass->JniCallOverridenAbstractMethod on object of type ConcreteClass
+ConcreteClass.JniCallOverridenAbstractMethod
+Calling method ConcreteClass->JniCallConflictDefaultMethod on object of type ConcreteClass
+EXCEPTION OCCURRED: java.lang.IncompatibleClassChangeError: Conflicting default method implementations void ConflictInterface.JniCallConflictDefaultMethod()
+Calling method ConcreteClass->JniCallSoftConflictMethod on object of type ConcreteClass
+DefaultInterface.JniCallSoftConflictMethod
+Calling method DefaultInterface->JniCallNonOverridenDefaultMethod on object of type ConcreteClass
+DefaultInterface.JniCallNonOverridenDefaultMethod
+Calling method DefaultInterface->JniCallOverridenDefaultMethod on object of type ConcreteClass
+ConcreteClass.JniCallOverridenDefaultMethod
+Calling method DefaultInterface->JniCallOverridenAbstractMethod on object of type ConcreteClass
+ConcreteClass.JniCallOverridenAbstractMethod
+Calling method DefaultInterface->JniCallConflictDefaultMethod on object of type ConcreteClass
+EXCEPTION OCCURRED: java.lang.IncompatibleClassChangeError: Conflicting default method implementations void ConflictInterface.JniCallConflictDefaultMethod()
+Calling method DefaultInterface->JniCallSoftConflictMethod on object of type ConcreteClass
+DefaultInterface.JniCallSoftConflictMethod
+Calling method AbstractInterface->JniCallSoftConflictMethod on object of type ConcreteClass
+DefaultInterface.JniCallSoftConflictMethod
+Calling method ConflictInterface->JniCallConflictDefaultMethod on object of type ConcreteClass
+EXCEPTION OCCURRED: java.lang.IncompatibleClassChangeError: Conflicting default method implementations void ConflictInterface.JniCallConflictDefaultMethod()
+hi-lambda: λ
+hi-default δλ
+hi-default δλ
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 7045482..2bdf8d1 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -659,3 +659,84 @@
     env->ReleasePrimitiveArrayCritical(array0, data0, 0);
   }
 }
+
+class JniCallDefaultMethodsTest {
+ public:
+  explicit JniCallDefaultMethodsTest(JNIEnv* env)
+      : env_(env), concrete_class_(env_->FindClass("ConcreteClass")) {
+    assert(!env_->ExceptionCheck());
+    assert(concrete_class_ != nullptr);
+  }
+
+  void Test() {
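+    // Resolve method ids on the concrete class and on each interface to exercise default-method dispatch.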
+    TestCalls("ConcreteClass", { "JniCallNonOverridenDefaultMethod",
+                                 "JniCallOverridenDefaultMethod",
+                                 "JniCallOverridenDefaultMethodWithSuper",
+                                 "JniCallOverridenAbstractMethod",
+                                 "JniCallConflictDefaultMethod",
+                                 "JniCallSoftConflictMethod" });
+    TestCalls("DefaultInterface", { "JniCallNonOverridenDefaultMethod",
+                                    "JniCallOverridenDefaultMethod",
+                                    "JniCallOverridenAbstractMethod",
+                                    "JniCallConflictDefaultMethod",
+                                    "JniCallSoftConflictMethod" });
+    TestCalls("AbstractInterface", { "JniCallSoftConflictMethod" });
+    TestCalls("ConflictInterface", { "JniCallConflictDefaultMethod" });
+  }
+
+ private:
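+  // Creates a ConcreteClass instance and invokes each listed ()V method on it, using method ids
+  // resolved on declaring_class; exceptions are printed rather than treated as fatal.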
+  void TestCalls(const char* declaring_class, std::vector<const char*> methods) {
+    jmethodID new_method = env_->GetMethodID(concrete_class_, "<init>", "()V");
+    jobject obj = env_->NewObject(concrete_class_, new_method);
+    assert(!env_->ExceptionCheck());
+    assert(obj != nullptr);
+    jclass decl_class = env_->FindClass(declaring_class);
+    assert(!env_->ExceptionCheck());
+    assert(decl_class != nullptr);
+    for (const char* method : methods) {
+      jmethodID method_id = env_->GetMethodID(decl_class, method, "()V");
+      assert(!env_->ExceptionCheck());
+      printf("Calling method %s->%s on object of type ConcreteClass\n", declaring_class, method);
+      env_->CallVoidMethod(obj, method_id);
+      if (env_->ExceptionCheck()) {
+        jthrowable thrown = env_->ExceptionOccurred();
+        env_->ExceptionClear();
+        jmethodID to_string = env_->GetMethodID(
+            env_->FindClass("java/lang/Object"), "toString", "()Ljava/lang/String;");
+        jstring exception_string = (jstring) env_->CallObjectMethod(thrown, to_string);
+        assert(!env_->ExceptionCheck());
+        const char* exception_string_utf8 = env_->GetStringUTFChars(exception_string, nullptr);
+        assert(!env_->ExceptionCheck());
+        assert(exception_string_utf8 != nullptr);
+        printf("EXCEPTION OCCURED: %s\n", exception_string_utf8);
+        env_->ReleaseStringUTFChars(exception_string, exception_string_utf8);
+      }
+    }
+  }
+
+  JNIEnv* env_;
+  jclass concrete_class_;
+};
+
+extern "C" JNIEXPORT void JNICALL Java_Main_testCallDefaultMethods(JNIEnv* env) {
+  JniCallDefaultMethodsTest(env).Test();
+}
+
+static void InvokeSpecificMethod(JNIEnv* env, jobject obj, const char* method) {
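+  // Resolves the named ()V method on LambdaInterface and invokes it on obj.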
+  jclass lambda_class = env->FindClass("LambdaInterface");
+  assert(!env->ExceptionCheck());
+  assert(lambda_class != nullptr);
+  jmethodID method_id = env->GetMethodID(lambda_class, method, "()V");
+  assert(!env->ExceptionCheck());
+  env->CallVoidMethod(obj, method_id);
+  assert(!env->ExceptionCheck());
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_testInvokeLambdaDefaultMethod(
+    JNIEnv* e, jclass, jobject l) {
+  InvokeSpecificMethod(e, l, "sayHiTwice");
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_testInvokeLambdaMethod(JNIEnv* e, jclass, jobject l) {
+  InvokeSpecificMethod(e, l, "sayHi");
+}
diff --git a/test/004-JniTest/smali/AbstractInterface.smali b/test/004-JniTest/smali/AbstractInterface.smali
new file mode 100644
index 0000000..52b2fc5
--- /dev/null
+++ b/test/004-JniTest/smali/AbstractInterface.smali
@@ -0,0 +1,26 @@
+# /*
+#  * Copyright 2016 The Android Open Source Project
+#  *
+#  * Licensed under the Apache License, Version 2.0 (the "License");
+#  * you may not use this file except in compliance with the License.
+#  * You may obtain a copy of the License at
+#  *
+#  *      http://www.apache.org/licenses/LICENSE-2.0
+#  *
+#  * Unless required by applicable law or agreed to in writing, software
+#  * distributed under the License is distributed on an "AS IS" BASIS,
+#  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  * See the License for the specific language governing permissions and
+#  * limitations under the License.
+#  */
+
+.class public interface LAbstractInterface;
+.super Ljava/lang/Object;
+
+# public interface AbstractInterface {
+#     public void JniCallSoftConflictMethod();
+# }
+
+.method public abstract JniCallSoftConflictMethod()V
+.end method
+
diff --git a/test/004-JniTest/smali/ConcreteClass.smali b/test/004-JniTest/smali/ConcreteClass.smali
new file mode 100644
index 0000000..a9c072f
--- /dev/null
+++ b/test/004-JniTest/smali/ConcreteClass.smali
@@ -0,0 +1,72 @@
+# /*
+#  * Copyright 2016 The Android Open Source Project
+#  *
+#  * Licensed under the Apache License, Version 2.0 (the "License");
+#  * you may not use this file except in compliance with the License.
+#  * You may obtain a copy of the License at
+#  *
+#  *      http://www.apache.org/licenses/LICENSE-2.0
+#  *
+#  * Unless required by applicable law or agreed to in writing, software
+#  * distributed under the License is distributed on an "AS IS" BASIS,
+#  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  * See the License for the specific language governing permissions and
+#  * limitations under the License.
+#  */
+
+.class public LConcreteClass;
+.super Ljava/lang/Object;
+.implements LDefaultInterface;
+.implements LConflictInterface;
+.implements LAbstractInterface;
+
+# public class ConcreteClass implements DefaultInterface, ConflictInterface, AbstractInterface {
+#     public void JniCallOverridenAbstractMethod() {
+#         System.out.println("ConcreteClass.JniCallOverridenAbstractMethod");
+#     }
+#
+#     public void JniCallOverridenDefaultMethod() {
+#         System.out.println("ConcreteClass.JniCallOverridenDefaultMethod");
+#     }
+#
+#     public void JniCallOverridenDefaultMethodWithSuper() {
+#         System.out.println("ConcreteClass.JniCallOverridenDefaultMethodWithSuper");
+#         DefaultInterface.super.JniCallOverridenDefaultMethod();
+#     }
+# }
+
+.method public constructor <init>()V
+    .registers 1
+    invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+    return-void
+.end method
+
+.method public JniCallOverridenAbstractMethod()V
+    .locals 2
+
+    const-string v0, "ConcreteClass.JniCallOverridenAbstractMethod"
+    sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+    invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public JniCallOverridenDefaultMethod()V
+    .locals 2
+
+    const-string v0, "ConcreteClass.JniCallOverridenDefaultMethod"
+    sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+    invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public JniCallOverridenDefaultMethodWithSuper()V
+    .locals 2
+
+    const-string v0, "ConcreteClass.JniCallOverridenDefaultMethodWithSuper"
+    sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+    invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
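+    # invoke-super on an interface method calls DefaultInterface's implementation directly,
+    # the smali equivalent of DefaultInterface.super.JniCallOverridenDefaultMethod() in Java.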
+    invoke-super {p0}, LDefaultInterface;->JniCallOverridenDefaultMethod()V
+
+    return-void
+.end method
diff --git a/test/004-JniTest/smali/ConflictInterface.smali b/test/004-JniTest/smali/ConflictInterface.smali
new file mode 100644
index 0000000..fc3d474
--- /dev/null
+++ b/test/004-JniTest/smali/ConflictInterface.smali
@@ -0,0 +1,35 @@
+# /*
+#  * Copyright 2016 The Android Open Source Project
+#  *
+#  * Licensed under the Apache License, Version 2.0 (the "License");
+#  * you may not use this file except in compliance with the License.
+#  * You may obtain a copy of the License at
+#  *
+#  *      http://www.apache.org/licenses/LICENSE-2.0
+#  *
+#  * Unless required by applicable law or agreed to in writing, software
+#  * distributed under the License is distributed on an "AS IS" BASIS,
+#  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  * See the License for the specific language governing permissions and
+#  * limitations under the License.
+#  */
+
+.class public interface LConflictInterface;
+.super Ljava/lang/Object;
+
+# public interface ConflictInterface {
+#     public default void JniCallConflictDefaultMethod() {
+#         System.out.println("ConflictInterface.JniCallConflictDefaultMethod");
+#     }
+#
+# }
+
+.method public JniCallConflictDefaultMethod()V
+    .locals 2
+
+    const-string v0, "ConflictInterface.JniCallConflictDefaultMethod"
+    sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+    invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+    return-void
+.end method
+
diff --git a/test/004-JniTest/smali/DefaultInterface.smali b/test/004-JniTest/smali/DefaultInterface.smali
new file mode 100644
index 0000000..1ee8721
--- /dev/null
+++ b/test/004-JniTest/smali/DefaultInterface.smali
@@ -0,0 +1,77 @@
+# /*
+#  * Copyright 2016 The Android Open Source Project
+#  *
+#  * Licensed under the Apache License, Version 2.0 (the "License");
+#  * you may not use this file except in compliance with the License.
+#  * You may obtain a copy of the License at
+#  *
+#  *      http://www.apache.org/licenses/LICENSE-2.0
+#  *
+#  * Unless required by applicable law or agreed to in writing, software
+#  * distributed under the License is distributed on an "AS IS" BASIS,
+#  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  * See the License for the specific language governing permissions and
+#  * limitations under the License.
+#  */
+
+.class public interface LDefaultInterface;
+.super Ljava/lang/Object;
+
+# public interface DefaultInterface {
+#     public default void JniCallNonOverridenDefaultMethod() {
+#         System.out.println("DefaultInterface.JniCallNonOverridenDefaultMethod");
+#     }
+#
+#     public default void JniCallOverridenDefaultMethod() {
+#         System.out.println("DefaultInterface.JniCallOverridenDefaultMethod");
+#     }
+#
+#     public void JniCallOverridenAbstractMethod();
+#
+#     public default void JniCallConflictDefaultMethod() {
+#         System.out.println("DefaultInterface.JniCallConflictDefaultMethod");
+#     }
+#
+#     public default void JniCallSoftConflictMethod() {
+#         System.out.println("DefaultInterface.JniCallSoftConflictMethod");
+#     }
+# }
+
+.method public JniCallNonOverridenDefaultMethod()V
+    .locals 2
+
+    const-string v0, "DefaultInterface.JniCallNonOverridenDefaultMethod"
+    sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+    invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public JniCallOverridenDefaultMethod()V
+    .locals 2
+
+    const-string v0, "DefaultInterface.JniCallOverridenDefaultMethod"
+    sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+    invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public abstract JniCallOverridenAbstractMethod()V
+.end method
+
+.method public JniCallConflictDefaultMethod()V
+    .locals 2
+
+    const-string v0, "DefaultInterface.JniCallConflictDefaultMethod"
+    sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+    invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public JniCallSoftConflictMethod()V
+    .locals 2
+
+    const-string v0, "DefaultInterface.JniCallSoftConflictMethod"
+    sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+    invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+    return-void
+.end method
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index 5c39ede..e0530d8 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -39,8 +39,15 @@
         testRemoveLocalObject();
         testProxyGetMethodID();
         testJniCriticalSectionAndGc();
+        testCallDefaultMethods();
+        String lambda = "λ";
+        testInvokeLambdaMethod(() -> { System.out.println("hi-lambda: " + lambda); });
+        String def = "δ";
+        testInvokeLambdaDefaultMethod(() -> { System.out.println("hi-default " + def + lambda); });
     }
 
+    private static native void testCallDefaultMethods();
+
     private static native void testFindClassOnAttachedNativeThread();
 
     private static boolean testFindFieldOnAttachedNativeThreadField;
@@ -121,7 +128,7 @@
     private static void testRemoveLocalObject() {
         removeLocalObject(new Object());
     }
-    
+
     private static native short shortMethod(short s1, short s2, short s3, short s4, short s5, short s6, short s7,
         short s8, short s9, short s10);
 
@@ -252,6 +259,19 @@
     }
 
     private static native void enterJniCriticalSection(int arraySize, byte[] array0, byte[] array);
+
+    private static native void testInvokeLambdaMethod(LambdaInterface iface);
+
+    private static native void testInvokeLambdaDefaultMethod(LambdaInterface iface);
+}
+
+@FunctionalInterface
+interface LambdaInterface {
+  public void sayHi();
+  public default void sayHiTwice() {
+    sayHi();
+    sayHi();
+  }
 }
 
 class JniCallNonvirtualTest {
diff --git a/test/004-ReferenceMap/build b/test/004-ReferenceMap/build
deleted file mode 100644
index 08987b5..0000000
--- a/test/004-ReferenceMap/build
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Stop if something fails.
-set -e
-
-# The test relies on DEX file produced by javac+dx so keep building with them for now
-# (see b/19467889)
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
-  --dump-width=1000 ${DX_FLAGS} classes
-zip $TEST_NAME.jar classes.dex
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 2dbd7e8..2d26fa1 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -65,19 +65,19 @@
-      // We eliminate the non-live registers at a return, so only v3 is live.
+      // We eliminate the non-live registers at a return, so only v2 is live.
       // Note that it is OK for a compiler to not have a dex map at this dex PC because
       // a return is not necessarily a safepoint.
-      CHECK_REGS_CONTAIN_REFS(0x13U, false, 3);  // v3: y
+      CHECK_REGS_CONTAIN_REFS(0x14U, false, 2);  // v2: y
       // Note that v0: ex can be eliminated because it's a dead merge of two different exceptions.
       CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1);  // v8: this, v2: y, v1: x (dead v0: ex)
       if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
-        // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
-        CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1);
-        // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
-        CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1);
-        // v5 is removed from the root set because there is a "merge" operation.
-        // See 0015: if-nez v2, 001f.
-        CHECK_REGS_CONTAIN_REFS(0x1fU, true, 8, 2, 1);  // v8: this, v2: y, v1: x (dead v0: ex)
+        // v8: this, v4: x[1], v2: y, v1: x (dead v0: ex)
+        CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 4, 2, 1);
+        // v8: this, v4: x[1], v2: y, v1: x (dead v0: ex)
+        CHECK_REGS_CONTAIN_REFS(0x1eU, true, 8, 4, 2, 1);
+        // v4 is removed from the root set because there is a "merge" operation.
+        // See 0016: if-nez v2, 0020.
+        CHECK_REGS_CONTAIN_REFS(0x20U, true, 8, 2, 1);  // v8: this, v2: y, v1: x (dead v0: ex)
       }
-      CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1);  // v8: this, v2: y, v1: x (dead v0: ex)
+      CHECK_REGS_CONTAIN_REFS(0x22U, true, 8, 2, 1);  // v8: this, v2: y, v1: x (dead v0: ex)
 
       if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
         CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
@@ -94,124 +94,79 @@
   }
 };
 
-// Dex instructions for the function 'f' in ReferenceMap.java
-// Virtual methods   -
-//    #0              : (in LReferenceMap;)
-//      name          : 'f'
-//      type          : '()Ljava/lang/Object;'
-//      access        : 0x0000 ()
-//      code          -
-//      registers     : 9
-//      ins           : 1
-//      outs          : 2
-//      insns size    : 51 16-bit code units
-//      |[0001e8] ReferenceMap.f:()Ljava/lang/Object;
-//      |0000: const/4 v4, #int 2 // #2
-//      |0001: const/4 v7, #int 0 // #0
-//      |0002: const/4 v6, #int 1 // #1
+// DEX code
 //
-// 0:[Unknown],1:[Unknown],2:[Unknown],3:[Unknown],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |0003: new-array v1, v4, [Ljava/lang/Object;  // type@0007
-//      |0005: const/4 v2, #int 0 // #0
-
-// 0:[Unknown],1:[Reference: java.lang.Object[]],2:[Zero],3:[Unknown],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |0006: new-instance v3, Ljava/lang/Object;  // type@0003
-
-// [Unknown],1:[Reference: java.lang.Object[]],2:[Zero],3:[Uninitialized Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |0008: +invoke-object-init/range {}, Ljava/lang/Object;.<init>:()V // method@0005
-//      |000b: const/4 v4, #int 2 // #2
-
-// 0:[Unknown],1:[Reference: java.lang.Object[]],2:[Zero],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |000c: aput-object v3, v1, v4
-
-// 0:[Unknown],1:[Reference: java.lang.Object[]],2:[Zero],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |000e: aput-object v3, v1, v6
-
-// 0:[Unknown],1:[Reference: java.lang.Object[]],2:[Zero],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |0010: +invoke-virtual-quick {v8, v7}, [000c] // vtable #000c
-
-// 0:[Conflict],1:[Conflict],2:[Conflict],3:[Reference: java.lang.Object],4:[Conflict],5:[Conflict],6:[Conflict],7:[Conflict],8:[Conflict],
-//      |0013: return-object v3
-//      |0014: move-exception v0
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |0015: if-nez v2, 001f // +000a
-//      |0017: const/4 v4, #int 1 // #1
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[32-bit Constant: 1],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |0018: new-instance v5, Ljava/lang/Object;  // type@0003
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[32-bit Constant: 1],5:[Uninitialized Reference: java.lang.Object],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |001a: +invoke-object-init/range {}, Ljava/lang/Object;.<init>:()V // method@0005
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[32-bit Constant: 1],5:[Reference: java.lang.Object],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |001d: aput-object v5, v1, v4
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[32-bit Constant: 2],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |001f: aput-object v2, v1, v6
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[32-bit Constant: 2],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |0021: +invoke-virtual-quick {v8, v7}, [000c] // vtable #000c
-//      |0024: move-object v3, v2
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |0025: goto 0013 // -0012
-//      |0026: move-exception v4
-
-// 0:[Conflict],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[Reference: java.lang.Throwable],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |0027: aput-object v2, v1, v6
-
-// 0:[Conflict],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[Reference: java.lang.Throwable],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |0029: +invoke-virtual-quick {v8, v7}, [000c] // vtable #000c
-
-// 0:[Conflict],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[Reference: java.lang.Throwable],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |002c: throw v4
-//      |002d: move-exception v4
-//      |002e: move-object v2, v3
-
-// 0:[Unknown],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Reference: java.lang.Object],4:[Reference: java.lang.Throwable],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |002f: goto 0027 // -0008
-//      |0030: move-exception v0
-//      |0031: move-object v2, v3
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-//      |0032: goto 0015 // -001d
-//      catches       : 3
-//        0x0006 - 0x000b
-//          Ljava/lang/Exception; -> 0x0014
-//          <any> -> 0x0026
-//        0x000c - 0x000e
-//          Ljava/lang/Exception; -> 0x0030
-//          <any> -> 0x002d
-//        0x0018 - 0x001f
-//          <any> -> 0x0026
-//      positions     :
-//        0x0003 line=8
-//        0x0005 line=9
-//        0x0006 line=11
-//        0x000b line=12
-//        0x000e line=18
-//        0x0010 line=19
-//        0x0013 line=21
-//        0x0014 line=13
-//        0x0015 line=14
-//        0x0017 line=15
-//        0x001f line=18
-//        0x0021 line=19
-//        0x0025 line=20
-//        0x0026 line=18
-//        0x0029 line=19
-//        0x002d line=18
-//        0x0030 line=13
-//      locals        :
-//        0x0006 - 0x000b reg=2 y Ljava/lang/Object;
-//        0x000b - 0x0013 reg=3 y Ljava/lang/Object;
-//        0x0014 - 0x0015 reg=2 y Ljava/lang/Object;
-//        0x0015 - 0x0026 reg=0 ex Ljava/lang/Exception;
-//        0x002d - 0x0032 reg=3 y Ljava/lang/Object;
-//        0x0005 - 0x0033 reg=1 x [Ljava/lang/Object;
-//        0x0032 - 0x0033 reg=2 y Ljava/lang/Object;
-//        0x0000 - 0x0033 reg=8 this LReferenceMap;
+// 0000: const/4 v4, #int 2 // #2
+// 0001: const/4 v7, #int 0 // #0
+// 0002: const/4 v6, #int 1 // #1
+// 0003: new-array v1, v4, [Ljava/lang/Object; // type@0007
+// 0005: const/4 v2, #int 0 // #0
+// 0006: new-instance v3, Ljava/lang/Object; // type@0003
+// 0008: invoke-direct {v3}, Ljava/lang/Object;.<init>:()V // method@0004
+// 000b: const/4 v4, #int 2 // #2
+// 000c: aput-object v3, v1, v4
+// 000e: aput-object v3, v1, v6
+// 0010: invoke-virtual {v8, v7}, LMain;.refmap:(I)I // method@0003
+// 0013: move-object v2, v3
+// 0014: return-object v2
+// 0015: move-exception v0
+// 0016: if-nez v2, 0020 // +000a
+// 0018: new-instance v4, Ljava/lang/Object; // type@0003
+// 001a: invoke-direct {v4}, Ljava/lang/Object;.<init>:()V // method@0004
+// 001d: const/4 v5, #int 1 // #1
+// 001e: aput-object v4, v1, v5
+// 0020: aput-object v2, v1, v6
+// 0022: invoke-virtual {v8, v7}, LMain;.refmap:(I)I // method@0003
+// 0025: goto 0014 // -0011
+// 0026: move-exception v4
+// 0027: aput-object v2, v1, v6
+// 0029: invoke-virtual {v8, v7}, LMain;.refmap:(I)I // method@0003
+// 002c: throw v4
+// 002d: move-exception v4
+// 002e: move-object v2, v3
+// 002f: goto 0027 // -0008
+// 0030: move-exception v0
+// 0031: move-object v2, v3
+// 0032: goto 0016 // -001c
+//    catches       : 3
+//      0x0006 - 0x000b
+//        Ljava/lang/Exception; -> 0x0015
+//        <any> -> 0x0026
+//      0x000c - 0x000e
+//        Ljava/lang/Exception; -> 0x0030
+//        <any> -> 0x002d
+//      0x0018 - 0x0020
+//        <any> -> 0x0026
+//    positions     :
+//      0x0003 line=22
+//      0x0005 line=23
+//      0x0006 line=25
+//      0x000b line=26
+//      0x000e line=32
+//      0x0010 line=33
+//      0x0014 line=35
+//      0x0015 line=27
+//      0x0016 line=28
+//      0x0018 line=29
+//      0x0020 line=32
+//      0x0022 line=33
+//      0x0026 line=31
+//      0x0027 line=32
+//      0x0029 line=33
+//      0x002c line=31
+//      0x0030 line=27
+//    locals        :
+//      0x0006 - 0x000b reg=2 y Ljava/lang/Object;
+//      0x000b - 0x0014 reg=3 y Ljava/lang/Object;
+//      0x0015 - 0x0016 reg=2 y Ljava/lang/Object;
+//      0x0016 - 0x0026 reg=0 ex Ljava/lang/Exception;
+//      0x002d - 0x002f reg=3 y Ljava/lang/Object;
+//      0x002f - 0x0030 reg=2 y Ljava/lang/Object;
+//      0x0030 - 0x0032 reg=3 y Ljava/lang/Object;
+//      0x0031 - 0x0033 reg=0 ex Ljava/lang/Exception;
+//      0x0005 - 0x0033 reg=1 x [Ljava/lang/Object;
+//      0x0032 - 0x0033 reg=2 y Ljava/lang/Object;
+//      0x0000 - 0x0033 reg=8 this LMain;
 
 extern "C" JNIEXPORT jint JNICALL Java_Main_refmap(JNIEnv*, jobject, jint count) {
   // Visitor
diff --git a/test/004-StackWalk/build b/test/004-StackWalk/build
deleted file mode 100644
index 08987b5..0000000
--- a/test/004-StackWalk/build
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Stop if something fails.
-set -e
-
-# The test relies on DEX file produced by javac+dx so keep building with them for now
-# (see b/19467889)
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
-  --dump-width=1000 ${DX_FLAGS} classes
-zip $TEST_NAME.jar classes.dex
diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc
index 3a5854b..51bb68f 100644
--- a/test/004-StackWalk/stack_walk_jni.cc
+++ b/test/004-StackWalk/stack_walk_jni.cc
@@ -42,31 +42,31 @@
     // Given the method name and the number of times the method has been called,
     // we know the Dex registers with live reference values. Assert that what we
     // find is what is expected.
-    if (m_name == "f") {
+    if (m_name == "$noinline$f") {
       if (gJava_StackWalk_refmap_calls == 1) {
         CHECK_EQ(1U, GetDexPc());
-        CHECK_REGS(4);
+        CHECK_REGS(1);  // v1: this
       } else {
         CHECK_EQ(gJava_StackWalk_refmap_calls, 2);
         CHECK_EQ(5U, GetDexPc());
-        CHECK_REGS(4);
+        CHECK_REGS(1);  // v1: this
       }
     } else if (m_name == "g") {
       if (gJava_StackWalk_refmap_calls == 1) {
-        CHECK_EQ(0xcU, GetDexPc());
-        CHECK_REGS(0, 2);  // Note that v1 is not in the minimal root set
+        CHECK_EQ(0xdU, GetDexPc());
+        CHECK_REGS(0, 2);  // v2: this (Note that v1 is not in the minimal root set)
       } else {
         CHECK_EQ(gJava_StackWalk_refmap_calls, 2);
-        CHECK_EQ(0xcU, GetDexPc());
+        CHECK_EQ(0xdU, GetDexPc());
         CHECK_REGS(0, 2);
       }
     } else if (m_name == "shlemiel") {
       if (gJava_StackWalk_refmap_calls == 1) {
-        CHECK_EQ(0x380U, GetDexPc());
+        CHECK_EQ(0x393U, GetDexPc());
         CHECK_REGS(2, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 21, 25);
       } else {
         CHECK_EQ(gJava_StackWalk_refmap_calls, 2);
-        CHECK_EQ(0x380U, GetDexPc());
+        CHECK_EQ(0x393U, GetDexPc());
         CHECK_REGS(2, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 21, 25);
       }
     }
diff --git a/test/020-string/expected.txt b/test/020-string/expected.txt
index 081fea3..76b8929 100644
--- a/test/020-string/expected.txt
+++ b/test/020-string/expected.txt
@@ -5,3 +5,9 @@
 Got expected exception
 subStr is 'uick brown fox jumps over the lazy '
 Indexes are: 0:-1:0:43:33:-1:18:13:13:-1:18:18:-1:13:-1:-1:-1
+Got expected exception
+Got expected exception
+Got expected exception
+Got expected exception
+Got expected exception
+llo And
diff --git a/test/020-string/src/Main.java b/test/020-string/src/Main.java
index b876e6a..7108082 100644
--- a/test/020-string/src/Main.java
+++ b/test/020-string/src/Main.java
@@ -25,6 +25,7 @@
         basicTest();
         indexTest();
         constructorTest();
+        copyTest();
     }
 
     public static void basicTest() {
@@ -117,4 +118,48 @@
         String s14 = new String(codePoints, 1, 3);
         String s15 = new String(stringBuilder);
     }
+
+    public static void copyTest() {
+        String src = new String("Hello Android");
+        char[] dst = new char[7];
+        char[] tmp = null;
+
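+        // Each getChars call below passes invalid arguments and must throw.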
+        try {
+            src.getChars(2, 9, tmp, 0);
+            System.out.println("GLITCH: expected exception");
+        } catch (NullPointerException npe) {
+            System.out.println("Got expected exception");
+        }
+
+        try {
+            src.getChars(-1, 9, dst, 0);
+            System.out.println("GLITCH: expected exception");
+        } catch (StringIndexOutOfBoundsException sioobe) {
+            System.out.println("Got expected exception");
+        }
+
+        try {
+            src.getChars(2, 19, dst, 0);
+            System.out.println("GLITCH: expected exception");
+        } catch (StringIndexOutOfBoundsException sioobe) {
+            System.out.println("Got expected exception");
+        }
+
+        try {
+            src.getChars(2, 1, dst, 0);
+            System.out.println("GLITCH: expected exception");
+        } catch (StringIndexOutOfBoundsException sioobe) {
+            System.out.println("Got expected exception");
+        }
+
+        try {
+            src.getChars(2, 10, dst, 0);
+            System.out.println("GLITCH: expected exception");
+        } catch (ArrayIndexOutOfBoundsException aioobe) {
+            System.out.println("Got expected exception");
+        }
+
+        src.getChars(2, 9, dst, 0);
+        System.out.println(new String(dst));
+    }
 }
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index e5c9dba..93a9005 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -826,6 +826,8 @@
     Assert.assertEquals(Math.round(-2.5f), -2);
     Assert.assertEquals(Math.round(-2.9f), -3);
     Assert.assertEquals(Math.round(-3.0f), -3);
+    // 0x3EFFFFFF is 0.4999999701976776123046875, the largest float less than 0.5f.
+    Assert.assertEquals(Math.round(Float.intBitsToFloat(0x3EFFFFFF)), (int)+0.0f);
     Assert.assertEquals(Math.round(16777215.0f), 16777215);  // 2^24 - 1
     Assert.assertEquals(Math.round(Float.NaN), (int)+0.0f);
     Assert.assertEquals(Math.round(Integer.MAX_VALUE + 1.0f), Integer.MAX_VALUE);
@@ -1037,6 +1039,7 @@
     Assert.assertEquals(StrictMath.round(-2.9d), -3l);
     Assert.assertEquals(StrictMath.round(-3.0d), -3l);
     Assert.assertEquals(StrictMath.round(0.49999999999999994d), 0l);
+    Assert.assertEquals(StrictMath.round(9007199254740991.0d), 9007199254740991l);  // 2^53 - 1
     Assert.assertEquals(StrictMath.round(Double.NaN), (long)+0.0d);
     Assert.assertEquals(StrictMath.round(Long.MAX_VALUE + 1.0d), Long.MAX_VALUE);
     Assert.assertEquals(StrictMath.round(Long.MIN_VALUE - 1.0d), Long.MIN_VALUE);
@@ -1058,6 +1061,9 @@
     Assert.assertEquals(StrictMath.round(-2.5f), -2);
     Assert.assertEquals(StrictMath.round(-2.9f), -3);
     Assert.assertEquals(StrictMath.round(-3.0f), -3);
+    // 0x3EFFFFFF is 0.4999999701976776123046875, the largest float less than 0.5f.
+    Assert.assertEquals(StrictMath.round(Float.intBitsToFloat(0x3EFFFFFF)), (int)+0.0f);
+    Assert.assertEquals(StrictMath.round(16777215.0f), 16777215);  // 2^24 - 1
     Assert.assertEquals(StrictMath.round(Float.NaN), (int)+0.0f);
     Assert.assertEquals(StrictMath.round(Integer.MAX_VALUE + 1.0f), Integer.MAX_VALUE);
     Assert.assertEquals(StrictMath.round(Integer.MIN_VALUE - 1.0f), Integer.MIN_VALUE);
diff --git a/test/089-many-methods/build b/test/089-many-methods/build
index ff77c60..58144e1 100644
--- a/test/089-many-methods/build
+++ b/test/089-many-methods/build
@@ -43,8 +43,4 @@
     printf("}\n") > fileName;
 }'
 
-# The test relies on the error message produced by dx, not jack, so keep building with dx for now
-# (b/19467889).
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx1024m --dex --no-optimize classes
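+# default-build compiles with Jack, which reports the "too many IDs" error checked in expected.txt.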
+./default-build
diff --git a/test/089-many-methods/expected.txt b/test/089-many-methods/expected.txt
index b74e0ee..bfee8b3 100644
--- a/test/089-many-methods/expected.txt
+++ b/test/089-many-methods/expected.txt
@@ -1,6 +1,2 @@
-
-trouble writing output: Too many field references: 131000; max is 65536.
-You may try using --multi-dex option.
-References by package:
-131000 default
-build exit status: 2
+ERROR: Dex writing phase: classes.dex has too many IDs. Try using multi-dex
+build exit status: 4
diff --git a/test/138-duplicate-classes-check2/build b/test/138-duplicate-classes-check2/build
index abcbbb8..d346251 100755
--- a/test/138-duplicate-classes-check2/build
+++ b/test/138-duplicate-classes-check2/build
@@ -24,9 +24,18 @@
 ${JAVAC} -d classes-ex `find src-ex -name '*.java'`
 rm classes-ex/A.class
 
-if [ ${NEED_DEX} = "true" ]; then
+if [ ${USE_JACK} = "true" ]; then
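+  # Package the javac output as a jill archive so Jack can import it and emit classes.dex.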
+  jar cf classes.jill.jar -C classes .
+  ${JACK} --import classes.jill.jar --output-dex .
+  zip ${TEST_NAME}.jar classes.dex
+
+  jar cf classes-ex.jill.jar -C classes-ex .
+  ${JACK} --import classes-ex.jill.jar --output-dex .
+  zip ${TEST_NAME}-ex.jar classes.dex
+elif [ ${NEED_DEX} = "true" ]; then
   ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
-  zip $TEST_NAME.jar classes.dex
+  zip ${TEST_NAME}.jar classes.dex
   ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
   zip ${TEST_NAME}-ex.jar classes.dex
 fi
diff --git a/test/145-alloc-tracking-stress/expected.txt b/test/145-alloc-tracking-stress/expected.txt
new file mode 100644
index 0000000..134d8d0
--- /dev/null
+++ b/test/145-alloc-tracking-stress/expected.txt
@@ -0,0 +1 @@
+Finishing
diff --git a/test/145-alloc-tracking-stress/info.txt b/test/145-alloc-tracking-stress/info.txt
new file mode 100644
index 0000000..443062d
--- /dev/null
+++ b/test/145-alloc-tracking-stress/info.txt
@@ -0,0 +1 @@
+Regression test for b/18661622
diff --git a/test/145-alloc-tracking-stress/src/Main.java b/test/145-alloc-tracking-stress/src/Main.java
new file mode 100644
index 0000000..752fdd9
--- /dev/null
+++ b/test/145-alloc-tracking-stress/src/Main.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.Map;
+
+public class Main implements Runnable {
+    static final int numberOfThreads = 4;
+    static final int totalOperations = 1000;
+    static Method enableAllocTrackingMethod;
+    static Object holder;
+    static volatile boolean trackingThreadDone = false;
+    int threadIndex;
+
+    Main(int index) {
+        threadIndex = index;
+    }
+
+    public static void main(String[] args) throws Exception {
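+        // DdmVmInternal is not public API, so look up enableRecentAllocations reflectively.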
+        Class klass = Class.forName("org.apache.harmony.dalvik.ddmc.DdmVmInternal");
+        if (klass == null) {
+            throw new AssertionError("Couldn't find DdmVmInternal class");
+        }
+        enableAllocTrackingMethod = klass.getDeclaredMethod("enableRecentAllocations",
+                Boolean.TYPE);
+        if (enableAllocTrackingMethod == null) {
+            throw new AssertionError("Couldn't find enableRecentAllocations method");
+        }
+
+        final Thread[] threads = new Thread[numberOfThreads];
+        for (int t = 0; t < threads.length; t++) {
+            threads[t] = new Thread(new Main(t));
+            threads[t].start();
+        }
+        for (Thread t : threads) {
+            t.join();
+        }
+        System.out.println("Finishing");
+    }
+
+    public void run() {
+        if (threadIndex == 0) {
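+            // Thread 0 repeatedly toggles the recent-allocation tracker around
+            // an allocation while the other threads keep allocating (b/18661622).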
+            for (int i = 0; i < totalOperations; ++i) {
+                try {
+                    enableAllocTrackingMethod.invoke(null, true);
+                    holder = new Object();
+                    enableAllocTrackingMethod.invoke(null, false);
+                } catch (Exception e) {
+                    System.out.println(e);
+                    return;
+                }
+            }
+            trackingThreadDone = true;
+        } else {
+            while (!trackingThreadDone) {
+                holder = new Object();
+            }
+        }
+    }
+}
diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java
index 93fe397..b7712a7 100644
--- a/test/442-checker-constant-folding/src/Main.java
+++ b/test/442-checker-constant-folding/src/Main.java
@@ -51,6 +51,23 @@
     }
   }
 
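+  // Identity helpers: $inline$ methods must be inlined, so their constant
+  // arguments only become foldable in the passes that run after inlining.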
+  private static int $inline$int(int x) {
+    return x;
+  }
+
+  private static long $inline$long(long x) {
+    return x;
+  }
+
+  private static float $inline$float(float x) {
+    return x;
+  }
+
+  private static double $inline$double(double x) {
+    return x;
+  }
 
   // Wrappers around methods located in file TestCmp.smali.
 
@@ -194,121 +209,119 @@
     return y;
   }
 
-
   /**
    * Exercise constant folding on addition.
    */
 
-  /// CHECK-START: int Main.IntAddition1() constant_folding (before)
+  /// CHECK-START: int Main.IntAddition1() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const1:i\d+>>  IntConstant 1
   /// CHECK-DAG:     <<Const2:i\d+>>  IntConstant 2
   /// CHECK-DAG:     <<Add:i\d+>>     Add [<<Const1>>,<<Const2>>]
   /// CHECK-DAG:                      Return [<<Add>>]
 
-  /// CHECK-START: int Main.IntAddition1() constant_folding (after)
+  /// CHECK-START: int Main.IntAddition1() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const3:i\d+>>  IntConstant 3
   /// CHECK-DAG:                      Return [<<Const3>>]
 
-  /// CHECK-START: int Main.IntAddition1() constant_folding (after)
+  /// CHECK-START: int Main.IntAddition1() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Add
 
   public static int IntAddition1() {
     int a, b, c;
-    a = 1;
-    b = 2;
+    a = $inline$int(1);
+    b = $inline$int(2);
     c = a + b;
     return c;
   }
 
-  /// CHECK-START: int Main.IntAddition2() constant_folding (before)
+  /// CHECK-START: int Main.IntAddition2() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const1:i\d+>>  IntConstant 1
   /// CHECK-DAG:     <<Const2:i\d+>>  IntConstant 2
   /// CHECK-DAG:     <<Const5:i\d+>>  IntConstant 5
   /// CHECK-DAG:     <<Const6:i\d+>>  IntConstant 6
-  /// CHECK-DAG:     <<Const11:i\d+>> IntConstant 11
   /// CHECK-DAG:     <<Add1:i\d+>>    Add [<<Const1>>,<<Const2>>]
-  /// CHECK-DAG:                      Add [<<Const5>>,<<Const6>>]
-  /// CHECK-DAG:     <<Add3:i\d+>>    Add [<<Add1>>,<<Const11>>]
+  /// CHECK-DAG:     <<Add2:i\d+>>    Add [<<Const5>>,<<Const6>>]
+  /// CHECK-DAG:     <<Add3:i\d+>>    Add [<<Add1>>,<<Add2>>]
   /// CHECK-DAG:                      Return [<<Add3>>]
 
-  /// CHECK-START: int Main.IntAddition2() constant_folding (after)
+  /// CHECK-START: int Main.IntAddition2() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const14:i\d+>> IntConstant 14
   /// CHECK-DAG:                      Return [<<Const14>>]
 
-  /// CHECK-START: int Main.IntAddition2() constant_folding (after)
+  /// CHECK-START: int Main.IntAddition2() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Add
 
   public static int IntAddition2() {
     int a, b, c;
-    a = 1;
-    b = 2;
+    a = $inline$int(1);
+    b = $inline$int(2);
     a += b;
-    b = 5;
-    c = 6;
+    b = $inline$int(5);
+    c = $inline$int(6);
     b += c;
     c = a + b;
     return c;
   }
 
-  /// CHECK-START: long Main.LongAddition() constant_folding (before)
+  /// CHECK-START: long Main.LongAddition() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const1:j\d+>>  LongConstant 1
   /// CHECK-DAG:     <<Const2:j\d+>>  LongConstant 2
   /// CHECK-DAG:     <<Add:j\d+>>     Add [<<Const1>>,<<Const2>>]
   /// CHECK-DAG:                      Return [<<Add>>]
 
-  /// CHECK-START: long Main.LongAddition() constant_folding (after)
+  /// CHECK-START: long Main.LongAddition() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const3:j\d+>>  LongConstant 3
   /// CHECK-DAG:                      Return [<<Const3>>]
 
-  /// CHECK-START: long Main.LongAddition() constant_folding (after)
+  /// CHECK-START: long Main.LongAddition() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Add
 
   public static long LongAddition() {
     long a, b, c;
-    a = 1L;
-    b = 2L;
+    a = $inline$long(1L);
+    b = $inline$long(2L);
     c = a + b;
     return c;
   }
 
-  /// CHECK-START: float Main.FloatAddition() constant_folding (before)
+  /// CHECK-START: float Main.FloatAddition() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const1:f\d+>>  FloatConstant 1
   /// CHECK-DAG:     <<Const2:f\d+>>  FloatConstant 2
   /// CHECK-DAG:     <<Add:f\d+>>     Add [<<Const1>>,<<Const2>>]
   /// CHECK-DAG:                      Return [<<Add>>]
 
-  /// CHECK-START: float Main.FloatAddition() constant_folding (after)
+  /// CHECK-START: float Main.FloatAddition() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const3:f\d+>>  FloatConstant 3
   /// CHECK-DAG:                      Return [<<Const3>>]
 
-  /// CHECK-START: float Main.FloatAddition() constant_folding (after)
+  /// CHECK-START: float Main.FloatAddition() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Add
 
   public static float FloatAddition() {
     float a, b, c;
-    a = 1F;
-    b = 2F;
+    a = $inline$float(1F);
+    b = $inline$float(2F);
     c = a + b;
     return c;
   }
 
-  /// CHECK-START: double Main.DoubleAddition() constant_folding (before)
+  /// CHECK-START: double Main.DoubleAddition() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const1:d\d+>>  DoubleConstant 1
   /// CHECK-DAG:     <<Const2:d\d+>>  DoubleConstant 2
   /// CHECK-DAG:     <<Add:d\d+>>     Add [<<Const1>>,<<Const2>>]
   /// CHECK-DAG:                      Return [<<Add>>]
 
-  /// CHECK-START: double Main.DoubleAddition() constant_folding (after)
+  /// CHECK-START: double Main.DoubleAddition() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const3:d\d+>>  DoubleConstant 3
   /// CHECK-DAG:                      Return [<<Const3>>]
 
-  /// CHECK-START: double Main.DoubleAddition() constant_folding (after)
+  /// CHECK-START: double Main.DoubleAddition() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Add
 
   public static double DoubleAddition() {
     double a, b, c;
-    a = 1D;
-    b = 2D;
+    a = $inline$double(1D);
+    b = $inline$double(2D);
     c = a + b;
     return c;
   }
@@ -318,86 +331,86 @@
    * Exercise constant folding on subtraction.
    */
 
-  /// CHECK-START: int Main.IntSubtraction() constant_folding (before)
+  /// CHECK-START: int Main.IntSubtraction() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const6:i\d+>>  IntConstant 6
   /// CHECK-DAG:     <<Const2:i\d+>>  IntConstant 2
   /// CHECK-DAG:     <<Sub:i\d+>>     Sub [<<Const6>>,<<Const2>>]
   /// CHECK-DAG:                      Return [<<Sub>>]
 
-  /// CHECK-START: int Main.IntSubtraction() constant_folding (after)
+  /// CHECK-START: int Main.IntSubtraction() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const4:i\d+>>  IntConstant 4
   /// CHECK-DAG:                      Return [<<Const4>>]
 
-  /// CHECK-START: int Main.IntSubtraction() constant_folding (after)
+  /// CHECK-START: int Main.IntSubtraction() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Sub
 
   public static int IntSubtraction() {
     int a, b, c;
-    a = 6;
-    b = 2;
+    a = $inline$int(6);
+    b = $inline$int(2);
     c = a - b;
     return c;
   }
 
-  /// CHECK-START: long Main.LongSubtraction() constant_folding (before)
+  /// CHECK-START: long Main.LongSubtraction() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const6:j\d+>>  LongConstant 6
   /// CHECK-DAG:     <<Const2:j\d+>>  LongConstant 2
   /// CHECK-DAG:     <<Sub:j\d+>>     Sub [<<Const6>>,<<Const2>>]
   /// CHECK-DAG:                      Return [<<Sub>>]
 
-  /// CHECK-START: long Main.LongSubtraction() constant_folding (after)
+  /// CHECK-START: long Main.LongSubtraction() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const4:j\d+>>  LongConstant 4
   /// CHECK-DAG:                      Return [<<Const4>>]
 
-  /// CHECK-START: long Main.LongSubtraction() constant_folding (after)
+  /// CHECK-START: long Main.LongSubtraction() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Sub
 
   public static long LongSubtraction() {
     long a, b, c;
-    a = 6L;
-    b = 2L;
+    a = $inline$long(6L);
+    b = $inline$long(2L);
     c = a - b;
     return c;
   }
 
-  /// CHECK-START: float Main.FloatSubtraction() constant_folding (before)
+  /// CHECK-START: float Main.FloatSubtraction() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const6:f\d+>>  FloatConstant 6
   /// CHECK-DAG:     <<Const2:f\d+>>  FloatConstant 2
   /// CHECK-DAG:     <<Sub:f\d+>>     Sub [<<Const6>>,<<Const2>>]
   /// CHECK-DAG:                      Return [<<Sub>>]
 
-  /// CHECK-START: float Main.FloatSubtraction() constant_folding (after)
+  /// CHECK-START: float Main.FloatSubtraction() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const4:f\d+>>  FloatConstant 4
   /// CHECK-DAG:                      Return [<<Const4>>]
 
-  /// CHECK-START: float Main.FloatSubtraction() constant_folding (after)
+  /// CHECK-START: float Main.FloatSubtraction() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Sub
 
   public static float FloatSubtraction() {
     float a, b, c;
-    a = 6F;
-    b = 2F;
+    a = $inline$float(6F);
+    b = $inline$float(2F);
     c = a - b;
     return c;
   }
 
-  /// CHECK-START: double Main.DoubleSubtraction() constant_folding (before)
+  /// CHECK-START: double Main.DoubleSubtraction() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const6:d\d+>>  DoubleConstant 6
   /// CHECK-DAG:     <<Const2:d\d+>>  DoubleConstant 2
   /// CHECK-DAG:     <<Sub:d\d+>>     Sub [<<Const6>>,<<Const2>>]
   /// CHECK-DAG:                      Return [<<Sub>>]
 
-  /// CHECK-START: double Main.DoubleSubtraction() constant_folding (after)
+  /// CHECK-START: double Main.DoubleSubtraction() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const4:d\d+>>  DoubleConstant 4
   /// CHECK-DAG:                      Return [<<Const4>>]
 
-  /// CHECK-START: double Main.DoubleSubtraction() constant_folding (after)
+  /// CHECK-START: double Main.DoubleSubtraction() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Sub
 
   public static double DoubleSubtraction() {
     double a, b, c;
-    a = 6D;
-    b = 2D;
+    a = $inline$double(6D);
+    b = $inline$double(2D);
     c = a - b;
     return c;
   }
@@ -407,86 +420,86 @@
    * Exercise constant folding on multiplication.
    */
 
-  /// CHECK-START: int Main.IntMultiplication() constant_folding (before)
+  /// CHECK-START: int Main.IntMultiplication() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const7:i\d+>>  IntConstant 7
   /// CHECK-DAG:     <<Const3:i\d+>>  IntConstant 3
   /// CHECK-DAG:     <<Mul:i\d+>>     Mul [<<Const7>>,<<Const3>>]
   /// CHECK-DAG:                      Return [<<Mul>>]
 
-  /// CHECK-START: int Main.IntMultiplication() constant_folding (after)
+  /// CHECK-START: int Main.IntMultiplication() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const21:i\d+>> IntConstant 21
   /// CHECK-DAG:                      Return [<<Const21>>]
 
-  /// CHECK-START: int Main.IntMultiplication() constant_folding (after)
+  /// CHECK-START: int Main.IntMultiplication() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Mul
 
   public static int IntMultiplication() {
     int a, b, c;
-    a = 7;
-    b = 3;
+    a = $inline$int(7);
+    b = $inline$int(3);
     c = a * b;
     return c;
   }
 
-  /// CHECK-START: long Main.LongMultiplication() constant_folding (before)
+  /// CHECK-START: long Main.LongMultiplication() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const7:j\d+>>  LongConstant 7
   /// CHECK-DAG:     <<Const3:j\d+>>  LongConstant 3
   /// CHECK-DAG:     <<Mul:j\d+>>     Mul [<<Const7>>,<<Const3>>]
   /// CHECK-DAG:                      Return [<<Mul>>]
 
-  /// CHECK-START: long Main.LongMultiplication() constant_folding (after)
+  /// CHECK-START: long Main.LongMultiplication() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const21:j\d+>> LongConstant 21
   /// CHECK-DAG:                      Return [<<Const21>>]
 
-  /// CHECK-START: long Main.LongMultiplication() constant_folding (after)
+  /// CHECK-START: long Main.LongMultiplication() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Mul
 
   public static long LongMultiplication() {
     long a, b, c;
-    a = 7L;
-    b = 3L;
+    a = $inline$long(7L);
+    b = $inline$long(3L);
     c = a * b;
     return c;
   }
 
-  /// CHECK-START: float Main.FloatMultiplication() constant_folding (before)
+  /// CHECK-START: float Main.FloatMultiplication() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const7:f\d+>>  FloatConstant 7
   /// CHECK-DAG:     <<Const3:f\d+>>  FloatConstant 3
   /// CHECK-DAG:     <<Mul:f\d+>>     Mul [<<Const7>>,<<Const3>>]
   /// CHECK-DAG:                      Return [<<Mul>>]
 
-  /// CHECK-START: float Main.FloatMultiplication() constant_folding (after)
+  /// CHECK-START: float Main.FloatMultiplication() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const21:f\d+>> FloatConstant 21
   /// CHECK-DAG:                      Return [<<Const21>>]
 
-  /// CHECK-START: float Main.FloatMultiplication() constant_folding (after)
+  /// CHECK-START: float Main.FloatMultiplication() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Mul
 
   public static float FloatMultiplication() {
     float a, b, c;
-    a = 7F;
-    b = 3F;
+    a = $inline$float(7F);
+    b = $inline$float(3F);
     c = a * b;
     return c;
   }
 
-  /// CHECK-START: double Main.DoubleMultiplication() constant_folding (before)
+  /// CHECK-START: double Main.DoubleMultiplication() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const7:d\d+>>  DoubleConstant 7
   /// CHECK-DAG:     <<Const3:d\d+>>  DoubleConstant 3
   /// CHECK-DAG:     <<Mul:d\d+>>     Mul [<<Const7>>,<<Const3>>]
   /// CHECK-DAG:                      Return [<<Mul>>]
 
-  /// CHECK-START: double Main.DoubleMultiplication() constant_folding (after)
+  /// CHECK-START: double Main.DoubleMultiplication() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const21:d\d+>> DoubleConstant 21
   /// CHECK-DAG:                      Return [<<Const21>>]
 
-  /// CHECK-START: double Main.DoubleMultiplication() constant_folding (after)
+  /// CHECK-START: double Main.DoubleMultiplication() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Mul
 
   public static double DoubleMultiplication() {
     double a, b, c;
-    a = 7D;
-    b = 3D;
+    a = $inline$double(7D);
+    b = $inline$double(3D);
     c = a * b;
     return c;
   }
@@ -496,90 +509,90 @@
    * Exercise constant folding on division.
    */
 
-  /// CHECK-START: int Main.IntDivision() constant_folding (before)
+  /// CHECK-START: int Main.IntDivision() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const8:i\d+>>   IntConstant 8
   /// CHECK-DAG:     <<Const3:i\d+>>   IntConstant 3
   /// CHECK-DAG:     <<Div0Chk:i\d+>>  DivZeroCheck [<<Const3>>]
   /// CHECK-DAG:     <<Div:i\d+>>      Div [<<Const8>>,<<Div0Chk>>]
   /// CHECK-DAG:                       Return [<<Div>>]
 
-  /// CHECK-START: int Main.IntDivision() constant_folding (after)
+  /// CHECK-START: int Main.IntDivision() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const2:i\d+>>   IntConstant 2
   /// CHECK-DAG:                       Return [<<Const2>>]
 
-  /// CHECK-START: int Main.IntDivision() constant_folding (after)
+  /// CHECK-START: int Main.IntDivision() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       DivZeroCheck
   /// CHECK-NOT:                       Div
 
   public static int IntDivision() {
     int a, b, c;
-    a = 8;
-    b = 3;
+    a = $inline$int(8);
+    b = $inline$int(3);
     c = a / b;
     return c;
   }
 
-  /// CHECK-START: long Main.LongDivision() constant_folding (before)
+  /// CHECK-START: long Main.LongDivision() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const8:j\d+>>   LongConstant 8
   /// CHECK-DAG:     <<Const3:j\d+>>   LongConstant 3
   /// CHECK-DAG:     <<Div0Chk:j\d+>>  DivZeroCheck [<<Const3>>]
   /// CHECK-DAG:     <<Div:j\d+>>      Div [<<Const8>>,<<Div0Chk>>]
   /// CHECK-DAG:                       Return [<<Div>>]
 
-  /// CHECK-START: long Main.LongDivision() constant_folding (after)
+  /// CHECK-START: long Main.LongDivision() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const2:j\d+>>   LongConstant 2
   /// CHECK-DAG:                       Return [<<Const2>>]
 
-  /// CHECK-START: long Main.LongDivision() constant_folding (after)
+  /// CHECK-START: long Main.LongDivision() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       DivZeroCheck
   /// CHECK-NOT:                       Div
 
   public static long LongDivision() {
     long a, b, c;
-    a = 8L;
-    b = 3L;
+    a = $inline$long(8L);
+    b = $inline$long(3L);
     c = a / b;
     return c;
   }
 
-  /// CHECK-START: float Main.FloatDivision() constant_folding (before)
+  /// CHECK-START: float Main.FloatDivision() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const8:f\d+>>   FloatConstant 8
   /// CHECK-DAG:     <<Const2P5:f\d+>> FloatConstant 2.5
   /// CHECK-DAG:     <<Div:f\d+>>      Div [<<Const8>>,<<Const2P5>>]
   /// CHECK-DAG:                       Return [<<Div>>]
 
-  /// CHECK-START: float Main.FloatDivision() constant_folding (after)
+  /// CHECK-START: float Main.FloatDivision() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const3P2:f\d+>> FloatConstant 3.2
   /// CHECK-DAG:                       Return [<<Const3P2>>]
 
-  /// CHECK-START: float Main.FloatDivision() constant_folding (after)
+  /// CHECK-START: float Main.FloatDivision() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       Div
 
   public static float FloatDivision() {
     float a, b, c;
-    a = 8F;
-    b = 2.5F;
+    a = $inline$float(8F);
+    b = $inline$float(2.5F);
     c = a / b;
     return c;
   }
 
-  /// CHECK-START: double Main.DoubleDivision() constant_folding (before)
+  /// CHECK-START: double Main.DoubleDivision() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const8:d\d+>>   DoubleConstant 8
   /// CHECK-DAG:     <<Const2P5:d\d+>> DoubleConstant 2.5
   /// CHECK-DAG:     <<Div:d\d+>>      Div [<<Const8>>,<<Const2P5>>]
   /// CHECK-DAG:                       Return [<<Div>>]
 
-  /// CHECK-START: double Main.DoubleDivision() constant_folding (after)
+  /// CHECK-START: double Main.DoubleDivision() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const3P2:d\d+>> DoubleConstant 3.2
   /// CHECK-DAG:                       Return [<<Const3P2>>]
 
-  /// CHECK-START: double Main.DoubleDivision() constant_folding (after)
+  /// CHECK-START: double Main.DoubleDivision() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       Div
 
   public static double DoubleDivision() {
     double a, b, c;
-    a = 8D;
-    b = 2.5D;
+    a = $inline$double(8D);
+    b = $inline$double(2.5D);
     c = a / b;
     return c;
   }
@@ -589,90 +602,90 @@
    * Exercise constant folding on remainder.
    */
 
-  /// CHECK-START: int Main.IntRemainder() constant_folding (before)
+  /// CHECK-START: int Main.IntRemainder() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const8:i\d+>>   IntConstant 8
   /// CHECK-DAG:     <<Const3:i\d+>>   IntConstant 3
   /// CHECK-DAG:     <<Div0Chk:i\d+>>  DivZeroCheck [<<Const3>>]
   /// CHECK-DAG:     <<Rem:i\d+>>      Rem [<<Const8>>,<<Div0Chk>>]
   /// CHECK-DAG:                       Return [<<Rem>>]
 
-  /// CHECK-START: int Main.IntRemainder() constant_folding (after)
+  /// CHECK-START: int Main.IntRemainder() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const2:i\d+>>   IntConstant 2
   /// CHECK-DAG:                       Return [<<Const2>>]
 
-  /// CHECK-START: int Main.IntRemainder() constant_folding (after)
+  /// CHECK-START: int Main.IntRemainder() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       DivZeroCheck
   /// CHECK-NOT:                       Rem
 
   public static int IntRemainder() {
     int a, b, c;
-    a = 8;
-    b = 3;
+    a = $inline$int(8);
+    b = $inline$int(3);
     c = a % b;
     return c;
   }
 
-  /// CHECK-START: long Main.LongRemainder() constant_folding (before)
+  /// CHECK-START: long Main.LongRemainder() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const8:j\d+>>   LongConstant 8
   /// CHECK-DAG:     <<Const3:j\d+>>   LongConstant 3
   /// CHECK-DAG:     <<Div0Chk:j\d+>>  DivZeroCheck [<<Const3>>]
   /// CHECK-DAG:     <<Rem:j\d+>>      Rem [<<Const8>>,<<Div0Chk>>]
   /// CHECK-DAG:                       Return [<<Rem>>]
 
-  /// CHECK-START: long Main.LongRemainder() constant_folding (after)
+  /// CHECK-START: long Main.LongRemainder() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const2:j\d+>>   LongConstant 2
   /// CHECK-DAG:                       Return [<<Const2>>]
 
-  /// CHECK-START: long Main.LongRemainder() constant_folding (after)
+  /// CHECK-START: long Main.LongRemainder() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       DivZeroCheck
   /// CHECK-NOT:                       Rem
 
   public static long LongRemainder() {
     long a, b, c;
-    a = 8L;
-    b = 3L;
+    a = $inline$long(8L);
+    b = $inline$long(3L);
     c = a % b;
     return c;
   }
 
-  /// CHECK-START: float Main.FloatRemainder() constant_folding (before)
+  /// CHECK-START: float Main.FloatRemainder() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const8:f\d+>>   FloatConstant 8
   /// CHECK-DAG:     <<Const2P5:f\d+>> FloatConstant 2.5
   /// CHECK-DAG:     <<Rem:f\d+>>      Rem [<<Const8>>,<<Const2P5>>]
   /// CHECK-DAG:                       Return [<<Rem>>]
 
-  /// CHECK-START: float Main.FloatRemainder() constant_folding (after)
+  /// CHECK-START: float Main.FloatRemainder() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const0P5:f\d+>> FloatConstant 0.5
   /// CHECK-DAG:                       Return [<<Const0P5>>]
 
-  /// CHECK-START: float Main.FloatRemainder() constant_folding (after)
+  /// CHECK-START: float Main.FloatRemainder() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       Rem
 
   public static float FloatRemainder() {
     float a, b, c;
-    a = 8F;
-    b = 2.5F;
+    a = $inline$float(8F);
+    b = $inline$float(2.5F);
     c = a % b;
     return c;
   }
 
-  /// CHECK-START: double Main.DoubleRemainder() constant_folding (before)
+  /// CHECK-START: double Main.DoubleRemainder() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const8:d\d+>>   DoubleConstant 8
   /// CHECK-DAG:     <<Const2P5:d\d+>> DoubleConstant 2.5
   /// CHECK-DAG:     <<Rem:d\d+>>      Rem [<<Const8>>,<<Const2P5>>]
   /// CHECK-DAG:                       Return [<<Rem>>]
 
-  /// CHECK-START: double Main.DoubleRemainder() constant_folding (after)
+  /// CHECK-START: double Main.DoubleRemainder() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const0P5:d\d+>> DoubleConstant 0.5
   /// CHECK-DAG:                       Return [<<Const0P5>>]
 
-  /// CHECK-START: double Main.DoubleRemainder() constant_folding (after)
+  /// CHECK-START: double Main.DoubleRemainder() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       Rem
 
   public static double DoubleRemainder() {
     double a, b, c;
-    a = 8D;
-    b = 2.5D;
+    a = $inline$double(8D);
+    b = $inline$double(2.5D);
     c = a % b;
     return c;
   }
@@ -682,42 +695,42 @@
    * Exercise constant folding on left shift.
    */
 
-  /// CHECK-START: int Main.ShlIntLong() constant_folding (before)
+  /// CHECK-START: int Main.ShlIntLong() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const1:i\d+>>   IntConstant 1
   /// CHECK-DAG:     <<Const2L:j\d+>>  LongConstant 2
   /// CHECK-DAG:     <<TypeConv:i\d+>> TypeConversion [<<Const2L>>]
   /// CHECK-DAG:     <<Shl:i\d+>>      Shl [<<Const1>>,<<TypeConv>>]
   /// CHECK-DAG:                       Return [<<Shl>>]
 
-  /// CHECK-START: int Main.ShlIntLong() constant_folding (after)
+  /// CHECK-START: int Main.ShlIntLong() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const4:i\d+>>   IntConstant 4
   /// CHECK-DAG:                       Return [<<Const4>>]
 
-  /// CHECK-START: int Main.ShlIntLong() constant_folding (after)
+  /// CHECK-START: int Main.ShlIntLong() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       Shl
 
   public static int ShlIntLong() {
-    int lhs = 1;
-    long rhs = 2;
+    int lhs = $inline$int(1);
+    long rhs = $inline$long(2L);
     return lhs << rhs;
   }
 
-  /// CHECK-START: long Main.ShlLongInt() constant_folding (before)
+  /// CHECK-START: long Main.ShlLongInt() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const3L:j\d+>>  LongConstant 3
   /// CHECK-DAG:     <<Const2:i\d+>>   IntConstant 2
   /// CHECK-DAG:     <<Shl:j\d+>>      Shl [<<Const3L>>,<<Const2>>]
   /// CHECK-DAG:                       Return [<<Shl>>]
 
-  /// CHECK-START: long Main.ShlLongInt() constant_folding (after)
+  /// CHECK-START: long Main.ShlLongInt() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const12L:j\d+>> LongConstant 12
   /// CHECK-DAG:                       Return [<<Const12L>>]
 
-  /// CHECK-START: long Main.ShlLongInt() constant_folding (after)
+  /// CHECK-START: long Main.ShlLongInt() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       Shl
 
   public static long ShlLongInt() {
-    long lhs = 3;
-    int rhs = 2;
+    long lhs = $inline$long(3L);
+    int rhs = $inline$int(2);
     return lhs << rhs;
   }
 
@@ -726,42 +739,42 @@
    * Exercise constant folding on right shift.
    */
 
-  /// CHECK-START: int Main.ShrIntLong() constant_folding (before)
+  /// CHECK-START: int Main.ShrIntLong() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const7:i\d+>>   IntConstant 7
   /// CHECK-DAG:     <<Const2L:j\d+>>  LongConstant 2
   /// CHECK-DAG:     <<TypeConv:i\d+>> TypeConversion [<<Const2L>>]
   /// CHECK-DAG:     <<Shr:i\d+>>      Shr [<<Const7>>,<<TypeConv>>]
   /// CHECK-DAG:                       Return [<<Shr>>]
 
-  /// CHECK-START: int Main.ShrIntLong() constant_folding (after)
+  /// CHECK-START: int Main.ShrIntLong() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const1:i\d+>>   IntConstant 1
   /// CHECK-DAG:                       Return [<<Const1>>]
 
-  /// CHECK-START: int Main.ShrIntLong() constant_folding (after)
+  /// CHECK-START: int Main.ShrIntLong() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       Shr
 
   public static int ShrIntLong() {
-    int lhs = 7;
-    long rhs = 2;
+    int lhs = $inline$int(7);
+    long rhs = $inline$long(2L);
     return lhs >> rhs;
   }
 
-  /// CHECK-START: long Main.ShrLongInt() constant_folding (before)
+  /// CHECK-START: long Main.ShrLongInt() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const9L:j\d+>>  LongConstant 9
   /// CHECK-DAG:     <<Const2:i\d+>>   IntConstant 2
   /// CHECK-DAG:     <<Shr:j\d+>>      Shr [<<Const9L>>,<<Const2>>]
   /// CHECK-DAG:                       Return [<<Shr>>]
 
-  /// CHECK-START: long Main.ShrLongInt() constant_folding (after)
+  /// CHECK-START: long Main.ShrLongInt() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const2L:j\d+>>  LongConstant 2
   /// CHECK-DAG:                       Return [<<Const2L>>]
 
-  /// CHECK-START: long Main.ShrLongInt() constant_folding (after)
+  /// CHECK-START: long Main.ShrLongInt() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       Shr
 
   public static long ShrLongInt() {
-    long lhs = 9;
-    int rhs = 2;
+    long lhs = $inline$long(9);
+    int rhs = $inline$int(2);
     return lhs >> rhs;
   }
 
@@ -770,42 +783,42 @@
    * Exercise constant folding on unsigned right shift.
    */
 
-  /// CHECK-START: int Main.UShrIntLong() constant_folding (before)
+  /// CHECK-START: int Main.UShrIntLong() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<ConstM7:i\d+>>  IntConstant -7
   /// CHECK-DAG:     <<Const2L:j\d+>>  LongConstant 2
   /// CHECK-DAG:     <<TypeConv:i\d+>> TypeConversion [<<Const2L>>]
   /// CHECK-DAG:     <<UShr:i\d+>>     UShr [<<ConstM7>>,<<TypeConv>>]
   /// CHECK-DAG:                       Return [<<UShr>>]
 
-  /// CHECK-START: int Main.UShrIntLong() constant_folding (after)
+  /// CHECK-START: int Main.UShrIntLong() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<ConstRes:i\d+>> IntConstant 1073741822
   /// CHECK-DAG:                       Return [<<ConstRes>>]
 
-  /// CHECK-START: int Main.UShrIntLong() constant_folding (after)
+  /// CHECK-START: int Main.UShrIntLong() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       UShr
 
   public static int UShrIntLong() {
-    int lhs = -7;
-    long rhs = 2;
+    int lhs = $inline$int(-7);
+    long rhs = $inline$long(2L);
     return lhs >>> rhs;
   }
 
-  /// CHECK-START: long Main.UShrLongInt() constant_folding (before)
+  /// CHECK-START: long Main.UShrLongInt() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<ConstM9L:j\d+>> LongConstant -9
   /// CHECK-DAG:     <<Const2:i\d+>>   IntConstant 2
   /// CHECK-DAG:     <<UShr:j\d+>>     UShr [<<ConstM9L>>,<<Const2>>]
   /// CHECK-DAG:                       Return [<<UShr>>]
 
-  /// CHECK-START: long Main.UShrLongInt() constant_folding (after)
+  /// CHECK-START: long Main.UShrLongInt() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<ConstRes:j\d+>> LongConstant 4611686018427387901
   /// CHECK-DAG:                       Return [<<ConstRes>>]
 
-  /// CHECK-START: long Main.UShrLongInt() constant_folding (after)
+  /// CHECK-START: long Main.UShrLongInt() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       UShr
 
   public static long UShrLongInt() {
-    long lhs = -9;
-    int rhs = 2;
+    long lhs = $inline$long(-9);
+    int rhs = $inline$int(2);
     return lhs >>> rhs;
   }
 
@@ -814,43 +827,43 @@
    * Exercise constant folding on logical and.
    */
 
-  /// CHECK-START: long Main.AndIntLong() constant_folding (before)
+  /// CHECK-START: long Main.AndIntLong() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const10:i\d+>>  IntConstant 10
   /// CHECK-DAG:     <<Const3L:j\d+>>  LongConstant 3
   /// CHECK-DAG:     <<TypeConv:j\d+>> TypeConversion [<<Const10>>]
   /// CHECK-DAG:     <<And:j\d+>>      And [<<TypeConv>>,<<Const3L>>]
   /// CHECK-DAG:                       Return [<<And>>]
 
-  /// CHECK-START: long Main.AndIntLong() constant_folding (after)
+  /// CHECK-START: long Main.AndIntLong() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const2:j\d+>>   LongConstant 2
   /// CHECK-DAG:                       Return [<<Const2>>]
 
-  /// CHECK-START: long Main.AndIntLong() constant_folding (after)
+  /// CHECK-START: long Main.AndIntLong() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       And
 
   public static long AndIntLong() {
-    int lhs = 10;
-    long rhs = 3;
+    int lhs = $inline$int(10);
+    long rhs = $inline$long(3L);
     return lhs & rhs;
   }
 
-  /// CHECK-START: long Main.AndLongInt() constant_folding (before)
+  /// CHECK-START: long Main.AndLongInt() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const10L:j\d+>> LongConstant 10
   /// CHECK-DAG:     <<Const3:i\d+>>   IntConstant 3
   /// CHECK-DAG:     <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
   /// CHECK-DAG:     <<And:j\d+>>      And [<<TypeConv>>,<<Const10L>>]
   /// CHECK-DAG:                       Return [<<And>>]
 
-  /// CHECK-START: long Main.AndLongInt() constant_folding (after)
+  /// CHECK-START: long Main.AndLongInt() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const2:j\d+>>   LongConstant 2
   /// CHECK-DAG:                       Return [<<Const2>>]
 
-  /// CHECK-START: long Main.AndLongInt() constant_folding (after)
+  /// CHECK-START: long Main.AndLongInt() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       And
 
   public static long AndLongInt() {
-    long lhs = 10;
-    int rhs = 3;
+    long lhs = $inline$long(10L);
+    int rhs = $inline$int(3);
     return lhs & rhs;
   }
 
@@ -859,43 +872,43 @@
    * Exercise constant folding on logical or.
    */
 
-  /// CHECK-START: long Main.OrIntLong() constant_folding (before)
+  /// CHECK-START: long Main.OrIntLong() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const10:i\d+>>  IntConstant 10
   /// CHECK-DAG:     <<Const3L:j\d+>>  LongConstant 3
   /// CHECK-DAG:     <<TypeConv:j\d+>> TypeConversion [<<Const10>>]
   /// CHECK-DAG:     <<Or:j\d+>>       Or [<<TypeConv>>,<<Const3L>>]
   /// CHECK-DAG:                       Return [<<Or>>]
 
-  /// CHECK-START: long Main.OrIntLong() constant_folding (after)
+  /// CHECK-START: long Main.OrIntLong() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const11:j\d+>>  LongConstant 11
   /// CHECK-DAG:                       Return [<<Const11>>]
 
-  /// CHECK-START: long Main.OrIntLong() constant_folding (after)
+  /// CHECK-START: long Main.OrIntLong() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       Or
 
   public static long OrIntLong() {
-    int lhs = 10;
-    long rhs = 3;
+    int lhs = $inline$int(10);
+    long rhs = $inline$long(3L);
     return lhs | rhs;
   }
 
-  /// CHECK-START: long Main.OrLongInt() constant_folding (before)
+  /// CHECK-START: long Main.OrLongInt() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const10L:j\d+>> LongConstant 10
   /// CHECK-DAG:     <<Const3:i\d+>>   IntConstant 3
   /// CHECK-DAG:     <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
   /// CHECK-DAG:     <<Or:j\d+>>       Or [<<TypeConv>>,<<Const10L>>]
   /// CHECK-DAG:                       Return [<<Or>>]
 
-  /// CHECK-START: long Main.OrLongInt() constant_folding (after)
+  /// CHECK-START: long Main.OrLongInt() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const11:j\d+>>  LongConstant 11
   /// CHECK-DAG:                       Return [<<Const11>>]
 
-  /// CHECK-START: long Main.OrLongInt() constant_folding (after)
+  /// CHECK-START: long Main.OrLongInt() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       Or
 
   public static long OrLongInt() {
-    long lhs = 10;
-    int rhs = 3;
+    long lhs = $inline$long(10L);
+    int rhs = $inline$int(3);
     return lhs | rhs;
   }
 
@@ -904,43 +917,43 @@
    * Exercise constant folding on logical exclusive or.
    */
 
-  /// CHECK-START: long Main.XorIntLong() constant_folding (before)
+  /// CHECK-START: long Main.XorIntLong() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const10:i\d+>>  IntConstant 10
   /// CHECK-DAG:     <<Const3L:j\d+>>  LongConstant 3
   /// CHECK-DAG:     <<TypeConv:j\d+>> TypeConversion [<<Const10>>]
   /// CHECK-DAG:     <<Xor:j\d+>>      Xor [<<TypeConv>>,<<Const3L>>]
   /// CHECK-DAG:                       Return [<<Xor>>]
 
-  /// CHECK-START: long Main.XorIntLong() constant_folding (after)
+  /// CHECK-START: long Main.XorIntLong() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const9:j\d+>>   LongConstant 9
   /// CHECK-DAG:                       Return [<<Const9>>]
 
-  /// CHECK-START: long Main.XorIntLong() constant_folding (after)
+  /// CHECK-START: long Main.XorIntLong() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       Xor
 
   public static long XorIntLong() {
-    int lhs = 10;
-    long rhs = 3;
+    int lhs = $inline$int(10);
+    long rhs = $inline$long(3L);
     return lhs ^ rhs;
   }
 
-  /// CHECK-START: long Main.XorLongInt() constant_folding (before)
+  /// CHECK-START: long Main.XorLongInt() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const10L:j\d+>> LongConstant 10
   /// CHECK-DAG:     <<Const3:i\d+>>   IntConstant 3
   /// CHECK-DAG:     <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
   /// CHECK-DAG:     <<Xor:j\d+>>      Xor [<<TypeConv>>,<<Const10L>>]
   /// CHECK-DAG:                       Return [<<Xor>>]
 
-  /// CHECK-START: long Main.XorLongInt() constant_folding (after)
+  /// CHECK-START: long Main.XorLongInt() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const9:j\d+>>   LongConstant 9
   /// CHECK-DAG:                       Return [<<Const9>>]
 
-  /// CHECK-START: long Main.XorLongInt() constant_folding (after)
+  /// CHECK-START: long Main.XorLongInt() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       Xor
 
   public static long XorLongInt() {
-    long lhs = 10;
-    int rhs = 3;
+    long lhs = $inline$long(10L);
+    int rhs = $inline$int(3);
     return lhs ^ rhs;
   }
 
@@ -949,23 +962,23 @@
    * Exercise constant folding on constant (static) condition.
    */
 
-  /// CHECK-START: int Main.StaticCondition() constant_folding (before)
+  /// CHECK-START: int Main.StaticCondition() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const7:i\d+>>  IntConstant 7
   /// CHECK-DAG:     <<Const2:i\d+>>  IntConstant 2
   /// CHECK-DAG:     <<Cond:z\d+>>    GreaterThanOrEqual [<<Const7>>,<<Const2>>]
-  /// CHECK-DAG:                      If [<<Cond>>]
+  /// CHECK-DAG:                      Select [{{i\d+}},{{i\d+}},<<Cond>>]
 
-  /// CHECK-START: int Main.StaticCondition() constant_folding (after)
+  /// CHECK-START: int Main.StaticCondition() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const1:i\d+>>  IntConstant 1
-  /// CHECK-DAG:                      If [<<Const1>>]
+  /// CHECK-DAG:                      Select [{{i\d+}},{{i\d+}},<<Const1>>]
 
-  /// CHECK-START: int Main.StaticCondition() constant_folding (after)
+  /// CHECK-START: int Main.StaticCondition() constant_folding_after_inlining (after)
   /// CHECK-NOT:                      GreaterThanOrEqual
 
   public static int StaticCondition() {
     int a, b, c;
-    a = 7;
-    b = 2;
+    a = $inline$int(7);
+    b = $inline$int(2);
     if (a < b)
       c = a + b;
     else
@@ -1010,28 +1023,30 @@
   * (forward) post-order traversal of the dominator tree.
    */
 
-  /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding (before)
+  /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding_after_inlining (before)
+  /// CHECK-DAG:     <<Cond:z\d+>>    ParameterValue
   /// CHECK-DAG:     <<Const2:i\d+>>  IntConstant 2
   /// CHECK-DAG:     <<Const5:i\d+>>  IntConstant 5
   /// CHECK-DAG:     <<Add:i\d+>>     Add [<<Const5>>,<<Const2>>]
   /// CHECK-DAG:     <<Sub:i\d+>>     Sub [<<Const5>>,<<Const2>>]
-  /// CHECK-DAG:     <<Phi:i\d+>>     Phi [<<Add>>,<<Sub>>]
+  /// CHECK-DAG:     <<Phi:i\d+>>     Select [<<Sub>>,<<Add>>,<<Cond>>]
   /// CHECK-DAG:                      Return [<<Phi>>]
 
-  /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding (after)
+  /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding_after_inlining (after)
+  /// CHECK-DAG:     <<Cond:z\d+>>    ParameterValue
   /// CHECK-DAG:     <<Const3:i\d+>>  IntConstant 3
   /// CHECK-DAG:     <<Const7:i\d+>>  IntConstant 7
-  /// CHECK-DAG:     <<Phi:i\d+>>     Phi [<<Const7>>,<<Const3>>]
+  /// CHECK-DAG:     <<Phi:i\d+>>     Select [<<Const3>>,<<Const7>>,<<Cond>>]
   /// CHECK-DAG:                      Return [<<Phi>>]
 
-  /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding (after)
+  /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding_after_inlining (after)
   /// CHECK-NOT:                      Add
   /// CHECK-NOT:                      Sub
 
   public static int JumpsAndConditionals(boolean cond) {
     int a, b, c;
-    a = 5;
-    b = 2;
+    a = $inline$int(5);
+    b = $inline$int(2);
     if (cond)
       c = a + b;
     else
@@ -1310,204 +1325,204 @@
    * Exercise constant folding on type conversions.
    */
 
-  /// CHECK-START: int Main.ReturnInt33() constant_folding (before)
+  /// CHECK-START: int Main.ReturnInt33() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const33:j\d+>>  LongConstant 33
   /// CHECK-DAG:     <<Convert:i\d+>>  TypeConversion [<<Const33>>]
   /// CHECK-DAG:                       Return [<<Convert>>]
 
-  /// CHECK-START: int Main.ReturnInt33() constant_folding (after)
+  /// CHECK-START: int Main.ReturnInt33() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const33:i\d+>>  IntConstant 33
   /// CHECK-DAG:                       Return [<<Const33>>]
 
-  /// CHECK-START: int Main.ReturnInt33() constant_folding (after)
+  /// CHECK-START: int Main.ReturnInt33() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       TypeConversion
 
   public static int ReturnInt33() {
-    long imm = 33L;
+    long imm = $inline$long(33L);
     return (int) imm;
   }
 
-  /// CHECK-START: int Main.ReturnIntMax() constant_folding (before)
+  /// CHECK-START: int Main.ReturnIntMax() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<ConstMax:f\d+>> FloatConstant 1e+34
   /// CHECK-DAG:     <<Convert:i\d+>>  TypeConversion [<<ConstMax>>]
   /// CHECK-DAG:                       Return [<<Convert>>]
 
-  /// CHECK-START: int Main.ReturnIntMax() constant_folding (after)
+  /// CHECK-START: int Main.ReturnIntMax() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<ConstMax:i\d+>> IntConstant 2147483647
   /// CHECK-DAG:                       Return [<<ConstMax>>]
 
-  /// CHECK-START: int Main.ReturnIntMax() constant_folding (after)
+  /// CHECK-START: int Main.ReturnIntMax() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       TypeConversion
 
   public static int ReturnIntMax() {
-    float imm = 1.0e34f;
+    float imm = $inline$float(1.0e34f);
     return (int) imm;
   }
 
-  /// CHECK-START: int Main.ReturnInt0() constant_folding (before)
+  /// CHECK-START: int Main.ReturnInt0() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<ConstNaN:d\d+>> DoubleConstant nan
   /// CHECK-DAG:     <<Convert:i\d+>>  TypeConversion [<<ConstNaN>>]
   /// CHECK-DAG:                       Return [<<Convert>>]
 
-  /// CHECK-START: int Main.ReturnInt0() constant_folding (after)
+  /// CHECK-START: int Main.ReturnInt0() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const0:i\d+>>   IntConstant 0
   /// CHECK-DAG:                       Return [<<Const0>>]
 
-  /// CHECK-START: int Main.ReturnInt0() constant_folding (after)
+  /// CHECK-START: int Main.ReturnInt0() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       TypeConversion
 
   public static int ReturnInt0() {
-    double imm = Double.NaN;
+    double imm = $inline$double(Double.NaN);
     return (int) imm;
   }
 
-  /// CHECK-START: long Main.ReturnLong33() constant_folding (before)
+  /// CHECK-START: long Main.ReturnLong33() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const33:i\d+>>  IntConstant 33
   /// CHECK-DAG:     <<Convert:j\d+>>  TypeConversion [<<Const33>>]
   /// CHECK-DAG:                       Return [<<Convert>>]
 
-  /// CHECK-START: long Main.ReturnLong33() constant_folding (after)
+  /// CHECK-START: long Main.ReturnLong33() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const33:j\d+>>  LongConstant 33
   /// CHECK-DAG:                       Return [<<Const33>>]
 
-  /// CHECK-START: long Main.ReturnLong33() constant_folding (after)
+  /// CHECK-START: long Main.ReturnLong33() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       TypeConversion
 
   public static long ReturnLong33() {
-    int imm = 33;
+    int imm = $inline$int(33);
     return (long) imm;
   }
 
-  /// CHECK-START: long Main.ReturnLong34() constant_folding (before)
+  /// CHECK-START: long Main.ReturnLong34() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const34:f\d+>>  FloatConstant 34
   /// CHECK-DAG:     <<Convert:j\d+>>  TypeConversion [<<Const34>>]
   /// CHECK-DAG:                       Return [<<Convert>>]
 
-  /// CHECK-START: long Main.ReturnLong34() constant_folding (after)
+  /// CHECK-START: long Main.ReturnLong34() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const34:j\d+>>  LongConstant 34
   /// CHECK-DAG:                       Return [<<Const34>>]
 
-  /// CHECK-START: long Main.ReturnLong34() constant_folding (after)
+  /// CHECK-START: long Main.ReturnLong34() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       TypeConversion
 
   public static long ReturnLong34() {
-    float imm = 34.0f;
+    float imm = $inline$float(34.0f);
     return (long) imm;
   }
 
-  /// CHECK-START: long Main.ReturnLong0() constant_folding (before)
+  /// CHECK-START: long Main.ReturnLong0() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<ConstNaN:d\d+>> DoubleConstant nan
   /// CHECK-DAG:     <<Convert:j\d+>>  TypeConversion [<<ConstNaN>>]
   /// CHECK-DAG:                       Return [<<Convert>>]
 
-  /// CHECK-START: long Main.ReturnLong0() constant_folding (after)
+  /// CHECK-START: long Main.ReturnLong0() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const0:j\d+>>   LongConstant 0
   /// CHECK-DAG:                       Return [<<Const0>>]
 
-  /// CHECK-START: long Main.ReturnLong0() constant_folding (after)
+  /// CHECK-START: long Main.ReturnLong0() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       TypeConversion
 
   public static long ReturnLong0() {
-    double imm = -Double.NaN;
+    double imm = $inline$double(-Double.NaN);
     return (long) imm;
   }
 
-  /// CHECK-START: float Main.ReturnFloat33() constant_folding (before)
+  /// CHECK-START: float Main.ReturnFloat33() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const33:i\d+>>  IntConstant 33
   /// CHECK-DAG:     <<Convert:f\d+>>  TypeConversion [<<Const33>>]
   /// CHECK-DAG:                       Return [<<Convert>>]
 
-  /// CHECK-START: float Main.ReturnFloat33() constant_folding (after)
+  /// CHECK-START: float Main.ReturnFloat33() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const33:f\d+>>  FloatConstant 33
   /// CHECK-DAG:                       Return [<<Const33>>]
 
-  /// CHECK-START: float Main.ReturnFloat33() constant_folding (after)
+  /// CHECK-START: float Main.ReturnFloat33() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       TypeConversion
 
   public static float ReturnFloat33() {
-    int imm = 33;
+    int imm = $inline$int(33);
     return (float) imm;
   }
 
-  /// CHECK-START: float Main.ReturnFloat34() constant_folding (before)
+  /// CHECK-START: float Main.ReturnFloat34() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const34:j\d+>>  LongConstant 34
   /// CHECK-DAG:     <<Convert:f\d+>>  TypeConversion [<<Const34>>]
   /// CHECK-DAG:                       Return [<<Convert>>]
 
-  /// CHECK-START: float Main.ReturnFloat34() constant_folding (after)
+  /// CHECK-START: float Main.ReturnFloat34() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const34:f\d+>>  FloatConstant 34
   /// CHECK-DAG:                       Return [<<Const34>>]
 
-  /// CHECK-START: float Main.ReturnFloat34() constant_folding (after)
+  /// CHECK-START: float Main.ReturnFloat34() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       TypeConversion
 
   public static float ReturnFloat34() {
-    long imm = 34L;
+    long imm = $inline$long(34L);
     return (float) imm;
   }
 
-  /// CHECK-START: float Main.ReturnFloat99P25() constant_folding (before)
+  /// CHECK-START: float Main.ReturnFloat99P25() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const:d\d+>>    DoubleConstant 99.25
   /// CHECK-DAG:     <<Convert:f\d+>>  TypeConversion [<<Const>>]
   /// CHECK-DAG:                       Return [<<Convert>>]
 
-  /// CHECK-START: float Main.ReturnFloat99P25() constant_folding (after)
+  /// CHECK-START: float Main.ReturnFloat99P25() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const:f\d+>>    FloatConstant 99.25
   /// CHECK-DAG:                       Return [<<Const>>]
 
-  /// CHECK-START: float Main.ReturnFloat99P25() constant_folding (after)
+  /// CHECK-START: float Main.ReturnFloat99P25() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       TypeConversion
 
   public static float ReturnFloat99P25() {
-    double imm = 99.25;
+    double imm = $inline$double(99.25);
     return (float) imm;
   }
 
-  /// CHECK-START: double Main.ReturnDouble33() constant_folding (before)
+  /// CHECK-START: double Main.ReturnDouble33() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const33:i\d+>>  IntConstant 33
   /// CHECK-DAG:     <<Convert:d\d+>>  TypeConversion [<<Const33>>]
   /// CHECK-DAG:                       Return [<<Convert>>]
 
-  /// CHECK-START: double Main.ReturnDouble33() constant_folding (after)
+  /// CHECK-START: double Main.ReturnDouble33() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const33:d\d+>>  DoubleConstant 33
   /// CHECK-DAG:                       Return [<<Const33>>]
 
   public static double ReturnDouble33() {
-    int imm = 33;
+    int imm = $inline$int(33);
     return (double) imm;
   }
 
-  /// CHECK-START: double Main.ReturnDouble34() constant_folding (before)
+  /// CHECK-START: double Main.ReturnDouble34() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const34:j\d+>>  LongConstant 34
   /// CHECK-DAG:     <<Convert:d\d+>>  TypeConversion [<<Const34>>]
   /// CHECK-DAG:                       Return [<<Convert>>]
 
-  /// CHECK-START: double Main.ReturnDouble34() constant_folding (after)
+  /// CHECK-START: double Main.ReturnDouble34() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const34:d\d+>>  DoubleConstant 34
   /// CHECK-DAG:                       Return [<<Const34>>]
 
-  /// CHECK-START: double Main.ReturnDouble34() constant_folding (after)
+  /// CHECK-START: double Main.ReturnDouble34() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       TypeConversion
 
   public static double ReturnDouble34() {
-    long imm = 34L;
+    long imm = $inline$long(34L);
     return (double) imm;
   }
 
-  /// CHECK-START: double Main.ReturnDouble99P25() constant_folding (before)
+  /// CHECK-START: double Main.ReturnDouble99P25() constant_folding_after_inlining (before)
   /// CHECK-DAG:     <<Const:f\d+>>    FloatConstant 99.25
   /// CHECK-DAG:     <<Convert:d\d+>>  TypeConversion [<<Const>>]
   /// CHECK-DAG:                       Return [<<Convert>>]
 
-  /// CHECK-START: double Main.ReturnDouble99P25() constant_folding (after)
+  /// CHECK-START: double Main.ReturnDouble99P25() constant_folding_after_inlining (after)
   /// CHECK-DAG:     <<Const:d\d+>>    DoubleConstant 99.25
   /// CHECK-DAG:                       Return [<<Const>>]
 
-  /// CHECK-START: double Main.ReturnDouble99P25() constant_folding (after)
+  /// CHECK-START: double Main.ReturnDouble99P25() constant_folding_after_inlining (after)
   /// CHECK-NOT:                       TypeConversion
 
   public static double ReturnDouble99P25() {
-    float imm = 99.25f;
+    float imm = $inline$float(99.25f);
     return (double) imm;
   }
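
The $inline$double/int/long/float helpers called above are not shown in this hunk. A minimal sketch, assuming they are the usual trivial identity methods defined elsewhere in this test's Main ($inline$ being the run-test naming convention that asks the optimizing compiler to inline the call, which is what defers the conversion's constant operand until after inlining and moves the checks to constant_folding_after_inlining):

  public static double $inline$double(double x) { return x; }
  public static int $inline$int(int x) { return x; }
  public static long $inline$long(long x) { return x; }
  public static float $inline$float(float x) { return x; }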
 
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index 31bb94c..66e1d92 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -137,20 +137,16 @@
   /// CHECK: ArraySet
   /// CHECK: BoundsCheck
   /// CHECK: ArraySet
-  /// CHECK: BoundsCheck
-  /// CHECK: ArraySet
 
   /// CHECK-START: void Main.$opt$noinline$constantIndexing2(int[]) BCE (after)
-  /// CHECK-NOT: Deoptimize
-  /// CHECK: BoundsCheck
+  /// CHECK: Deoptimize
+  /// CHECK-NOT: BoundsCheck
   /// CHECK: ArraySet
-  /// CHECK: BoundsCheck
+  /// CHECK-NOT: BoundsCheck
   /// CHECK: ArraySet
-  /// CHECK: BoundsCheck
+  /// CHECK-NOT: BoundsCheck
   /// CHECK: ArraySet
-  /// CHECK: BoundsCheck
-  /// CHECK: ArraySet
-  /// CHECK: BoundsCheck
+  /// CHECK-NOT: BoundsCheck
   /// CHECK: ArraySet
 
   static void $opt$noinline$constantIndexing2(int[] array) {
@@ -158,8 +154,7 @@
     array[2] = 1;
     array[3] = 1;
     array[4] = 1;
-    array[-1] = 1;  // prevents the whole opt on [-1:4]
-    if (array[1] == 1) {
+    if (array[1] != 1) {
       throw new Error("");
     }
   }
@@ -173,8 +168,41 @@
   /// CHECK: ArraySet
   /// CHECK: BoundsCheck
   /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
 
   /// CHECK-START: void Main.constantIndexing2b(int[]) BCE (after)
+  /// CHECK-NOT: Deoptimize
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+
+  static void constantIndexing2b(int[] array) {
+    array[0] = 6;
+    array[1] = 6;
+    array[2] = 6;
+    array[3] = 6;
+    array[-1] = 1;  // the out-of-bounds store prevents BCE on the whole range [-1:4]
+  }
+
+  /// CHECK-START: void Main.constantIndexing2c(int[]) BCE (before)
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+
+  /// CHECK-START: void Main.constantIndexing2c(int[]) BCE (after)
   /// CHECK: Deoptimize
   /// CHECK-NOT: BoundsCheck
   /// CHECK: ArraySet
@@ -185,7 +213,7 @@
   /// CHECK-NOT: BoundsCheck
   /// CHECK: ArraySet
 
-  static void constantIndexing2b(int[] array) {
+  static void constantIndexing2c(int[] array) {
     array[0] = 7;
     array[1] = 7;
     array[2] = 7;
@@ -391,6 +419,36 @@
       array[base + 1] = 1;
   }
 
+  /// CHECK-START: void Main.constantIndexing10(int[], int) BCE (before)
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+
+  /// CHECK-START: void Main.constantIndexing10(int[], int) BCE (after)
+  /// CHECK: Deoptimize
+  /// CHECK: Deoptimize
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK: ArraySet
+
+  static void constantIndexing10(int[] array, int base) {
+    // Offset hidden in incremented base.
+    array[base] = 1;
+    array[++base] = 2;
+    array[++base] = 3;
+    array[++base] = 4;
+  }
+
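
The two Deoptimize instructions expected for constantIndexing10 are the guards BCE hoists for the whole constant range; a rough sketch of the guarded shape, where deoptimize() is only a stand-in for the runtime's transfer back to the interpreter (not a real API) and the ++base increments are shown normalized to base + k:

  static void constantIndexing10Guarded(int[] array, int base) {
    if (base < 0) deoptimize();                  // lower-bound guard for [base, base+3]
    if (base + 3 >= array.length) deoptimize();  // upper-bound guard
    array[base] = 1;       // no per-store BoundsCheck remains
    array[base + 1] = 2;
    array[base + 2] = 3;
    array[base + 3] = 4;
  }

  private static void deoptimize() { throw new Error("deopt stand-in"); }
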
   static void runAllConstantIndices() {
     int[] a1 = { 0 };
     int[] a6 = { 0, 0, 0, 0, 0, 0 };
@@ -410,31 +468,37 @@
       System.out.println("constant indices 1 failed!");
     }
 
-    caught = false;
-    try {
-      $opt$noinline$constantIndexing2(a6);
-    } catch (ArrayIndexOutOfBoundsException e) {
-      caught = true;
-    }
-    if (!caught || a6[0] != 0 || a6[1] != 1 || a6[2] != 1 ||
-                   a6[3] != 1 || a6[4] != 1 || a6[5] != 11) {
+    $opt$noinline$constantIndexing2(a6);
+    if (a6[0] != 0 || a6[1] != 1 || a6[2] != 1 ||
+        a6[3] != 1 || a6[4] != 1 || a6[5] != 11) {
       System.out.println("constant indices 2 failed!");
     }
 
     caught = false;
     try {
-      constantIndexing2b(a1);
+      constantIndexing2b(a6);
+    } catch (ArrayIndexOutOfBoundsException e) {
+      caught = true;
+    }
+    if (!caught || a6[0] != 6 || a6[1] != 6 || a6[2] != 6 ||
+                   a6[3] != 6 || a6[4] != 1 || a6[5] != 11) {
+      System.out.println("constant indices 2b failed!");
+    }
+
+    caught = false;
+    try {
+      constantIndexing2c(a1);
     } catch (ArrayIndexOutOfBoundsException e) {
       caught = true;
     }
     if (!caught || a1[0] != 7) {
-      System.out.println("constant indices 2b failed!");
+      System.out.println("constant indices 2c failed!");
     }
 
-    constantIndexing2b(a6);
+    constantIndexing2c(a6);
     if (a6[0] != 7 || a6[1] != 7 || a6[2] != 7 ||
         a6[3] != 7 || a6[4] != 1 || a6[5] != 11) {
-      System.out.println("constant indices 2b failed!");
+      System.out.println("constant indices 2c failed!");
     }
 
     int[] b4 = new int[4];
@@ -502,6 +566,12 @@
         a6[3] != 3 || a6[4] != 40 || a6[5] != 10) {
       System.out.println("constant indices 9 failed!");
     }
+
+    constantIndexing10(a6, 0);
+    if (a6[0] != 1 || a6[1] != 2  || a6[2] != 3  ||
+        a6[3] != 4 || a6[4] != 40 || a6[5] != 10) {
+      System.out.println("constant indices 10 failed!");
+    }
   }
 
   // A helper into which the actual throwing function should be inlined.
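
With the out-of-bounds store moved into constantIndexing2b, $opt$noinline$constantIndexing2 no longer throws, and BCE can cover all four stores with a single guard; a rough post-BCE shape, using the same deoptimize() stand-in sketched earlier:

  static void constantIndexing2Guarded(int[] array) {
    if (array.length <= 4) deoptimize();  // one guard covers indices 1..4
    array[1] = 1;
    array[2] = 1;
    array[3] = 1;
    array[4] = 1;
    if (array[1] != 1) {
      throw new Error("");
    }
  }
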
diff --git a/test/450-checker-types/smali/SmaliTests.smali b/test/450-checker-types/smali/SmaliTests.smali
new file mode 100644
index 0000000..6a3122e
--- /dev/null
+++ b/test/450-checker-types/smali/SmaliTests.smali
@@ -0,0 +1,120 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSmaliTests;
+.super Ljava/lang/Object;
+
+## CHECK-START: void SmaliTests.testInstanceOf_EQ0_NotInlined(java.lang.Object) builder (after)
+## CHECK-DAG:     <<Cst0:i\d+>> IntConstant 0
+## CHECK-DAG:     <<IOf:z\d+>>  InstanceOf
+## CHECK-DAG:                   Equal [<<IOf>>,<<Cst0>>]
+
+## CHECK-START: void SmaliTests.testInstanceOf_EQ0_NotInlined(java.lang.Object) instruction_simplifier (before)
+## CHECK:         CheckCast
+
+## CHECK-START: void SmaliTests.testInstanceOf_EQ0_NotInlined(java.lang.Object) instruction_simplifier (after)
+## CHECK-NOT:     CheckCast
+
+.method public static testInstanceOf_EQ0_NotInlined(Ljava/lang/Object;)V
+  .registers 3
+
+  const v0, 0x0
+  instance-of v1, p0, LSubclassC;
+  if-eq v1, v0, :return
+
+  check-cast p0, LSubclassC;
+  invoke-virtual {p0}, LSubclassC;->$noinline$g()V
+
+  :return
+  return-void
+
+.end method
+
+## CHECK-START: void SmaliTests.testInstanceOf_EQ1_NotInlined(java.lang.Object) builder (after)
+## CHECK-DAG:     <<Cst1:i\d+>> IntConstant 1
+## CHECK-DAG:     <<IOf:z\d+>>  InstanceOf
+## CHECK-DAG:                   Equal [<<IOf>>,<<Cst1>>]
+
+## CHECK-START: void SmaliTests.testInstanceOf_EQ1_NotInlined(java.lang.Object) instruction_simplifier (before)
+## CHECK:         CheckCast
+
+## CHECK-START: void SmaliTests.testInstanceOf_EQ1_NotInlined(java.lang.Object) instruction_simplifier (after)
+## CHECK-NOT:     CheckCast
+
+.method public static testInstanceOf_EQ1_NotInlined(Ljava/lang/Object;)V
+  .registers 3
+
+  const v0, 0x1
+  instance-of v1, p0, LSubclassC;
+  if-eq v1, v0, :invoke
+  return-void
+
+  :invoke
+  check-cast p0, LSubclassC;
+  invoke-virtual {p0}, LSubclassC;->$noinline$g()V
+  return-void
+
+.end method
+
+## CHECK-START: void SmaliTests.testInstanceOf_NE0_NotInlined(java.lang.Object) builder (after)
+## CHECK-DAG:     <<Cst0:i\d+>> IntConstant 0
+## CHECK-DAG:     <<IOf:z\d+>>  InstanceOf
+## CHECK-DAG:                   NotEqual [<<IOf>>,<<Cst0>>]
+
+## CHECK-START: void SmaliTests.testInstanceOf_NE0_NotInlined(java.lang.Object) instruction_simplifier (before)
+## CHECK:         CheckCast
+
+## CHECK-START: void SmaliTests.testInstanceOf_NE0_NotInlined(java.lang.Object) instruction_simplifier (after)
+## CHECK-NOT:     CheckCast
+
+.method public static testInstanceOf_NE0_NotInlined(Ljava/lang/Object;)V
+  .registers 3
+
+  const v0, 0x0
+  instance-of v1, p0, LSubclassC;
+  if-ne v1, v0, :invoke
+  return-void
+
+  :invoke
+  check-cast p0, LSubclassC;
+  invoke-virtual {p0}, LSubclassC;->$noinline$g()V
+  return-void
+
+.end method
+
+## CHECK-START: void SmaliTests.testInstanceOf_NE1_NotInlined(java.lang.Object) builder (after)
+## CHECK-DAG:     <<Cst1:i\d+>> IntConstant 1
+## CHECK-DAG:     <<IOf:z\d+>>  InstanceOf
+## CHECK-DAG:                   NotEqual [<<IOf>>,<<Cst1>>]
+
+## CHECK-START: void SmaliTests.testInstanceOf_NE1_NotInlined(java.lang.Object) instruction_simplifier (before)
+## CHECK:         CheckCast
+
+## CHECK-START: void SmaliTests.testInstanceOf_NE1_NotInlined(java.lang.Object) instruction_simplifier (after)
+## CHECK-NOT:     CheckCast
+
+.method public static testInstanceOf_NE1_NotInlined(Ljava/lang/Object;)V
+  .registers 3
+
+  const v0, 0x1
+  instance-of v1, p0, LSubclassC;
+  if-ne v1, v0, :return
+
+  check-cast p0, LSubclassC;
+  invoke-virtual {p0}, LSubclassC;->$noinline$g()V
+
+  :return
+  return-void
+
+.end method
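
Each of these Smali bodies pins down an IR shape that the Java front end is not guaranteed to emit from equivalent source; roughly, they encode comparisons like the following (a sketch reusing the SubclassC and $noinline$g definitions from this test):

  public static void instanceOfEq1(Object o) {
    if ((o instanceof SubclassC) == true) {  // builder keeps Equal [InstanceOf, 1]
      ((SubclassC) o).$noinline$g();
    }
  }

Once the simplifier rewrites the comparison so the If consumes the InstanceOf directly, the type of o inside the guarded block is known to be SubclassC, which is why the CheckCast disappears.
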
diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java
index 027a9d9..08b6cec 100644
--- a/test/450-checker-types/src/Main.java
+++ b/test/450-checker-types/src/Main.java
@@ -205,58 +205,6 @@
   public static boolean $inline$InstanceofSubclassB(Object o) { return o instanceof SubclassB; }
   public static boolean $inline$InstanceofSubclassC(Object o) { return o instanceof SubclassC; }
 
-  /// CHECK-START: void Main.testInstanceOf_NotInlined(java.lang.Object) builder (after)
-  /// CHECK-DAG:     <<Cst0:i\d+>> IntConstant 0
-  /// CHECK-DAG:     <<Cst1:i\d+>> IntConstant 1
-  /// CHECK-DAG:     <<IOf1:z\d+>> InstanceOf
-  /// CHECK-DAG:                   NotEqual [<<IOf1>>,<<Cst1>>]
-  /// CHECK-DAG:     <<IOf2:z\d+>> InstanceOf
-  /// CHECK-DAG:                   Equal [<<IOf2>>,<<Cst0>>]
-
-  /// CHECK-START: void Main.testInstanceOf_NotInlined(java.lang.Object) instruction_simplifier (before)
-  /// CHECK:         CheckCast
-  /// CHECK:         CheckCast
-  /// CHECK-NOT:     CheckCast
-
-  /// CHECK-START: void Main.testInstanceOf_NotInlined(java.lang.Object) instruction_simplifier (after)
-  /// CHECK-NOT:     CheckCast
-  public void testInstanceOf_NotInlined(Object o) {
-    if ((o instanceof SubclassC) == true) {
-      ((SubclassC)o).$noinline$g();
-    }
-    if ((o instanceof SubclassB) != false) {
-      ((SubclassB)o).$noinline$g();
-    }
-  }
-
-  /// CHECK-START: void Main.testNotInstanceOf_NotInlined(java.lang.Object) builder (after)
-  /// CHECK-DAG:     <<Cst0:i\d+>> IntConstant 0
-  /// CHECK-DAG:     <<Cst1:i\d+>> IntConstant 1
-  /// CHECK-DAG:     <<IOf1:z\d+>> InstanceOf
-  /// CHECK-DAG:                   Equal [<<IOf1>>,<<Cst1>>]
-  /// CHECK-DAG:     <<IOf2:z\d+>> InstanceOf
-  /// CHECK-DAG:                   NotEqual [<<IOf2>>,<<Cst0>>]
-
-  /// CHECK-START: void Main.testNotInstanceOf_NotInlined(java.lang.Object) instruction_simplifier (before)
-  /// CHECK:         CheckCast
-  /// CHECK:         CheckCast
-  /// CHECK-NOT:     CheckCast
-
-  /// CHECK-START: void Main.testNotInstanceOf_NotInlined(java.lang.Object) instruction_simplifier (after)
-  /// CHECK-NOT:     CheckCast
-  public void testNotInstanceOf_NotInlined(Object o) {
-    if ((o instanceof SubclassC) != true) {
-      // Empty branch to flip the condition.
-    } else {
-      ((SubclassC)o).$noinline$g();
-    }
-    if ((o instanceof SubclassB) == false) {
-      // Empty branch to flip the condition.
-    } else {
-      ((SubclassB)o).$noinline$g();
-    }
-  }
-
   /// CHECK-START: void Main.testInstanceOf_Inlined(java.lang.Object) inliner (after)
   /// CHECK-DAG:     <<IOf:z\d+>>  InstanceOf
   /// CHECK-DAG:                   If [<<IOf>>]
diff --git a/test/454-get-vreg/build b/test/454-get-vreg/build
deleted file mode 100644
index 08987b5..0000000
--- a/test/454-get-vreg/build
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Stop if something fails.
-set -e
-
-# The test relies on DEX file produced by javac+dx so keep building with them for now
-# (see b/19467889)
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
-  --dump-width=1000 ${DX_FLAGS} classes
-zip $TEST_NAME.jar classes.dex
diff --git a/test/458-checker-instruction-simplification/smali/SmaliTests.smali b/test/458-checker-instruction-simplification/smali/SmaliTests.smali
new file mode 100644
index 0000000..ede599b
--- /dev/null
+++ b/test/458-checker-instruction-simplification/smali/SmaliTests.smali
@@ -0,0 +1,193 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSmaliTests;
+.super Ljava/lang/Object;
+
+## CHECK-START: int SmaliTests.EqualTrueRhs(boolean) instruction_simplifier (before)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:     <<Const1:i\d+>>   IntConstant 1
+## CHECK-DAG:     <<Cond:z\d+>>     Equal [<<Arg>>,<<Const1>>]
+## CHECK-DAG:                       If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.EqualTrueRhs(boolean) instruction_simplifier (after)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:                       If [<<Arg>>]
+
+.method public static EqualTrueRhs(Z)I
+  .registers 3
+
+  const v0, 0x1
+  const v1, 0x5
+  if-eq p0, v0, :return
+  const v1, 0x3
+  :return
+  return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.EqualTrueLhs(boolean) instruction_simplifier (before)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:     <<Const1:i\d+>>   IntConstant 1
+## CHECK-DAG:     <<Cond:z\d+>>     Equal [<<Const1>>,<<Arg>>]
+## CHECK-DAG:                       If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.EqualTrueLhs(boolean) instruction_simplifier (after)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:                       If [<<Arg>>]
+
+.method public static EqualTrueLhs(Z)I
+  .registers 3
+
+  const v0, 0x1
+  const v1, 0x5
+  if-eq v0, p0, :return
+  const v1, 0x3
+  :return
+  return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.EqualFalseRhs(boolean) instruction_simplifier (before)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:     <<Const0:i\d+>>   IntConstant 0
+## CHECK-DAG:     <<Cond:z\d+>>     Equal [<<Arg>>,<<Const0>>]
+## CHECK-DAG:                       If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.EqualFalseRhs(boolean) instruction_simplifier (after)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:                       If [<<Arg>>]
+
+.method public static EqualFalseRhs(Z)I
+  .registers 3
+
+  const v0, 0x0
+  const v1, 0x3
+  if-eq p0, v0, :return
+  const v1, 0x5
+  :return
+  return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.EqualFalseLhs(boolean) instruction_simplifier (before)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:     <<Const0:i\d+>>   IntConstant 0
+## CHECK-DAG:     <<Cond:z\d+>>     Equal [<<Const0>>,<<Arg>>]
+## CHECK-DAG:                       If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.EqualFalseLhs(boolean) instruction_simplifier (after)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:                       If [<<Arg>>]
+
+.method public static EqualFalseLhs(Z)I
+  .registers 3
+
+  const v0, 0x0
+  const v1, 0x3
+  if-eq v0, p0, :return
+  const v1, 0x5
+  :return
+  return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.NotEqualTrueRhs(boolean) instruction_simplifier (before)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:     <<Const1:i\d+>>   IntConstant 1
+## CHECK-DAG:     <<Cond:z\d+>>     NotEqual [<<Arg>>,<<Const1>>]
+## CHECK-DAG:                       If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.NotEqualTrueRhs(boolean) instruction_simplifier (after)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:                       If [<<Arg>>]
+
+.method public static NotEqualTrueRhs(Z)I
+  .registers 3
+
+  const v0, 0x1
+  const v1, 0x3
+  if-ne p0, v0, :return
+  const v1, 0x5
+  :return
+  return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.NotEqualTrueLhs(boolean) instruction_simplifier (before)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:     <<Const1:i\d+>>   IntConstant 1
+## CHECK-DAG:     <<Cond:z\d+>>     NotEqual [<<Const1>>,<<Arg>>]
+## CHECK-DAG:                       If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.NotEqualTrueLhs(boolean) instruction_simplifier (after)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:                       If [<<Arg>>]
+
+.method public static NotEqualTrueLhs(Z)I
+  .registers 3
+
+  const v0, 0x1
+  const v1, 0x3
+  if-ne v0, p0, :return
+  const v1, 0x5
+  :return
+  return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.NotEqualFalseRhs(boolean) instruction_simplifier (before)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:     <<Const0:i\d+>>   IntConstant 0
+## CHECK-DAG:     <<Cond:z\d+>>     NotEqual [<<Arg>>,<<Const0>>]
+## CHECK-DAG:                       If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.NotEqualFalseRhs(boolean) instruction_simplifier (after)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:                       If [<<Arg>>]
+
+.method public static NotEqualFalseRhs(Z)I
+  .registers 3
+
+  const v0, 0x0
+  const v1, 0x5
+  if-ne p0, v0, :return
+  const v1, 0x3
+  :return
+  return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.NotEqualFalseLhs(boolean) instruction_simplifier (before)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:     <<Const0:i\d+>>   IntConstant 0
+## CHECK-DAG:     <<Cond:z\d+>>     NotEqual [<<Const0>>,<<Arg>>]
+## CHECK-DAG:                       If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.NotEqualFalseLhs(boolean) instruction_simplifier (after)
+## CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
+## CHECK-DAG:                       If [<<Arg>>]
+
+.method public static NotEqualFalseLhs(Z)I
+  .registers 3
+
+  const v0, 0x0
+  const v1, 0x5
+  if-ne v0, p0, :return
+  const v1, 0x3
+  :return
+  return v1
+
+.end method
+
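
As in the 450 test, these methods keep the Equal/NotEqual-against-constant pattern stable regardless of what the Java compiler emits; each corresponds to a ternary of the shape used by the Java versions deleted below, e.g. for EqualTrueRhs (hypothetical lower-case name to avoid clashing with the Smali method):

  static int equalTrueRhs(boolean arg) {
    return (arg != true) ? 3 : 5;  // arg == true selects 5, otherwise 3
  }
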
diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java
index 8d6bb65..8640148 100644
--- a/test/458-checker-instruction-simplification/src/Main.java
+++ b/test/458-checker-instruction-simplification/src/Main.java
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+import java.lang.reflect.Method;
+
 public class Main {
 
   public static void assertBooleanEquals(boolean expected, boolean result) {
@@ -826,17 +828,16 @@
 
   /// CHECK-START: long Main.NotNot1(long) instruction_simplifier (before)
   /// CHECK-DAG:     <<Arg:j\d+>>      ParameterValue
-  /// CHECK-DAG:     <<ConstF1:j\d+>>  LongConstant -1
-  /// CHECK-DAG:     <<Xor1:j\d+>>     Xor [<<Arg>>,<<ConstF1>>]
-  /// CHECK-DAG:     <<Xor2:j\d+>>     Xor [<<Xor1>>,<<ConstF1>>]
-  /// CHECK-DAG:                       Return [<<Xor2>>]
+  /// CHECK-DAG:     <<Not1:j\d+>>     Not [<<Arg>>]
+  /// CHECK-DAG:     <<Not2:j\d+>>     Not [<<Not1>>]
+  /// CHECK-DAG:                       Return [<<Not2>>]
 
   /// CHECK-START: long Main.NotNot1(long) instruction_simplifier (after)
   /// CHECK-DAG:     <<Arg:j\d+>>      ParameterValue
   /// CHECK-DAG:                       Return [<<Arg>>]
 
   /// CHECK-START: long Main.NotNot1(long) instruction_simplifier (after)
-  /// CHECK-NOT:                       Xor
+  /// CHECK-NOT:                       Not
 
   public static long NotNot1(long arg) {
     return ~~arg;
@@ -844,10 +845,9 @@
 
   /// CHECK-START: int Main.NotNot2(int) instruction_simplifier (before)
   /// CHECK-DAG:     <<Arg:i\d+>>      ParameterValue
-  /// CHECK-DAG:     <<ConstF1:i\d+>>  IntConstant -1
-  /// CHECK-DAG:     <<Xor1:i\d+>>     Xor [<<Arg>>,<<ConstF1>>]
-  /// CHECK-DAG:     <<Xor2:i\d+>>     Xor [<<Xor1>>,<<ConstF1>>]
-  /// CHECK-DAG:     <<Add:i\d+>>      Add [<<Xor2>>,<<Xor1>>]
+  /// CHECK-DAG:     <<Not1:i\d+>>     Not [<<Arg>>]
+  /// CHECK-DAG:     <<Not2:i\d+>>     Not [<<Not1>>]
+  /// CHECK-DAG:     <<Add:i\d+>>      Add [<<Not2>>,<<Not1>>]
   /// CHECK-DAG:                       Return [<<Add>>]
 
   /// CHECK-START: int Main.NotNot2(int) instruction_simplifier (after)
@@ -857,7 +857,8 @@
   /// CHECK-DAG:                       Return [<<Add>>]
 
   /// CHECK-START: int Main.NotNot2(int) instruction_simplifier (after)
-  /// CHECK-NOT:                       Xor
+  /// CHECK:                           Not
+  /// CHECK-NOT:                       Not
 
   public static int NotNot2(int arg) {
     int temp = ~arg;
@@ -965,174 +966,6 @@
     return res;
   }
 
-  /// CHECK-START: int Main.EqualTrueRhs(boolean) instruction_simplifier (before)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const1:i\d+>>   IntConstant 1
-  /// CHECK-DAG:     <<Cond:z\d+>>     Equal [<<Arg>>,<<Const1>>]
-  /// CHECK-DAG:                       If [<<Cond>>]
-
-  /// CHECK-START: int Main.EqualTrueRhs(boolean) instruction_simplifier (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:                       If [<<Arg>>]
-
-  /// CHECK-START: int Main.EqualTrueRhs(boolean) instruction_simplifier_before_codegen (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const3:i\d+>>   IntConstant 3
-  /// CHECK-DAG:     <<Const5:i\d+>>   IntConstant 5
-  /// CHECK-DAG:     <<Select:i\d+>>   Select [<<Const3>>,<<Const5>>,<<Arg>>]
-  /// CHECK-DAG:                       Return [<<Select>>]
-
-  public static int EqualTrueRhs(boolean arg) {
-    return (arg != true) ? 3 : 5;
-  }
-
-  /// CHECK-START: int Main.EqualTrueLhs(boolean) instruction_simplifier (before)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const1:i\d+>>   IntConstant 1
-  /// CHECK-DAG:     <<Cond:z\d+>>     Equal [<<Const1>>,<<Arg>>]
-  /// CHECK-DAG:                       If [<<Cond>>]
-
-  /// CHECK-START: int Main.EqualTrueLhs(boolean) instruction_simplifier (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:                       If [<<Arg>>]
-
-  /// CHECK-START: int Main.EqualTrueLhs(boolean) instruction_simplifier_before_codegen (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const3:i\d+>>   IntConstant 3
-  /// CHECK-DAG:     <<Const5:i\d+>>   IntConstant 5
-  /// CHECK-DAG:     <<Select:i\d+>>   Select [<<Const3>>,<<Const5>>,<<Arg>>]
-  /// CHECK-DAG:                       Return [<<Select>>]
-
-  public static int EqualTrueLhs(boolean arg) {
-    return (true != arg) ? 3 : 5;
-  }
-
-  /// CHECK-START: int Main.EqualFalseRhs(boolean) instruction_simplifier (before)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const0:i\d+>>   IntConstant 0
-  /// CHECK-DAG:     <<Cond:z\d+>>     Equal [<<Arg>>,<<Const0>>]
-  /// CHECK-DAG:                       If [<<Cond>>]
-
-  /// CHECK-START: int Main.EqualFalseRhs(boolean) instruction_simplifier (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:                       If [<<Arg>>]
-
-  /// CHECK-START: int Main.EqualFalseRhs(boolean) instruction_simplifier_before_codegen (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const3:i\d+>>   IntConstant 3
-  /// CHECK-DAG:     <<Const5:i\d+>>   IntConstant 5
-  /// CHECK-DAG:     <<Select:i\d+>>   Select [<<Const5>>,<<Const3>>,<<Arg>>]
-  /// CHECK-DAG:                       Return [<<Select>>]
-
-  public static int EqualFalseRhs(boolean arg) {
-    return (arg != false) ? 3 : 5;
-  }
-
-  /// CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier (before)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const0:i\d+>>   IntConstant 0
-  /// CHECK-DAG:     <<Cond:z\d+>>     Equal [<<Arg>>,<<Const0>>]
-  /// CHECK-DAG:                       If [<<Cond>>]
-
-  /// CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:                       If [<<Arg>>]
-
-  /// CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier_before_codegen (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const3:i\d+>>   IntConstant 3
-  /// CHECK-DAG:     <<Const5:i\d+>>   IntConstant 5
-  /// CHECK-DAG:     <<Select:i\d+>>   Select [<<Const5>>,<<Const3>>,<<Arg>>]
-  /// CHECK-DAG:                       Return [<<Select>>]
-
-  public static int EqualFalseLhs(boolean arg) {
-    return (false != arg) ? 3 : 5;
-  }
-
-  /// CHECK-START: int Main.NotEqualTrueRhs(boolean) instruction_simplifier (before)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const1:i\d+>>   IntConstant 1
-  /// CHECK-DAG:     <<Cond:z\d+>>     NotEqual [<<Arg>>,<<Const1>>]
-  /// CHECK-DAG:                       If [<<Cond>>]
-
-  /// CHECK-START: int Main.NotEqualTrueRhs(boolean) instruction_simplifier (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:                       If [<<Arg>>]
-
-  /// CHECK-START: int Main.NotEqualTrueRhs(boolean) instruction_simplifier_before_codegen (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const3:i\d+>>   IntConstant 3
-  /// CHECK-DAG:     <<Const5:i\d+>>   IntConstant 5
-  /// CHECK-DAG:     <<Select:i\d+>>   Select [<<Const5>>,<<Const3>>,<<Arg>>]
-  /// CHECK-DAG:                       Return [<<Select>>]
-
-  public static int NotEqualTrueRhs(boolean arg) {
-    return (arg == true) ? 3 : 5;
-  }
-
-  /// CHECK-START: int Main.NotEqualTrueLhs(boolean) instruction_simplifier (before)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const1:i\d+>>   IntConstant 1
-  /// CHECK-DAG:     <<Cond:z\d+>>     NotEqual [<<Const1>>,<<Arg>>]
-  /// CHECK-DAG:                       If [<<Cond>>]
-
-  /// CHECK-START: int Main.NotEqualTrueLhs(boolean) instruction_simplifier (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:                       If [<<Arg>>]
-
-  /// CHECK-START: int Main.NotEqualTrueLhs(boolean) instruction_simplifier_before_codegen (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const3:i\d+>>   IntConstant 3
-  /// CHECK-DAG:     <<Const5:i\d+>>   IntConstant 5
-  /// CHECK-DAG:     <<Select:i\d+>>   Select [<<Const5>>,<<Const3>>,<<Arg>>]
-  /// CHECK-DAG:                       Return [<<Select>>]
-
-  public static int NotEqualTrueLhs(boolean arg) {
-    return (true == arg) ? 3 : 5;
-  }
-
-  /// CHECK-START: int Main.NotEqualFalseRhs(boolean) instruction_simplifier (before)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const0:i\d+>>   IntConstant 0
-  /// CHECK-DAG:     <<Cond:z\d+>>     NotEqual [<<Arg>>,<<Const0>>]
-  /// CHECK-DAG:                       If [<<Cond>>]
-
-  /// CHECK-START: int Main.NotEqualFalseRhs(boolean) instruction_simplifier (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:                       If [<<Arg>>]
-
-  /// CHECK-START: int Main.NotEqualFalseRhs(boolean) instruction_simplifier_before_codegen (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const3:i\d+>>   IntConstant 3
-  /// CHECK-DAG:     <<Const5:i\d+>>   IntConstant 5
-  /// CHECK-DAG:     <<Select:i\d+>>   Select [<<Const3>>,<<Const5>>,<<Arg>>]
-  /// CHECK-DAG:                       Return [<<Select>>]
-
-  public static int NotEqualFalseRhs(boolean arg) {
-    return (arg == false) ? 3 : 5;
-  }
-
-  /// CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier (before)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const0:i\d+>>   IntConstant 0
-  /// CHECK-DAG:     <<Cond:z\d+>>     NotEqual [<<Arg>>,<<Const0>>]
-  /// CHECK-DAG:                       If [<<Cond>>]
-
-  /// CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:                       If [<<Arg>>]
-
-  /// CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier_before_codegen (after)
-  /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const3:i\d+>>   IntConstant 3
-  /// CHECK-DAG:     <<Const5:i\d+>>   IntConstant 5
-  /// CHECK-DAG:     <<Select:i\d+>>   Select [<<Const3>>,<<Const5>>,<<Arg>>]
-  /// CHECK-DAG:                       Return [<<Select>>]
-
-  public static int NotEqualFalseLhs(boolean arg) {
-    return (false == arg) ? 3 : 5;
-  }
-
   /// CHECK-START: boolean Main.EqualBoolVsIntConst(boolean) instruction_simplifier_after_bce (before)
   /// CHECK-DAG:     <<Arg:z\d+>>      ParameterValue
   /// CHECK-DAG:     <<Const0:i\d+>>   IntConstant 0
@@ -1307,17 +1140,16 @@
     return arg * 31;
   }
 
-  /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier (before)
+  /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier_after_bce (before)
   /// CHECK-DAG:      <<Const1:i\d+>>   IntConstant 1
+  /// CHECK-DAG:      <<Const13:i\d+>>  IntConstant 13
+  /// CHECK-DAG:      <<Const54:i\d+>>  IntConstant 54
   /// CHECK-DAG:      <<Field:z\d+>>    StaticFieldGet
   /// CHECK-DAG:      <<NE:z\d+>>       NotEqual [<<Field>>,<<Const1>>]
-  /// CHECK-DAG:                        If [<<NE>>]
+  /// CHECK-DAG:      <<Select:i\d+>>   Select [<<Const13>>,<<Const54>>,<<NE>>]
+  /// CHECK-DAG:                        Return [<<Select>>]
 
-  /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier (after)
-  /// CHECK-DAG:      <<Field:z\d+>>    StaticFieldGet
-  /// CHECK-DAG:                        If [<<Field>>]
-
-  /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier_before_codegen (after)
+  /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier_after_bce (after)
   /// CHECK-DAG:      <<Field:z\d+>>    StaticFieldGet
   /// CHECK-DAG:      <<Const13:i\d+>>  IntConstant 13
   /// CHECK-DAG:      <<Const54:i\d+>>  IntConstant 54
@@ -1325,20 +1157,19 @@
   /// CHECK-DAG:                        Return [<<Select>>]
 
   public static int booleanFieldNotEqualOne() {
-    return (booleanField == true) ? 13 : 54;
+    return (booleanField == $inline$true()) ? 13 : 54;
   }
 
-  /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier (before)
+  /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier_after_bce (before)
   /// CHECK-DAG:      <<Const0:i\d+>>   IntConstant 0
+  /// CHECK-DAG:      <<Const13:i\d+>>  IntConstant 13
+  /// CHECK-DAG:      <<Const54:i\d+>>  IntConstant 54
   /// CHECK-DAG:      <<Field:z\d+>>    StaticFieldGet
-  /// CHECK-DAG:      <<EQ:z\d+>>       Equal [<<Field>>,<<Const0>>]
-  /// CHECK-DAG:                        If [<<EQ>>]
+  /// CHECK-DAG:      <<EQ:z\d+>>       Equal [<<Field>>,<<Const0>>]
+  /// CHECK-DAG:      <<Select:i\d+>>   Select [<<Const13>>,<<Const54>>,<<EQ>>]
+  /// CHECK-DAG:                        Return [<<Select>>]
 
-  /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier (after)
-  /// CHECK-DAG:      <<Field:z\d+>>    StaticFieldGet
-  /// CHECK-DAG:                        If [<<Field>>]
-
-  /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier_before_codegen (after)
+  /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier_after_bce (after)
   /// CHECK-DAG:      <<Field:z\d+>>    StaticFieldGet
   /// CHECK-DAG:      <<Const13:i\d+>>  IntConstant 13
   /// CHECK-DAG:      <<Const54:i\d+>>  IntConstant 54
@@ -1346,7 +1177,7 @@
   /// CHECK-DAG:                        Return [<<Select>>]
 
   public static int booleanFieldEqualZero() {
-    return (booleanField != false) ? 13 : 54;
+    return (booleanField != $inline$false()) ? 13 : 54;
   }
 
   /// CHECK-START: int Main.intConditionNotEqualOne(int) instruction_simplifier_after_bce (before)
@@ -1374,7 +1205,7 @@
   // LessThanOrEqual instructions.
 
   public static int intConditionNotEqualOne(int i) {
-    return ((i > 42) == true) ? 13 : 54;
+    return ((i > 42) == $inline$true()) ? 13 : 54;
   }
 
   /// CHECK-START: int Main.intConditionEqualZero(int) instruction_simplifier_after_bce (before)
@@ -1402,7 +1233,7 @@
   // LessThanOrEqual instructions.
 
   public static int intConditionEqualZero(int i) {
-    return ((i > 42) != false) ? 13 : 54;
+    return ((i > 42) != $inline$false()) ? 13 : 54;
   }
 
   // Test that conditions on float/double are not flipped.
@@ -1770,6 +1601,16 @@
     return (short) (value & 0x17fff);
   }
 
+  public static int runSmaliTest(String name, boolean input) {
+    try {
+      Class<?> c = Class.forName("SmaliTests");
+      Method m = c.getMethod(name, new Class[] { boolean.class });
+      return (Integer) m.invoke(null, input);
+    } catch (Exception ex) {
+      throw new Error(ex);
+    }
+  }
+
   public static void main(String[] args) {
     int arg = 123456;
 
@@ -1804,14 +1645,6 @@
     assertIntEquals(SubNeg1(arg, arg + 1), -(arg + arg + 1));
     assertIntEquals(SubNeg2(arg, arg + 1), -(arg + arg + 1));
     assertLongEquals(SubNeg3(arg, arg + 1), -(2 * arg + 1));
-    assertIntEquals(EqualTrueRhs(true), 5);
-    assertIntEquals(EqualTrueLhs(true), 5);
-    assertIntEquals(EqualFalseRhs(true), 3);
-    assertIntEquals(EqualFalseLhs(true), 3);
-    assertIntEquals(NotEqualTrueRhs(true), 3);
-    assertIntEquals(NotEqualTrueLhs(true), 3);
-    assertIntEquals(NotEqualFalseRhs(true), 5);
-    assertIntEquals(NotEqualFalseLhs(true), 5);
     assertBooleanEquals(EqualBoolVsIntConst(true), true);
     assertBooleanEquals(EqualBoolVsIntConst(true), true);
     assertBooleanEquals(NotEqualBoolVsIntConst(false), false);
@@ -1906,7 +1739,20 @@
     assertIntEquals(intAnd0x17fffToShort(0x88888888), 0x0888);
     assertIntEquals(intAnd0x17fffToShort(Integer.MIN_VALUE), 0);
     assertIntEquals(intAnd0x17fffToShort(Integer.MAX_VALUE), Short.MAX_VALUE);
+
+    for (String condition : new String[] { "Equal", "NotEqual" }) {
+      for (String constant : new String[] { "True", "False" }) {
+        for (String side : new String[] { "Rhs", "Lhs" }) {
+          String name = condition + constant + side;
+          assertIntEquals(runSmaliTest(name, true), 5);
+          assertIntEquals(runSmaliTest(name, false), 3);
+        }
+      }
+    }
   }
 
+  private static boolean $inline$true() { return true; }
+  private static boolean $inline$false() { return false; }
+
   public static boolean booleanField;
 }
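
The $inline$true()/$inline$false() indirection hides the boolean constant from the graph builder, so the Equal/NotEqual against 0/1 only appears after inlining; by the time instruction_simplifier_after_bce runs, select generation has already replaced the branch, which is why the relocated checks look for a Select. A reference version of the semantics every stage must preserve (a sketch, not ART IR):

  static int booleanFieldNotEqualOneReference(boolean field) {
    // After inlining:          (field == true) ? 13 : 54
    // After select generation: Select [13, 54, NotEqual(field, 1)]
    // After the simplifier:    a Select keyed directly on field
    return field ? 13 : 54;
  }
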
diff --git a/test/463-checker-boolean-simplifier/src/Main.java b/test/463-checker-boolean-simplifier/src/Main.java
index 682f126..f0fe1b1 100644
--- a/test/463-checker-boolean-simplifier/src/Main.java
+++ b/test/463-checker-boolean-simplifier/src/Main.java
@@ -42,7 +42,7 @@
   /// CHECK-DAG:     <<Const0:i\d+>>   IntConstant 0
   /// CHECK-DAG:     <<Const1:i\d+>>   IntConstant 1
   /// CHECK-DAG:                       If [<<Param>>]
-  /// CHECK-DAG:     <<Phi:i\d+>>      Phi [<<Const1>>,<<Const0>>]
+  /// CHECK-DAG:     <<Phi:i\d+>>      Phi [<<Const0>>,<<Const1>>]
   /// CHECK-DAG:                       Return [<<Phi>>]
 
   /// CHECK-START: boolean Main.BooleanNot(boolean) select_generator (before)
@@ -185,11 +185,7 @@
   /// CHECK-NOT:                       BooleanNot
 
   public static int NegatedCondition(boolean x) {
-    if (x != false) {
-      return 42;
-    } else {
-      return 43;
-    }
+    return (x != false) ? 42 : 43;
   }
 
   /// CHECK-START: int Main.SimpleTrueBlock(boolean, int) select_generator (after)
@@ -253,13 +249,7 @@
   /// CHECK-DAG:                        Return [<<Select123>>]
 
   public static int ThreeBlocks(boolean x, boolean y) {
-    if (x) {
-      return 1;
-    } else if (y) {
-      return 2;
-    } else {
-      return 3;
-    }
+    return x ? 1 : (y ? 2 : 3);
   }
 
   /// CHECK-START: int Main.MultiplePhis() select_generator (before)
@@ -292,8 +282,10 @@
     while (y++ < 10) {
       if (y > 1) {
         x = 13;
+        continue;
       } else {
         x = 42;
+        continue;
       }
     }
     return x;
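
The continue statements added to MultiplePhis plausibly keep the two assignments to x on separate paths to the loop header; without them the if/else reconverges into a single block and becomes a candidate for select generation, leaving the test without the multiple Phis it is meant to exercise. A sketch of the collapsed form the change avoids (hypothetical name):

  static int multiplePhisCollapsed() {
    int x = 0;
    int y = 1;
    while (y++ < 10) {
      x = (y > 1) ? 13 : 42;  // one Select, no distinct value per branch
    }
    return x;
  }
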
diff --git a/test/530-checker-loops/src/Main.java b/test/530-checker-loops/src/Main.java
index deff279..2e5fd25 100644
--- a/test/530-checker-loops/src/Main.java
+++ b/test/530-checker-loops/src/Main.java
@@ -394,6 +394,34 @@
     return result;
   }
 
+  /// CHECK-START: int Main.linearForNEArrayLengthUp(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.linearForNEArrayLengthUp(int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int linearForNEArrayLengthUp(int[] x) {
+    int result = 0;
+    for (int i = 0; i != x.length; i++) {
+      result += x[i];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.linearForNEArrayLengthDown(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.linearForNEArrayLengthDown(int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int linearForNEArrayLengthDown(int[] x) {
+    int result = 0;
+    for (int i = x.length - 1; i != -1; i--) {
+      result += x[i];
+    }
+    return result;
+  }
+
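
These two methods check that induction analysis now accepts != loop bounds: with a unit stride starting at 0 (or at length - 1 counting down to the exit value -1), the counter takes every value in [0, length) exactly once before hitting the exit value, so != is as strong as the strict comparison. A quick self-check of that equivalence (a hypothetical helper, not part of the test):

  private static void checkNEEquivalence(int[] x) {
    int viaNE = 0;
    int viaLT = 0;
    for (int i = 0; i != x.length; i++) viaNE += x[i];  // form BCE now handles
    for (int i = 0; i < x.length; i++)  viaLT += x[i];  // classic strict form
    if (viaNE != viaLT) throw new Error("!= and < loops must agree");
  }
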
   /// CHECK-START: int Main.linearDoWhileUp() BCE (before)
   /// CHECK-DAG: BoundsCheck
   //
@@ -471,7 +499,7 @@
   //
   /// CHECK-START: void Main.linearTriangularOnTwoArrayLengths(int) BCE (after)
   /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
+  //  TODO: also CHECK-NOT: Deoptimize, see b/27151190
   private static void linearTriangularOnTwoArrayLengths(int n) {
     int[] a = new int[n];
     for (int i = 0; i < a.length; i++) {
@@ -513,7 +541,7 @@
   //
   /// CHECK-START: void Main.linearTriangularOnParameter(int) BCE (after)
   /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
+  //  TODO: also CHECK-NOT: Deoptimize, see b/27151190
   private static void linearTriangularOnParameter(int n) {
     int[] a = new int[n];
     for (int i = 0; i < n; i++) {
@@ -528,22 +556,22 @@
     }
   }
 
-  /// CHECK-START: void Main.linearTriangularVariations(int) BCE (before)
+  /// CHECK-START: void Main.linearTriangularVariationsInnerStrict(int) BCE (before)
   /// CHECK-DAG: BoundsCheck
   /// CHECK-DAG: BoundsCheck
   /// CHECK-DAG: BoundsCheck
   /// CHECK-DAG: BoundsCheck
   //
-  /// CHECK-START: void Main.linearTriangularVariations(int) BCE (after)
+  /// CHECK-START: void Main.linearTriangularVariationsInnerStrict(int) BCE (after)
   /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
-  private static void linearTriangularVariations(int n) {
+  //  TODO: also CHECK-NOT: Deoptimize, see b/27151190
+  private static void linearTriangularVariationsInnerStrict(int n) {
     int[] a = new int[n];
     for (int i = 0; i < n; i++) {
       for (int j = 0; j < i; j++) {
         a[j] += 1;
       }
-      for (int j = i - 1; j >= 0; j--) {
+      for (int j = i - 1; j > -1; j--) {
         a[j] += 1;
       }
       for (int j = i; j < n; j++) {
@@ -556,6 +584,34 @@
     verifyTriangular(a);
   }
 
+  /// CHECK-START: void Main.linearTriangularVariationsInnerNonStrict(int) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  /// CHECK-DAG: BoundsCheck
+  /// CHECK-DAG: BoundsCheck
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.linearTriangularVariationsInnerNonStrict(int) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  //  TODO: also CHECK-NOT: Deoptimize, see b/27151190
+  private static void linearTriangularVariationsInnerNonStrict(int n) {
+    int[] a = new int[n];
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j <= i - 1; j++) {
+        a[j] += 1;
+      }
+      for (int j = i - 1; j >= 0; j--) {
+        a[j] += 1;
+      }
+      for (int j = i; j <= n - 1; j++) {
+        a[j] += 1;
+      }
+      for (int j = n - 1; j >= i; j--) {
+        a[j] += 1;
+      }
+    }
+    verifyTriangular(a);
+  }
+
   // Verifier for triangular loops.
   private static void verifyTriangular(int[] a, int[] b, int m, int n) {
     expectEquals(n, a.length);
@@ -577,592 +633,6 @@
     }
   }
 
-  /// CHECK-START: void Main.bubble(int[]) BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: void Main.bubble(int[]) BCE (after)
-  /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
-  private static void bubble(int[] a) {
-    for (int i = a.length; --i >= 0;) {
-      for (int j = 0; j < i; j++) {
-        if (a[j] > a[j+1]) {
-          int tmp = a[j];
-          a[j]  = a[j+1];
-          a[j+1] = tmp;
-        }
-      }
-    }
-  }
-
-  /// CHECK-START: int Main.periodicIdiom(int) BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int Main.periodicIdiom(int) BCE (after)
-  /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
-  private static int periodicIdiom(int tc) {
-    int[] x = { 1, 3 };
-    // Loop with periodic sequence (0, 1).
-    int k = 0;
-    int result = 0;
-    for (int i = 0; i < tc; i++) {
-      result += x[k];
-      k = 1 - k;
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.periodicSequence2(int) BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int Main.periodicSequence2(int) BCE (after)
-  /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
-  private static int periodicSequence2(int tc) {
-    int[] x = { 1, 3 };
-    // Loop with periodic sequence (0, 1).
-    int k = 0;
-    int l = 1;
-    int result = 0;
-    for (int i = 0; i < tc; i++) {
-      result += x[k];
-      int t = l;
-      l = k;
-      k = t;
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.periodicSequence4(int) BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  /// CHECK-DAG: BoundsCheck
-  /// CHECK-DAG: BoundsCheck
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int Main.periodicSequence4(int) BCE (after)
-  /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
-  private static int periodicSequence4(int tc) {
-    int[] x = { 1, 3, 5, 7 };
-    // Loop with periodic sequence (0, 1, 2, 3).
-    int k = 0;
-    int l = 1;
-    int m = 2;
-    int n = 3;
-    int result = 0;
-    for (int i = 0; i < tc; i++) {
-      result += x[k] + x[l] + x[m] + x[n];  // all used at once
-      int t = n;
-      n = k;
-      k = l;
-      l = m;
-      m = t;
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.justRightUp1() BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int Main.justRightUp1() BCE (after)
-  /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
-  private static int justRightUp1() {
-    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
-    int result = 0;
-    for (int i = Integer.MAX_VALUE - 10, k = 0; i < Integer.MAX_VALUE; i++) {
-      result += x[k++];
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.justRightUp2() BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int Main.justRightUp2() BCE (after)
-  /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
-  private static int justRightUp2() {
-    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
-    int result = 0;
-    for (int i = Integer.MAX_VALUE - 10; i < Integer.MAX_VALUE; i++) {
-      result += x[i - Integer.MAX_VALUE + 10];
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.justRightUp3() BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int Main.justRightUp3() BCE (after)
-  /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
-  private static int justRightUp3() {
-    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
-    int result = 0;
-    for (int i = Integer.MAX_VALUE - 10, k = 0; i <= Integer.MAX_VALUE - 1; i++) {
-      result += x[k++];
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.justOOBUp() BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int Main.justOOBUp() BCE (after)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int Main.justOOBUp() BCE (after)
-  /// CHECK-NOT: Deoptimize
-  private static int justOOBUp() {
-    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
-    int result = 0;
-    // Infinite loop!
-    for (int i = Integer.MAX_VALUE - 9, k = 0; i <= Integer.MAX_VALUE; i++) {
-      result += x[k++];
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.justRightDown1() BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int Main.justRightDown1() BCE (after)
-  /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
-  private static int justRightDown1() {
-    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
-    int result = 0;
-    for (int i = Integer.MIN_VALUE + 10, k = 0; i > Integer.MIN_VALUE; i--) {
-      result += x[k++];
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.justRightDown2() BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int Main.justRightDown2() BCE (after)
-  /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
-  private static int justRightDown2() {
-    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
-    int result = 0;
-    for (int i = Integer.MIN_VALUE + 10; i > Integer.MIN_VALUE; i--) {
-      result += x[Integer.MAX_VALUE + i];
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.justRightDown3() BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int Main.justRightDown3() BCE (after)
-  /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
-  private static int justRightDown3() {
-    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
-    int result = 0;
-    for (int i = Integer.MIN_VALUE + 10, k = 0; i >= Integer.MIN_VALUE + 1; i--) {
-      result += x[k++];
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.justOOBDown() BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int Main.justOOBDown() BCE (after)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int Main.justOOBDown() BCE (after)
-  /// CHECK-NOT: Deoptimize
-  private static int justOOBDown() {
-    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
-    int result = 0;
-    // Infinite loop!
-    for (int i = Integer.MIN_VALUE + 9, k = 0; i >= Integer.MIN_VALUE; i--) {
-      result += x[k++];
-    }
-    return result;
-  }
-
-  /// CHECK-START: void Main.lowerOOB(int[]) BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: void Main.lowerOOB(int[]) BCE (after)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: void Main.lowerOOB(int[]) BCE (after)
-  /// CHECK-NOT: Deoptimize
-  private static void lowerOOB(int[] x) {
-    // OOB!
-    for (int i = -1; i < x.length; i++) {
-      sResult += x[i];
-    }
-  }
-
-  /// CHECK-START: void Main.upperOOB(int[]) BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: void Main.upperOOB(int[]) BCE (after)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: void Main.upperOOB(int[]) BCE (after)
-  /// CHECK-NOT: Deoptimize
-  private static void upperOOB(int[] x) {
-    // OOB!
-    for (int i = 0; i <= x.length; i++) {
-      sResult += x[i];
-    }
-  }
-
-  /// CHECK-START: void Main.doWhileUpOOB() BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: void Main.doWhileUpOOB() BCE (after)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: void Main.doWhileUpOOB() BCE (after)
-  /// CHECK-NOT: Deoptimize
-  private static void doWhileUpOOB() {
-    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
-    int i = 0;
-    // OOB!
-    do {
-      sResult += x[i++];
-    } while (i <= x.length);
-  }
-
-  /// CHECK-START: void Main.doWhileDownOOB() BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: void Main.doWhileDownOOB() BCE (after)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: void Main.doWhileDownOOB() BCE (after)
-  /// CHECK-NOT: Deoptimize
-  private static void doWhileDownOOB() {
-    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
-    int i = x.length - 1;
-    // OOB!
-    do {
-      sResult += x[i--];
-    } while (-1 <= i);
-  }
-
-  /// CHECK-START: int[] Main.multiply1() BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int[] Main.multiply1() BCE (after)
-  /// CHECK-NOT: BoundsCheck
-  /// CHECK-NOT: Deoptimize
-  private static int[] multiply1() {
-    int[] a = new int[10];
-    try {
-      for (int i = 0; i <= 3; i++) {
-        for (int j = 0; j <= 3; j++) {
-          // Range [0,9]: safe.
-          a[i * j] += 1;
-        }
-      }
-    } catch (Exception e) {
-      a[0] += 1000;
-    }
-    return a;
-  }
-
-  /// CHECK-START: int[] Main.multiply2() BCE (before)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int[] Main.multiply2() BCE (after)
-  /// CHECK-DAG: BoundsCheck
-  //
-  /// CHECK-START: int[] Main.multiply2() BCE (after)
-  /// CHECK-NOT: Deoptimize
-  static int[] multiply2() {
-    int[] a = new int[10];
-    try {
-      for (int i = -3; i <= 3; i++) {
-        for (int j = -3; j <= 3; j++) {
-          // Range [-9,9]: unsafe.
-          a[i * j] += 1;
-        }
-      }
-    } catch (Exception e) {
-      a[0] += 1000;
-    }
-    return a;
-  }
-
-  /// CHECK-START: int Main.linearDynamicBCE1(int[], int, int) BCE (before)
-  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
-  /// CHECK-DAG: NullCheck   loop:<<Loop>>
-  /// CHECK-DAG: ArrayLength loop:<<Loop>>
-  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.linearDynamicBCE1(int[], int, int) BCE (after)
-  /// CHECK-DAG: ArrayGet    loop:{{B\d+}}
-  /// CHECK-DAG: Deoptimize  loop:none
-  //
-  /// CHECK-START: int Main.linearDynamicBCE1(int[], int, int) BCE (after)
-  /// CHECK-NOT: NullCheck   loop:{{B\d+}}
-  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
-  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
-  private static int linearDynamicBCE1(int[] x, int lo, int hi) {
-    int result = 0;
-    for (int i = lo; i < hi; i++) {
-      sResult += x[i];
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.linearDynamicBCE2(int[], int, int, int) BCE (before)
-  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
-  /// CHECK-DAG: NullCheck   loop:<<Loop>>
-  /// CHECK-DAG: ArrayLength loop:<<Loop>>
-  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.linearDynamicBCE2(int[], int, int, int) BCE (after)
-  /// CHECK-DAG: ArrayGet    loop:{{B\d+}}
-  /// CHECK-DAG: Deoptimize  loop:none
-  //
-  /// CHECK-START: int Main.linearDynamicBCE2(int[], int, int, int) BCE (after)
-  /// CHECK-NOT: NullCheck   loop:{{B\d+}}
-  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
-  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
-  private static int linearDynamicBCE2(int[] x, int lo, int hi, int offset) {
-    int result = 0;
-    for (int i = lo; i < hi; i++) {
-      sResult += x[offset + i];
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.wrapAroundDynamicBCE(int[]) BCE (before)
-  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
-  /// CHECK-DAG: NullCheck   loop:<<Loop>>
-  /// CHECK-DAG: ArrayLength loop:<<Loop>>
-  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.wrapAroundDynamicBCE(int[]) BCE (after)
-  /// CHECK-DAG: ArrayGet    loop:{{B\d+}}
-  /// CHECK-DAG: Deoptimize  loop:none
-  //
-  /// CHECK-START: int Main.wrapAroundDynamicBCE(int[]) BCE (after)
-  /// CHECK-NOT: NullCheck   loop:{{B\d+}}
-  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
-  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
-  private static int wrapAroundDynamicBCE(int[] x) {
-    int w = 9;
-    int result = 0;
-    for (int i = 0; i < 10; i++) {
-      result += x[w];
-      w = i;
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.periodicDynamicBCE(int[]) BCE (before)
-  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
-  /// CHECK-DAG: NullCheck   loop:<<Loop>>
-  /// CHECK-DAG: ArrayLength loop:<<Loop>>
-  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.periodicDynamicBCE(int[]) BCE (after)
-  /// CHECK-DAG: ArrayGet    loop:{{B\d+}}
-  /// CHECK-DAG: Deoptimize  loop:none
-  //
-  /// CHECK-START: int Main.periodicDynamicBCE(int[]) BCE (after)
-  /// CHECK-NOT: NullCheck   loop:{{B\d+}}
-  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
-  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
-  private static int periodicDynamicBCE(int[] x) {
-    int k = 0;
-    int result = 0;
-    for (int i = 0; i < 10; i++) {
-      result += x[k];
-      k = 1 - k;
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.dynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (before)
-  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
-  /// CHECK-DAG: NullCheck   loop:<<Loop>>
-  /// CHECK-DAG: ArrayLength loop:<<Loop>>
-  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.dynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (after)
-  /// CHECK-DAG: ArrayGet    loop:{{B\d+}}
-  /// CHECK-DAG: Deoptimize  loop:none
-  //
-  /// CHECK-START: int Main.dynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (after)
-  /// CHECK-NOT: NullCheck   loop:{{B\d+}}
-  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
-  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
-  static int dynamicBCEPossiblyInfiniteLoop(int[] x, int lo, int hi) {
-    // This loop could be infinite for hi = max int. Since i is also used
-    // as subscript, however, dynamic bce can proceed.
-    int result = 0;
-    for (int i = lo; i <= hi; i++) {
-      result += x[i];
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.noDynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (before)
-  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
-  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.noDynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (after)
-  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
-  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.noDynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (after)
-  /// CHECK-NOT: Deoptimize
-  static int noDynamicBCEPossiblyInfiniteLoop(int[] x, int lo, int hi) {
-    // As above, but now the index is not used as subscript,
-    // and dynamic bce is not applied.
-    int result = 0;
-    for (int k = 0, i = lo; i <= hi; i++) {
-      result += x[k++];
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.noDynamicBCEMixedInductionTypes(int[], long, long) BCE (before)
-  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
-  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.noDynamicBCEMixedInductionTypes(int[], long, long) BCE (after)
-  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
-  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.noDynamicBCEMixedInductionTypes(int[], long, long) BCE (after)
-  /// CHECK-NOT: Deoptimize
-  static int noDynamicBCEMixedInductionTypes(int[] x, long lo, long hi) {
-    int result = 0;
-    // Mix of int and long induction.
-    int k = 0;
-    for (long i = lo; i < hi; i++) {
-      result += x[k++];
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.dynamicBCEAndConstantIndices(int[], int[][], int, int) BCE (before)
-  /// CHECK-DAG: {{l\d+}} ArrayGet loop:<<Loop:B\d+>>
-  /// CHECK-DAG: {{l\d+}} ArrayGet loop:<<Loop>>
-  /// CHECK-DAG: {{l\d+}} ArrayGet loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.dynamicBCEAndConstantIndices(int[], int[][], int, int) BCE (after)
-  //  Order matters:
-  /// CHECK:              Deoptimize loop:<<Loop:B\d+>>
-  //  CHECK-NOT:          Goto       loop:<<Loop>>
-  /// CHECK-DAG: {{l\d+}} ArrayGet   loop:<<Loop>>
-  /// CHECK-DAG: {{l\d+}} ArrayGet   loop:<<Loop>>
-  /// CHECK-DAG: {{l\d+}} ArrayGet   loop:<<Loop>>
-  /// CHECK:              Goto       loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.dynamicBCEAndConstantIndices(int[], int[][], int, int) BCE (after)
-  /// CHECK-DAG: Deoptimize loop:none
-  static int dynamicBCEAndConstantIndices(int[] x, int[][] a, int lo, int hi) {
-    // Deliberately test array length on a before the loop so that only bounds checks
-    // on constant subscripts remain, making them a viable candidate for hoisting.
-    if (a.length == 0) {
-      return -1;
-    }
-    // Loop that allows BCE on x[i].
-    int result = 0;
-    for (int i = lo; i < hi; i++) {
-      result += x[i];
-      if ((i % 10) != 0) {
-        // None of the subscripts inside a conditional are removed by dynamic bce,
-        // making them a candidate for deoptimization based on constant indices.
-        // Compiler should ensure the array loads are not subsequently hoisted
-        // "above" the deoptimization "barrier" on the bounds.
-        a[0][i] = 1;
-        a[1][i] = 2;
-        a[99][i] = 3;
-      }
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.dynamicBCEAndConstantIndicesAllPrimTypes(int[], boolean[], byte[], char[], short[], int[], long[], float[], double[], int, int) BCE (before)
-  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
-  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
-  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
-  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
-  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
-  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
-  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
-  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
-  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
-  //  For brevity, just test occurrence of at least one of each in the loop:
-  /// CHECK-DAG: NullCheck   loop:<<Loop>>
-  /// CHECK-DAG: ArrayLength loop:<<Loop>>
-  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.dynamicBCEAndConstantIndicesAllPrimTypes(int[], boolean[], byte[], char[], short[], int[], long[], float[], double[], int, int) BCE (after)
-  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
-  /// CHECK-NOT: ArrayGet    loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.dynamicBCEAndConstantIndicesAllPrimTypes(int[], boolean[], byte[], char[], short[], int[], long[], float[], double[], int, int) BCE (after)
-  /// CHECK-NOT: NullCheck   loop:{{B\d+}}
-  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
-  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
-  //
-  /// CHECK-START: int Main.dynamicBCEAndConstantIndicesAllPrimTypes(int[], boolean[], byte[], char[], short[], int[], long[], float[], double[], int, int) BCE (after)
-  /// CHECK-DAG: Deoptimize  loop:none
-  static int dynamicBCEAndConstantIndicesAllPrimTypes(int[] q,
-                                                      boolean[] r,
-                                                      byte[] s,
-                                                      char[] t,
-                                                      short[] u,
-                                                      int[] v,
-                                                      long[] w,
-                                                      float[] x,
-                                                      double[] y, int lo, int hi) {
-    int result = 0;
-    for (int i = lo; i < hi; i++) {
-      // All constant index array references can be hoisted out of the loop during BCE on q[i].
-      result += q[i] + (r[0] ? 1 : 0) + (int) s[0] + (int) t[0] + (int) u[0] + (int) v[0] +
-                                        (int) w[0] + (int) x[0] + (int) y[0];
-    }
-    return result;
-  }
-
-  /// CHECK-START: int Main.dynamicBCEAndConstantIndexRefType(int[], java.lang.Integer[], int, int) BCE (before)
-  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
-  /// CHECK-DAG: NullCheck   loop:<<Loop>>
-  /// CHECK-DAG: ArrayLength loop:<<Loop>>
-  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
-  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
-  /// CHECK-DAG: NullCheck   loop:<<Loop>>
-  /// CHECK-DAG: ArrayLength loop:<<Loop>>
-  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
-  //
-  /// CHECK-START: int Main.dynamicBCEAndConstantIndexRefType(int[], java.lang.Integer[], int, int) BCE (after)
-  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
-  /// CHECK-DAG: Deoptimize  loop:none
-  //
-  /// CHECK-START: int Main.dynamicBCEAndConstantIndexRefType(int[], java.lang.Integer[], int, int) BCE (after)
-  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
-  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
-  static int dynamicBCEAndConstantIndexRefType(int[] q, Integer[] z, int lo, int hi) {
-    int result = 0;
-    for (int i = lo; i < hi; i++) {
-      // Similar to above, but now implicit call to intValue() may prevent hoisting
-      // z[0] itself during BCE on q[i]. Therefore, we just check BCE on q[i].
-      result += q[i] + z[0];
-    }
-    return result;
-  }
-
   //
   // Verifier.
   //
@@ -1232,6 +702,8 @@
     // Special forms.
     expectEquals(55, linearForNEUp());
     expectEquals(55, linearForNEDown());
+    expectEquals(55, linearForNEArrayLengthUp(x));
+    expectEquals(55, linearForNEArrayLengthDown(x));
     expectEquals(55, linearDoWhileUp());
     expectEquals(55, linearDoWhileDown());
     expectEquals(55, linearShort());
@@ -1239,191 +711,8 @@
     linearTriangularOnTwoArrayLengths(10);
     linearTriangularOnOneArrayLength(10);
     linearTriangularOnParameter(10);
-    linearTriangularVariations(10);
-
-    // Sorting.
-    int[] sort = { 5, 4, 1, 9, 10, 2, 7, 6, 3, 8 };
-    bubble(sort);
-    for (int i = 0; i < 10; i++) {
-      expectEquals(sort[i], x[i]);
-    }
-
-    // Periodic adds (1, 3), one at the time.
-    expectEquals(0, periodicIdiom(-1));
-    for (int tc = 0; tc < 32; tc++) {
-      int expected = (tc >> 1) << 2;
-      if ((tc & 1) != 0)
-        expected += 1;
-      expectEquals(expected, periodicIdiom(tc));
-    }
-
-    // Periodic adds (1, 3), one at the time.
-    expectEquals(0, periodicSequence2(-1));
-    for (int tc = 0; tc < 32; tc++) {
-      int expected = (tc >> 1) << 2;
-      if ((tc & 1) != 0)
-        expected += 1;
-      expectEquals(expected, periodicSequence2(tc));
-    }
-
-    // Periodic adds (1, 3, 5, 7), all at once.
-    expectEquals(0, periodicSequence4(-1));
-    for (int tc = 0; tc < 32; tc++) {
-      expectEquals(tc * 16, periodicSequence4(tc));
-    }
-
-    // Large bounds.
-    expectEquals(55, justRightUp1());
-    expectEquals(55, justRightUp2());
-    expectEquals(55, justRightUp3());
-    expectEquals(55, justRightDown1());
-    expectEquals(55, justRightDown2());
-    expectEquals(55, justRightDown3());
-    sResult = 0;
-    try {
-      justOOBUp();
-    } catch (ArrayIndexOutOfBoundsException e) {
-      sResult = 1;
-    }
-    expectEquals(1, sResult);
-    sResult = 0;
-    try {
-      justOOBDown();
-    } catch (ArrayIndexOutOfBoundsException e) {
-      sResult = 1;
-    }
-    expectEquals(1, sResult);
-
-    // Lower bound goes OOB.
-    sResult = 0;
-    try {
-      lowerOOB(x);
-    } catch (ArrayIndexOutOfBoundsException e) {
-      sResult += 1000;
-    }
-    expectEquals(1000, sResult);
-
-    // Upper bound goes OOB.
-    sResult = 0;
-    try {
-      upperOOB(x);
-    } catch (ArrayIndexOutOfBoundsException e) {
-      sResult += 1000;
-    }
-    expectEquals(1055, sResult);
-
-    // Do while up goes OOB.
-    sResult = 0;
-    try {
-      doWhileUpOOB();
-    } catch (ArrayIndexOutOfBoundsException e) {
-      sResult += 1000;
-    }
-    expectEquals(1055, sResult);
-
-    // Do while down goes OOB.
-    sResult = 0;
-    try {
-      doWhileDownOOB();
-    } catch (ArrayIndexOutOfBoundsException e) {
-      sResult += 1000;
-    }
-    expectEquals(1055, sResult);
-
-    // Multiplication.
-    {
-      int[] e1 = { 7, 1, 2, 2, 1, 0, 2, 0, 0, 1 };
-      int[] a1 = multiply1();
-      for (int i = 0; i < 10; i++) {
-        expectEquals(a1[i], e1[i]);
-      }
-      int[] e2 = { 1001, 0, 0, 1, 0, 0, 1, 0, 0, 1 };
-      int[] a2 = multiply2();
-      for (int i = 0; i < 10; i++) {
-        expectEquals(a2[i], e2[i]);
-      }
-    }
-
-    // Dynamic BCE.
-    sResult = 0;
-    try {
-      linearDynamicBCE1(x, -1, x.length);
-    } catch (ArrayIndexOutOfBoundsException e) {
-      sResult += 1000;
-    }
-    expectEquals(1000, sResult);
-    sResult = 0;
-    linearDynamicBCE1(x, 0, x.length);
-    expectEquals(55, sResult);
-    sResult = 0;
-    try {
-      linearDynamicBCE1(x, 0, x.length + 1);
-    } catch (ArrayIndexOutOfBoundsException e) {
-      sResult += 1000;
-    }
-    expectEquals(1055, sResult);
-
-    // Dynamic BCE with offset.
-    sResult = 0;
-    try {
-      linearDynamicBCE2(x, 0, x.length, -1);
-    } catch (ArrayIndexOutOfBoundsException e) {
-      sResult += 1000;
-    }
-    expectEquals(1000, sResult);
-    sResult = 0;
-    linearDynamicBCE2(x, 0, x.length, 0);
-    expectEquals(55, sResult);
-    sResult = 0;
-    try {
-      linearDynamicBCE2(x, 0, x.length, 1);
-    } catch (ArrayIndexOutOfBoundsException e) {
-      sResult += 1000;
-    }
-    expectEquals(1054, sResult);
-
-    // Dynamic BCE candidates.
-    expectEquals(55, wrapAroundDynamicBCE(x));
-    expectEquals(15, periodicDynamicBCE(x));
-    expectEquals(55, dynamicBCEPossiblyInfiniteLoop(x, 0, 9));
-    expectEquals(55, noDynamicBCEPossiblyInfiniteLoop(x, 0, 9));
-    expectEquals(55, noDynamicBCEMixedInductionTypes(x, 0, 10));
-
-    // Dynamic BCE combined with constant indices.
-    int[][] a;
-    a = new int[0][0];
-    expectEquals(-1, dynamicBCEAndConstantIndices(x, a, 0, 10));
-    a = new int[100][10];
-    expectEquals(55, dynamicBCEAndConstantIndices(x, a, 0, 10));
-    for (int i = 0; i < 10; i++) {
-      expectEquals((i % 10) != 0 ? 1 : 0, a[0][i]);
-      expectEquals((i % 10) != 0 ? 2 : 0, a[1][i]);
-      expectEquals((i % 10) != 0 ? 3 : 0, a[99][i]);
-    }
-    a = new int[2][10];
-    sResult = 0;
-    try {
-      expectEquals(55, dynamicBCEAndConstantIndices(x, a, 0, 10));
-    } catch (ArrayIndexOutOfBoundsException e) {
-      sResult = 1;
-    }
-    expectEquals(1, sResult);
-    expectEquals(a[0][1], 1);
-    expectEquals(a[1][1], 2);
-
-    // Dynamic BCE combined with constant indices of all types.
-    boolean[] x1 = { true };
-    byte[] x2 = { 2 };
-    char[] x3 = { 3 };
-    short[] x4 = { 4 };
-    int[] x5 = { 5 };
-    long[] x6 = { 6 };
-    float[] x7 = { 7 };
-    double[] x8 = { 8 };
-    expectEquals(415,
-        dynamicBCEAndConstantIndicesAllPrimTypes(x, x1, x2, x3, x4, x5, x6, x7, x8, 0, 10));
-    Integer[] x9 = { 9 };
-    expectEquals(145, dynamicBCEAndConstantIndexRefType(x, x9, 0, 10));
+    linearTriangularVariationsInnerStrict(10);
+    linearTriangularVariationsInnerNonStrict(10);
   }
 
   private static void expectEquals(int expected, int result) {
diff --git a/test/530-checker-loops2/expected.txt b/test/530-checker-loops2/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/530-checker-loops2/expected.txt
diff --git a/test/530-checker-loops2/info.txt b/test/530-checker-loops2/info.txt
new file mode 100644
index 0000000..f5d334d
--- /dev/null
+++ b/test/530-checker-loops2/info.txt
@@ -0,0 +1 @@
+Test on loop optimizations.
diff --git a/test/530-checker-loops2/src/Main.java b/test/530-checker-loops2/src/Main.java
new file mode 100644
index 0000000..64be1a2
--- /dev/null
+++ b/test/530-checker-loops2/src/Main.java
@@ -0,0 +1,999 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Test on loop optimizations.
+//
+public class Main {
+
+  static int sResult;
+
+  //
+  // Various sequence variables used in bound checks.
+  //
+
+  /// CHECK-START: void Main.bubble(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.bubble(int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  //  TODO: also CHECK-NOT: Deoptimize, see b/27151190
+  private static void bubble(int[] a) {
+    for (int i = a.length; --i >= 0;) {
+      for (int j = 0; j < i; j++) {
+        if (a[j] > a[j+1]) {
+          int tmp = a[j];
+          a[j]  = a[j+1];
+          a[j+1] = tmp;
+        }
+      }
+    }
+  }
+
+  /// CHECK-START: int Main.periodicIdiom(int) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.periodicIdiom(int) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int periodicIdiom(int tc) {
+    int[] x = { 1, 3 };
+    // Loop with periodic sequence (0, 1).
+    int k = 0;
+    int result = 0;
+    for (int i = 0; i < tc; i++) {
+      result += x[k];
+      k = 1 - k;
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.periodicSequence2(int) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.periodicSequence2(int) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int periodicSequence2(int tc) {
+    int[] x = { 1, 3 };
+    // Loop with periodic sequence (0, 1).
+    int k = 0;
+    int l = 1;
+    int result = 0;
+    for (int i = 0; i < tc; i++) {
+      result += x[k];
+      int t = l;
+      l = k;
+      k = t;
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.periodicSequence4(int) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  /// CHECK-DAG: BoundsCheck
+  /// CHECK-DAG: BoundsCheck
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.periodicSequence4(int) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int periodicSequence4(int tc) {
+    int[] x = { 1, 3, 5, 7 };
+    // Loop with periodic sequence (0, 1, 2, 3).
+    int k = 0;
+    int l = 1;
+    int m = 2;
+    int n = 3;
+    int result = 0;
+    for (int i = 0; i < tc; i++) {
+      result += x[k] + x[l] + x[m] + x[n];  // all used at once
+      int t = n;
+      n = k;
+      k = l;
+      l = m;
+      m = t;
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.justRightUp1() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.justRightUp1() BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int justRightUp1() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int result = 0;
+    for (int i = Integer.MAX_VALUE - 10, k = 0; i < Integer.MAX_VALUE; i++) {
+      result += x[k++];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.justRightUp2() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.justRightUp2() BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int justRightUp2() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int result = 0;
+    for (int i = Integer.MAX_VALUE - 10; i < Integer.MAX_VALUE; i++) {
+      result += x[i - Integer.MAX_VALUE + 10];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.justRightUp3() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.justRightUp3() BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int justRightUp3() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int result = 0;
+    for (int i = Integer.MAX_VALUE - 10, k = 0; i <= Integer.MAX_VALUE - 1; i++) {
+      result += x[k++];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.justOOBUp() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.justOOBUp() BCE (after)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.justOOBUp() BCE (after)
+  /// CHECK-NOT: Deoptimize
+  private static int justOOBUp() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int result = 0;
+    // Infinite loop!
+    for (int i = Integer.MAX_VALUE - 9, k = 0; i <= Integer.MAX_VALUE; i++) {
+      result += x[k++];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.justRightDown1() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.justRightDown1() BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int justRightDown1() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int result = 0;
+    for (int i = Integer.MIN_VALUE + 10, k = 0; i > Integer.MIN_VALUE; i--) {
+      result += x[k++];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.justRightDown2() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.justRightDown2() BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int justRightDown2() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int result = 0;
+    for (int i = Integer.MIN_VALUE + 10; i > Integer.MIN_VALUE; i--) {
+      result += x[Integer.MAX_VALUE + i];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.justRightDown3() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.justRightDown3() BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int justRightDown3() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int result = 0;
+    for (int i = Integer.MIN_VALUE + 10, k = 0; i >= Integer.MIN_VALUE + 1; i--) {
+      result += x[k++];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.justOOBDown() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.justOOBDown() BCE (after)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.justOOBDown() BCE (after)
+  /// CHECK-NOT: Deoptimize
+  private static int justOOBDown() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int result = 0;
+    // Infinite loop!
+    for (int i = Integer.MIN_VALUE + 9, k = 0; i >= Integer.MIN_VALUE; i--) {
+      result += x[k++];
+    }
+    return result;
+  }
+
+  /// CHECK-START: void Main.lowerOOB(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.lowerOOB(int[]) BCE (after)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.lowerOOB(int[]) BCE (after)
+  /// CHECK-NOT: Deoptimize
+  private static void lowerOOB(int[] x) {
+    // OOB!
+    for (int i = -1; i < x.length; i++) {
+      sResult += x[i];
+    }
+  }
+
+  /// CHECK-START: void Main.upperOOB(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.upperOOB(int[]) BCE (after)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.upperOOB(int[]) BCE (after)
+  /// CHECK-NOT: Deoptimize
+  private static void upperOOB(int[] x) {
+    // OOB!
+    for (int i = 0; i <= x.length; i++) {
+      sResult += x[i];
+    }
+  }
+
+  /// CHECK-START: void Main.doWhileUpOOB() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.doWhileUpOOB() BCE (after)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.doWhileUpOOB() BCE (after)
+  /// CHECK-NOT: Deoptimize
+  private static void doWhileUpOOB() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int i = 0;
+    // OOB!
+    do {
+      sResult += x[i++];
+    } while (i <= x.length);
+  }
+
+  /// CHECK-START: void Main.doWhileDownOOB() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.doWhileDownOOB() BCE (after)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.doWhileDownOOB() BCE (after)
+  /// CHECK-NOT: Deoptimize
+  private static void doWhileDownOOB() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int i = x.length - 1;
+    // OOB!
+    do {
+      sResult += x[i--];
+    } while (-1 <= i);
+  }
+
+  /// CHECK-START: void Main.hiddenOOB1(int) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.hiddenOOB1(int) BCE (after)
+  /// CHECK-DAG: Deoptimize
+  //
+  /// CHECK-START: void Main.hiddenOOB1(int) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  private static void hiddenOOB1(int lo) {
+    int[] a = { 1 };
+    for (int i = lo; i <= 10; i++) {
+      // Dangerous loop where careless static range analysis would yield strict upper bound
+      // on index j of 5. When, for instance, lo and thus i = -2147483648, the upper bound
+      // becomes really positive due to arithmetic wrap-around, causing OOB.
+      // Dynamic BCE is feasible though, since it checks the range.
+      for (int j = 4; j < i - 5; j++) {
+        sResult += a[j - 4];
+      }
+    }
+  }
+
+  /// CHECK-START: void Main.hiddenOOB2(int) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.hiddenOOB2(int) BCE (after)
+  /// CHECK-DAG: Deoptimize
+  //
+  /// CHECK-START: void Main.hiddenOOB2(int) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  private static void hiddenOOB2(int hi) {
+    int[] a = { 1 };
+    for (int i = 0; i < hi; i++) {
+      // Dangerous loop where careless static range analysis would yield strict lower bound
+      // on index j of 5. When, for instance, hi and thus i = 2147483647, the lower bound
+      // becomes really negative due to arithmetic wrap-around, causing OOB.
+      // Dynamic BCE is feasible though, since it checks the range.
+      for (int j = 6; j > i + 5; j--) {
+        sResult += a[j - 6];
+      }
+    }
+  }
+
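+  // A minimal illustrative sketch (never called from main) of the two's-complement
+  // wrap-around that hiddenOOB1 and hiddenOOB2 above rely on: near the int extremes,
+  // i - 5 and i + 5 silently wrap, so the bounds a careless static range analysis
+  // derives from them land at the wrong end of the value range.
+  private static void wrapAroundSketch() {
+    int belowMin = Integer.MIN_VALUE - 5;  // wraps around to 2147483643
+    int aboveMax = Integer.MAX_VALUE + 5;  // wraps around to -2147483644
+    expectEquals(2147483643, belowMin);
+    expectEquals(-2147483644, aboveMax);
+  }
+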
+  /// CHECK-START: void Main.hiddenInfiniteOOB() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.hiddenInfiniteOOB() BCE (after)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.hiddenInfiniteOOB() BCE (after)
+  /// CHECK-NOT: Deoptimize
+  private static void hiddenInfiniteOOB() {
+    int[] a = { 11 };
+    for (int i = -1; i <= 0; i++) {
+      // Dangerous loop where careless static range analysis would yield a safe upper bound
+      // of -3. In reality, due to arithmetic wrap-around (when i = -1, j <= 2147483647;
+      // whereas when i = 0, j <= -3), this is an infinite loop that goes OOB.
+      for (int j = -3; j <= 2147483646 * i - 3; j++) {
+        sResult += a[j + 3];
+      }
+    }
+  }
+
+  /// CHECK-START: void Main.hiddenFiniteOOB() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: void Main.hiddenFiniteOOB() BCE (after)
+  /// CHECK-DAG: Deoptimize
+  //
+  /// CHECK-START: void Main.hiddenFiniteOOB() BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  private static void hiddenFiniteOOB() {
+    int[] a = { 111 };
+    for (int i = -1; i <= 0; i++) {
+      // Dangerous loop, similar to the one above, where the loop is now finite, but the
+      // loop still goes out of bounds for i = -1 due to the large upper bound.
+      // Dynamic BCE is feasible though, since it checks the range.
+      for (int j = -4; j < 2147483646 * i - 3; j++) {
+        sResult += a[j + 4];
+      }
+    }
+  }
+
+  /// CHECK-START: int[] Main.add() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int[] Main.add() BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int[] add() {
+    int[] a = new int[10];
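+    // With i in [0,3] and j in [0,6], i + j stays in [0,9]: statically provable
+    // in-range, so neither bounds checks nor a deoptimization guard are needed.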
+    for (int i = 0; i <= 3; i++) {
+      for (int j = 0; j <= 6; j++) {
+        a[i + j] += 1;
+      }
+    }
+    return a;
+  }
+
+  /// CHECK-START: int[] Main.multiply1() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int[] Main.multiply1() BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int[] multiply1() {
+    int[] a = new int[10];
+    try {
+      for (int i = 0; i <= 3; i++) {
+        for (int j = 0; j <= 3; j++) {
+          // Range [0,9]: safe.
+          a[i * j] += 1;
+        }
+      }
+    } catch (Exception e) {
+      a[0] += 1000;
+    }
+    return a;
+  }
+
+  /// CHECK-START: int[] Main.multiply2() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int[] Main.multiply2() BCE (after)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int[] Main.multiply2() BCE (after)
+  /// CHECK-NOT: Deoptimize
+  static int[] multiply2() {
+    int[] a = new int[10];
+    try {
+      for (int i = -3; i <= 3; i++) {
+        for (int j = -3; j <= 3; j++) {
+          // Range [-9,9]: unsafe.
+          a[i * j] += 1;
+        }
+      }
+    } catch (Exception e) {
+      a[0] += 1000;
+    }
+    return a;
+  }
+
+  /// CHECK-START: int Main.linearDynamicBCE1(int[], int, int) BCE (before)
+  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
+  /// CHECK-DAG: NullCheck   loop:<<Loop>>
+  /// CHECK-DAG: ArrayLength loop:<<Loop>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.linearDynamicBCE1(int[], int, int) BCE (after)
+  /// CHECK-DAG: ArrayGet    loop:{{B\d+}}
+  /// CHECK-DAG: Deoptimize  loop:none
+  //
+  /// CHECK-START: int Main.linearDynamicBCE1(int[], int, int) BCE (after)
+  /// CHECK-NOT: NullCheck   loop:{{B\d+}}
+  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
+  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
+  private static int linearDynamicBCE1(int[] x, int lo, int hi) {
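+    // Deliberately accumulates into static sResult (verified by the caller);
+    // the local result stays 0. The same holds for linearDynamicBCE2 below.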
+    int result = 0;
+    for (int i = lo; i < hi; i++) {
+      sResult += x[i];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.linearDynamicBCE2(int[], int, int, int) BCE (before)
+  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
+  /// CHECK-DAG: NullCheck   loop:<<Loop>>
+  /// CHECK-DAG: ArrayLength loop:<<Loop>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.linearDynamicBCE2(int[], int, int, int) BCE (after)
+  /// CHECK-DAG: ArrayGet    loop:{{B\d+}}
+  /// CHECK-DAG: Deoptimize  loop:none
+  //
+  /// CHECK-START: int Main.linearDynamicBCE2(int[], int, int, int) BCE (after)
+  /// CHECK-NOT: NullCheck   loop:{{B\d+}}
+  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
+  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
+  private static int linearDynamicBCE2(int[] x, int lo, int hi, int offset) {
+    int result = 0;
+    for (int i = lo; i < hi; i++) {
+      sResult += x[offset + i];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.wrapAroundDynamicBCE(int[]) BCE (before)
+  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
+  /// CHECK-DAG: NullCheck   loop:<<Loop>>
+  /// CHECK-DAG: ArrayLength loop:<<Loop>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.wrapAroundDynamicBCE(int[]) BCE (after)
+  /// CHECK-DAG: ArrayGet    loop:{{B\d+}}
+  /// CHECK-DAG: Deoptimize  loop:none
+  //
+  /// CHECK-START: int Main.wrapAroundDynamicBCE(int[]) BCE (after)
+  /// CHECK-NOT: NullCheck   loop:{{B\d+}}
+  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
+  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
+  private static int wrapAroundDynamicBCE(int[] x) {
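+    // w trails i by one iteration (9, 0, 1, ..., 8), so every subscript stays in [0,9].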
+    int w = 9;
+    int result = 0;
+    for (int i = 0; i < 10; i++) {
+      result += x[w];
+      w = i;
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.periodicDynamicBCE(int[]) BCE (before)
+  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
+  /// CHECK-DAG: NullCheck   loop:<<Loop>>
+  /// CHECK-DAG: ArrayLength loop:<<Loop>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.periodicDynamicBCE(int[]) BCE (after)
+  /// CHECK-DAG: ArrayGet    loop:{{B\d+}}
+  /// CHECK-DAG: Deoptimize  loop:none
+  //
+  /// CHECK-START: int Main.periodicDynamicBCE(int[]) BCE (after)
+  /// CHECK-NOT: NullCheck   loop:{{B\d+}}
+  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
+  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
+  private static int periodicDynamicBCE(int[] x) {
+    int k = 0;
+    int result = 0;
+    for (int i = 0; i < 10; i++) {
+      result += x[k];
+      k = 1 - k;
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.dynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (before)
+  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
+  /// CHECK-DAG: NullCheck   loop:<<Loop>>
+  /// CHECK-DAG: ArrayLength loop:<<Loop>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.dynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (after)
+  /// CHECK-DAG: ArrayGet    loop:{{B\d+}}
+  /// CHECK-DAG: Deoptimize  loop:none
+  //
+  /// CHECK-START: int Main.dynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (after)
+  /// CHECK-NOT: NullCheck   loop:{{B\d+}}
+  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
+  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
+  static int dynamicBCEPossiblyInfiniteLoop(int[] x, int lo, int hi) {
+    // This loop could be infinite for hi = max int. Since i is also used
+    // as subscript, however, dynamic bce can proceed.
+    int result = 0;
+    for (int i = lo; i <= hi; i++) {
+      result += x[i];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.noDynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (before)
+  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.noDynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (after)
+  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.noDynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (after)
+  /// CHECK-NOT: Deoptimize
+  static int noDynamicBCEPossiblyInfiniteLoop(int[] x, int lo, int hi) {
+    // As above, but now the index is not used as subscript,
+    // and dynamic bce is not applied.
+    int result = 0;
+    for (int k = 0, i = lo; i <= hi; i++) {
+      result += x[k++];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.noDynamicBCEMixedInductionTypes(int[], long, long) BCE (before)
+  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.noDynamicBCEMixedInductionTypes(int[], long, long) BCE (after)
+  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.noDynamicBCEMixedInductionTypes(int[], long, long) BCE (after)
+  /// CHECK-NOT: Deoptimize
+  static int noDynamicBCEMixedInductionTypes(int[] x, long lo, long hi) {
+    int result = 0;
+    // Mix of int and long induction.
+    int k = 0;
+    for (long i = lo; i < hi; i++) {
+      result += x[k++];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.dynamicBCEConstantRange(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<InnerLoop:B\d+>>
+  /// CHECK-DAG: ArrayGet    loop:<<InnerLoop>>
+  /// CHECK-DAG: If          loop:<<InnerLoop>>
+  /// CHECK-DAG: If          loop:<<OuterLoop:B\d+>>
+  /// CHECK-EVAL: "<<InnerLoop>>" != "<<OuterLoop>>"
+  //
+  /// CHECK-START: int Main.dynamicBCEConstantRange(int[]) BCE (after)
+  /// CHECK-DAG: ArrayGet   loop:<<InnerLoop:B\d+>>
+  /// CHECK-DAG: Deoptimize loop:<<OuterLoop:B\d+>>
+  /// CHECK-EVAL: "<<InnerLoop>>" != "<<OuterLoop>>"
+  //
+  /// CHECK-START: int Main.dynamicBCEConstantRange(int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  //
+  //  No additional top tests were introduced.
+  /// CHECK-START: int Main.dynamicBCEConstantRange(int[]) BCE (after)
+  /// CHECK-DAG: If
+  /// CHECK-DAG: If
+  /// CHECK-NOT: If
+  static int dynamicBCEConstantRange(int[] x) {
+    int result = 0;
+    for (int i = 2; i <= 6; i++) {
+      // Range analysis sees that the innermost loop is finite and always taken.
+      for (int j = i - 2; j <= i + 2; j++) {
+        result += x[j];
+      }
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.dynamicBCEAndConstantIndices(int[], int[][], int, int) BCE (before)
+  /// CHECK-DAG: {{l\d+}} ArrayGet loop:<<Loop:B\d+>>
+  /// CHECK-DAG: {{l\d+}} ArrayGet loop:<<Loop>>
+  /// CHECK-DAG: {{l\d+}} ArrayGet loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.dynamicBCEAndConstantIndices(int[], int[][], int, int) BCE (after)
+  //  Order matters:
+  /// CHECK:              Deoptimize loop:<<Loop:B\d+>>
+  //  CHECK-NOT:          Goto       loop:<<Loop>>
+  /// CHECK-DAG: {{l\d+}} ArrayGet   loop:<<Loop>>
+  /// CHECK-DAG: {{l\d+}} ArrayGet   loop:<<Loop>>
+  /// CHECK-DAG: {{l\d+}} ArrayGet   loop:<<Loop>>
+  /// CHECK:              Goto       loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.dynamicBCEAndConstantIndices(int[], int[][], int, int) BCE (after)
+  /// CHECK-DAG: Deoptimize loop:none
+  static int dynamicBCEAndConstantIndices(int[] x, int[][] a, int lo, int hi) {
+    // Deliberately test array length on a before the loop so that only bounds checks
+    // on constant subscripts remain, making them viable candidates for hoisting.
+    if (a.length == 0) {
+      return -1;
+    }
+    // Loop that allows BCE on x[i].
+    int result = 0;
+    for (int i = lo; i < hi; i++) {
+      result += x[i];
+      if ((i % 10) != 0) {
+        // None of the subscripts inside a conditional are removed by dynamic bce,
+        // making them candidates for deoptimization based on constant indices.
+        // The compiler should ensure the array loads are not subsequently hoisted
+        // "above" the deoptimization "barrier" on the bounds.
+        a[0][i] = 1;
+        a[1][i] = 2;
+        a[99][i] = 3;
+      }
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.dynamicBCEAndConstantIndicesAllPrimTypes(int[], boolean[], byte[], char[], short[], int[], long[], float[], double[], int, int) BCE (before)
+  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
+  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
+  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
+  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
+  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
+  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
+  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
+  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
+  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
+  //  For brevity, just test occurrence of at least one of each in the loop:
+  /// CHECK-DAG: NullCheck   loop:<<Loop>>
+  /// CHECK-DAG: ArrayLength loop:<<Loop>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.dynamicBCEAndConstantIndicesAllPrimTypes(int[], boolean[], byte[], char[], short[], int[], long[], float[], double[], int, int) BCE (after)
+  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
+  /// CHECK-NOT: ArrayGet    loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.dynamicBCEAndConstantIndicesAllPrimTypes(int[], boolean[], byte[], char[], short[], int[], long[], float[], double[], int, int) BCE (after)
+  /// CHECK-NOT: NullCheck   loop:{{B\d+}}
+  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
+  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
+  //
+  /// CHECK-START: int Main.dynamicBCEAndConstantIndicesAllPrimTypes(int[], boolean[], byte[], char[], short[], int[], long[], float[], double[], int, int) BCE (after)
+  /// CHECK-DAG: Deoptimize  loop:none
+  static int dynamicBCEAndConstantIndicesAllPrimTypes(int[] q,
+                                                      boolean[] r,
+                                                      byte[] s,
+                                                      char[] t,
+                                                      short[] u,
+                                                      int[] v,
+                                                      long[] w,
+                                                      float[] x,
+                                                      double[] y, int lo, int hi) {
+    int result = 0;
+    for (int i = lo; i < hi; i++) {
+      // All constant index array references can be hoisted out of the loop during BCE on q[i].
+      result += q[i] + (r[0] ? 1 : 0) + (int) s[0] + (int) t[0] + (int) u[0] + (int) v[0] +
+                                        (int) w[0] + (int) x[0] + (int) y[0];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.dynamicBCEAndConstantIndexRefType(int[], java.lang.Integer[], int, int) BCE (before)
+  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
+  /// CHECK-DAG: NullCheck   loop:<<Loop>>
+  /// CHECK-DAG: ArrayLength loop:<<Loop>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  /// CHECK-DAG: ArrayGet    loop:<<Loop>>
+  /// CHECK-DAG: NullCheck   loop:<<Loop>>
+  /// CHECK-DAG: ArrayLength loop:<<Loop>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.dynamicBCEAndConstantIndexRefType(int[], java.lang.Integer[], int, int) BCE (after)
+  /// CHECK-DAG: ArrayGet    loop:<<Loop:B\d+>>
+  /// CHECK-DAG: Deoptimize  loop:none
+  //
+  /// CHECK-START: int Main.dynamicBCEAndConstantIndexRefType(int[], java.lang.Integer[], int, int) BCE (after)
+  /// CHECK-NOT: ArrayLength loop:{{B\d+}}
+  /// CHECK-NOT: BoundsCheck loop:{{B\d+}}
+  static int dynamicBCEAndConstantIndexRefType(int[] q, Integer[] z, int lo, int hi) {
+    int result = 0;
+    for (int i = lo; i < hi; i++) {
+      // Similar to above, but now implicit call to intValue() may prevent hoisting
+      // z[0] itself during BCE on q[i]. Therefore, we just check BCE on q[i].
+      result += q[i] + z[0];
+    }
+    return result;
+  }
+
+  //
+  // Verifier.
+  //
+
+  public static void main(String[] args) {
+    // Set to true to also run the expensive correctness tests.
+    boolean HEAVY = false;
+
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+
+    // Sorting.
+    int[] sort = { 5, 4, 1, 9, 10, 2, 7, 6, 3, 8 };
+    bubble(sort);
+    for (int i = 0; i < 10; i++) {
+      expectEquals(sort[i], x[i]);
+    }
+
+    // Periodic adds (1, 3), one at a time.
+    expectEquals(0, periodicIdiom(-1));
+    for (int tc = 0; tc < 32; tc++) {
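+      // Each pair of iterations adds x[0] + x[1] = 1 + 3 = 4, hence (tc >> 1) << 2;
+      // an odd trailing iteration adds x[0] = 1.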
+      int expected = (tc >> 1) << 2;
+      if ((tc & 1) != 0)
+        expected += 1;
+      expectEquals(expected, periodicIdiom(tc));
+    }
+
+    // Periodic adds (1, 3), one at a time.
+    expectEquals(0, periodicSequence2(-1));
+    for (int tc = 0; tc < 32; tc++) {
+      int expected = (tc >> 1) << 2;
+      if ((tc & 1) != 0)
+        expected += 1;
+      expectEquals(expected, periodicSequence2(tc));
+    }
+
+    // Periodic adds (1, 3, 5, 7), all at once.
+    expectEquals(0, periodicSequence4(-1));
+    for (int tc = 0; tc < 32; tc++) {
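+      // Every iteration adds x[k] + x[l] + x[m] + x[n] = 1 + 3 + 5 + 7 = 16.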
+      expectEquals(tc * 16, periodicSequence4(tc));
+    }
+
+    // Large bounds.
+    expectEquals(55, justRightUp1());
+    expectEquals(55, justRightUp2());
+    expectEquals(55, justRightUp3());
+    expectEquals(55, justRightDown1());
+    expectEquals(55, justRightDown2());
+    expectEquals(55, justRightDown3());
+    sResult = 0;
+    try {
+      justOOBUp();
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult = 1;
+    }
+    expectEquals(1, sResult);
+    sResult = 0;
+    try {
+      justOOBDown();
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult = 1;
+    }
+    expectEquals(1, sResult);
+
+    // Lower bound goes OOB.
+    sResult = 0;
+    try {
+      lowerOOB(x);
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1000, sResult);
+
+    // Upper bound goes OOB.
+    sResult = 0;
+    try {
+      upperOOB(x);
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1055, sResult);
+
+    // Do while up goes OOB.
+    sResult = 0;
+    try {
+      doWhileUpOOB();
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1055, sResult);
+
+    // Do while down goes OOB.
+    sResult = 0;
+    try {
+      doWhileDownOOB();
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1055, sResult);
+
+    // Hidden OOB.
+    sResult = 0;
+    try {
+      hiddenOOB1(10);  // no OOB
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1, sResult);
+    sResult = 0;
+    try {
+      hiddenOOB1(-2147483648);  // OOB
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1001, sResult);
+    sResult = 0;
+    try {
+      hiddenOOB2(1);  // no OOB
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1, sResult);
+    if (HEAVY) {
+      sResult = 0;
+      try {
+        hiddenOOB2(2147483647);  // OOB
+      } catch (ArrayIndexOutOfBoundsException e) {
+        sResult += 1000;
+      }
+      expectEquals(1002, sResult);
+    }
+    sResult = 0;
+    try {
+      hiddenInfiniteOOB();
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1011, sResult);
+    sResult = 0;
+    try {
+      hiddenFiniteOOB();
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1111, sResult);
+
+    // Addition.
+    {
+      int[] e1 = { 1, 2, 3, 4, 4, 4, 4, 3, 2, 1 };
+      int[] a1 = add();
+      for (int i = 0; i < 10; i++) {
+        expectEquals(a1[i], e1[i]);
+      }
+    }
+
+    // Multiplication.
+    {
+      int[] e1 = { 7, 1, 2, 2, 1, 0, 2, 0, 0, 1 };
+      int[] a1 = multiply1();
+      for (int i = 0; i < 10; i++) {
+        expectEquals(a1[i], e1[i]);
+      }
+      int[] e2 = { 1001, 0, 0, 1, 0, 0, 1, 0, 0, 1 };
+      int[] a2 = multiply2();
+      for (int i = 0; i < 10; i++) {
+        expectEquals(a2[i], e2[i]);
+      }
+    }
+
+    // Dynamic BCE.
+    sResult = 0;
+    try {
+      linearDynamicBCE1(x, -1, x.length);
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1000, sResult);
+    sResult = 0;
+    linearDynamicBCE1(x, 0, x.length);
+    expectEquals(55, sResult);
+    sResult = 0;
+    try {
+      linearDynamicBCE1(x, 0, x.length + 1);
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1055, sResult);
+
+    // Dynamic BCE with offset.
+    sResult = 0;
+    try {
+      linearDynamicBCE2(x, 0, x.length, -1);
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1000, sResult);
+    sResult = 0;
+    linearDynamicBCE2(x, 0, x.length, 0);
+    expectEquals(55, sResult);
+    sResult = 0;
+    try {
+      linearDynamicBCE2(x, 0, x.length, 1);
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1054, sResult);
+
+    // Dynamic BCE candidates.
+    expectEquals(55, wrapAroundDynamicBCE(x));
+    expectEquals(15, periodicDynamicBCE(x));
+    expectEquals(55, dynamicBCEPossiblyInfiniteLoop(x, 0, 9));
+    expectEquals(55, noDynamicBCEPossiblyInfiniteLoop(x, 0, 9));
+    expectEquals(55, noDynamicBCEMixedInductionTypes(x, 0, 10));
+    expectEquals(125, dynamicBCEConstantRange(x));
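+    // (Five windows of five consecutive elements: 15 + 20 + 25 + 30 + 35 = 125.)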
+
+    // Dynamic BCE combined with constant indices.
+    int[][] a;
+    a = new int[0][0];
+    expectEquals(-1, dynamicBCEAndConstantIndices(x, a, 0, 10));
+    a = new int[100][10];
+    expectEquals(55, dynamicBCEAndConstantIndices(x, a, 0, 10));
+    for (int i = 0; i < 10; i++) {
+      expectEquals((i % 10) != 0 ? 1 : 0, a[0][i]);
+      expectEquals((i % 10) != 0 ? 2 : 0, a[1][i]);
+      expectEquals((i % 10) != 0 ? 3 : 0, a[99][i]);
+    }
+    a = new int[2][10];
+    sResult = 0;
+    try {
+      expectEquals(55, dynamicBCEAndConstantIndices(x, a, 0, 10));
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult = 1;
+    }
+    expectEquals(1, sResult);
+    expectEquals(a[0][1], 1);
+    expectEquals(a[1][1], 2);
+
+    // Dynamic BCE combined with constant indices of all types.
+    boolean[] x1 = { true };
+    byte[] x2 = { 2 };
+    char[] x3 = { 3 };
+    short[] x4 = { 4 };
+    int[] x5 = { 5 };
+    long[] x6 = { 6 };
+    float[] x7 = { 7 };
+    double[] x8 = { 8 };
+    expectEquals(415,
+        dynamicBCEAndConstantIndicesAllPrimTypes(x, x1, x2, x3, x4, x5, x6, x7, x8, 0, 10));
+    Integer[] x9 = { 9 };
+    expectEquals(145, dynamicBCEAndConstantIndexRefType(x, x9, 0, 10));
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/537-checker-inline-and-unverified/src/Main.java b/test/537-checker-inline-and-unverified/src/Main.java
index bdc14b0..b9d5fc9 100644
--- a/test/537-checker-inline-and-unverified/src/Main.java
+++ b/test/537-checker-inline-and-unverified/src/Main.java
@@ -45,12 +45,14 @@
     }
 
     public static boolean $opt$noinline$testNoInline() {
+        boolean result = true;
         try {
-            return null instanceof InaccessibleClass;
+            result = (null instanceof InaccessibleClass);
+            throw new Error("Unreachable");
         } catch (IllegalAccessError e) {
             // expected
         }
-        return false;
+        return result;
     }
 
     public static boolean $opt$inline$testInline() {
diff --git a/test/550-checker-multiply-accumulate/src/Main.java b/test/550-checker-multiply-accumulate/src/Main.java
index 2d0688d..09376a2 100644
--- a/test/550-checker-multiply-accumulate/src/Main.java
+++ b/test/550-checker-multiply-accumulate/src/Main.java
@@ -1,18 +1,18 @@
 /*
-* Copyright (C) 2015 The Android Open Source Project
-*
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-*
-*      http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 public class Main {
 
@@ -47,7 +47,7 @@
   /// CHECK:       <<Acc:i\d+>>         ParameterValue
   /// CHECK:       <<Left:i\d+>>        ParameterValue
   /// CHECK:       <<Right:i\d+>>       ParameterValue
-  /// CHECK:       <<MulAdd:i\d+>>      Arm64MultiplyAccumulate [<<Acc>>,<<Left>>,<<Right>>] kind:Add
+  /// CHECK:       <<MulAdd:i\d+>>      MultiplyAccumulate [<<Acc>>,<<Left>>,<<Right>>] kind:Add
   /// CHECK:                            Return [<<MulAdd>>]
 
   /// CHECK-START-ARM64: int Main.$opt$noinline$mulAdd(int, int, int) instruction_simplifier_arm64 (after)
@@ -57,6 +57,28 @@
   /// CHECK-START-ARM64: int Main.$opt$noinline$mulAdd(int, int, int) disassembly (after)
   /// CHECK:                            madd w{{\d+}}, w{{\d+}}, w{{\d+}}, w{{\d+}}
 
+  /// CHECK-START-ARM: int Main.$opt$noinline$mulAdd(int, int, int) instruction_simplifier_arm (before)
+  /// CHECK:       <<Acc:i\d+>>         ParameterValue
+  /// CHECK:       <<Left:i\d+>>        ParameterValue
+  /// CHECK:       <<Right:i\d+>>       ParameterValue
+  /// CHECK:       <<Mul:i\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Add:i\d+>>         Add [<<Acc>>,<<Mul>>]
+  /// CHECK:                            Return [<<Add>>]
+
+  /// CHECK-START-ARM: int Main.$opt$noinline$mulAdd(int, int, int) instruction_simplifier_arm (after)
+  /// CHECK:       <<Acc:i\d+>>         ParameterValue
+  /// CHECK:       <<Left:i\d+>>        ParameterValue
+  /// CHECK:       <<Right:i\d+>>       ParameterValue
+  /// CHECK:       <<MulAdd:i\d+>>      MultiplyAccumulate [<<Acc>>,<<Left>>,<<Right>>] kind:Add
+  /// CHECK:                            Return [<<MulAdd>>]
+
+  /// CHECK-START-ARM: int Main.$opt$noinline$mulAdd(int, int, int) instruction_simplifier_arm (after)
+  /// CHECK-NOT:                        Mul
+  /// CHECK-NOT:                        Add
+
+  /// CHECK-START-ARM: int Main.$opt$noinline$mulAdd(int, int, int) disassembly (after)
+  /// CHECK:                            mla r{{\d+}}, r{{\d+}}, r{{\d+}}, r{{\d+}}
+
   public static int $opt$noinline$mulAdd(int acc, int left, int right) {
     if (doThrow) throw new Error();
     return acc + left * right;
@@ -78,7 +100,7 @@
   /// CHECK:       <<Acc:j\d+>>         ParameterValue
   /// CHECK:       <<Left:j\d+>>        ParameterValue
   /// CHECK:       <<Right:j\d+>>       ParameterValue
-  /// CHECK:       <<MulSub:j\d+>>      Arm64MultiplyAccumulate [<<Acc>>,<<Left>>,<<Right>>] kind:Sub
+  /// CHECK:       <<MulSub:j\d+>>      MultiplyAccumulate [<<Acc>>,<<Left>>,<<Right>>] kind:Sub
   /// CHECK:                            Return [<<MulSub>>]
 
   /// CHECK-START-ARM64: long Main.$opt$noinline$mulSub(long, long, long) instruction_simplifier_arm64 (after)
@@ -88,6 +110,17 @@
   /// CHECK-START-ARM64: long Main.$opt$noinline$mulSub(long, long, long) disassembly (after)
   /// CHECK:                            msub x{{\d+}}, x{{\d+}}, x{{\d+}}, x{{\d+}}
 
+  /// CHECK-START-ARM: long Main.$opt$noinline$mulSub(long, long, long) instruction_simplifier_arm (before)
+  /// CHECK:       <<Acc:j\d+>>         ParameterValue
+  /// CHECK:       <<Left:j\d+>>        ParameterValue
+  /// CHECK:       <<Right:j\d+>>       ParameterValue
+  /// CHECK:       <<Mul:j\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Sub:j\d+>>         Sub [<<Acc>>,<<Mul>>]
+  /// CHECK:                            Return [<<Sub>>]
+
+  /// CHECK-START-ARM: long Main.$opt$noinline$mulSub(long, long, long) instruction_simplifier_arm (after)
+  /// CHECK-NOT:                        MultiplyAccumulate
+
   public static long $opt$noinline$mulSub(long acc, long left, long right) {
     if (doThrow) throw new Error();
     return acc - left * right;
@@ -117,7 +150,28 @@
   /// CHECK:                            Return [<<Or>>]
 
   /// CHECK-START-ARM64: int Main.$opt$noinline$multipleUses1(int, int, int) instruction_simplifier_arm64 (after)
-  /// CHECK-NOT:                        Arm64MultiplyAccumulate
+  /// CHECK-NOT:                        MultiplyAccumulate
+
+  /// CHECK-START-ARM: int Main.$opt$noinline$multipleUses1(int, int, int) instruction_simplifier_arm (before)
+  /// CHECK:       <<Acc:i\d+>>         ParameterValue
+  /// CHECK:       <<Left:i\d+>>        ParameterValue
+  /// CHECK:       <<Right:i\d+>>       ParameterValue
+  /// CHECK:       <<Mul:i\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Add:i\d+>>         Add [<<Acc>>,<<Mul>>]
+  /// CHECK:       <<Or:i\d+>>          Or [<<Mul>>,<<Add>>]
+  /// CHECK:                            Return [<<Or>>]
+
+  /// CHECK-START-ARM: int Main.$opt$noinline$multipleUses1(int, int, int) instruction_simplifier_arm (after)
+  /// CHECK:       <<Acc:i\d+>>         ParameterValue
+  /// CHECK:       <<Left:i\d+>>        ParameterValue
+  /// CHECK:       <<Right:i\d+>>       ParameterValue
+  /// CHECK:       <<Mul:i\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Add:i\d+>>         Add [<<Acc>>,<<Mul>>]
+  /// CHECK:       <<Or:i\d+>>          Or [<<Mul>>,<<Add>>]
+  /// CHECK:                            Return [<<Or>>]
+
+  /// CHECK-START-ARM: int Main.$opt$noinline$multipleUses1(int, int, int) instruction_simplifier_arm (after)
+  /// CHECK-NOT:                        MultiplyAccumulate
 
   public static int $opt$noinline$multipleUses1(int acc, int left, int right) {
     if (doThrow) throw new Error();
@@ -151,7 +205,30 @@
   /// CHECK:                            Return [<<Res>>]
 
   /// CHECK-START-ARM64: long Main.$opt$noinline$multipleUses2(long, long, long) instruction_simplifier_arm64 (after)
-  /// CHECK-NOT:                        Arm64MultiplyAccumulate
+  /// CHECK-NOT:                        MultiplyAccumulate
+
+  /// CHECK-START-ARM: long Main.$opt$noinline$multipleUses2(long, long, long) instruction_simplifier_arm (before)
+  /// CHECK:       <<Acc:j\d+>>         ParameterValue
+  /// CHECK:       <<Left:j\d+>>        ParameterValue
+  /// CHECK:       <<Right:j\d+>>       ParameterValue
+  /// CHECK:       <<Mul:j\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Add:j\d+>>         Add [<<Acc>>,<<Mul>>]
+  /// CHECK:       <<Sub:j\d+>>         Sub [<<Acc>>,<<Mul>>]
+  /// CHECK:       <<Res:j\d+>>         Add [<<Add>>,<<Sub>>]
+  /// CHECK:                            Return [<<Res>>]
+
+  /// CHECK-START-ARM: long Main.$opt$noinline$multipleUses2(long, long, long) instruction_simplifier_arm (after)
+  /// CHECK:       <<Acc:j\d+>>         ParameterValue
+  /// CHECK:       <<Left:j\d+>>        ParameterValue
+  /// CHECK:       <<Right:j\d+>>       ParameterValue
+  /// CHECK:       <<Mul:j\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Add:j\d+>>         Add [<<Acc>>,<<Mul>>]
+  /// CHECK:       <<Sub:j\d+>>         Sub [<<Acc>>,<<Mul>>]
+  /// CHECK:       <<Res:j\d+>>         Add [<<Add>>,<<Sub>>]
+  /// CHECK:                            Return [<<Res>>]
+
+  /// CHECK-START-ARM: long Main.$opt$noinline$multipleUses2(long, long, long) instruction_simplifier_arm (after)
+  /// CHECK-NOT:                        MultiplyAccumulate
 
 
   public static long $opt$noinline$multipleUses2(long acc, long left, long right) {
@@ -176,7 +253,7 @@
   /// CHECK-START-ARM64: int Main.$opt$noinline$mulPlusOne(int, int) instruction_simplifier_arm64 (after)
   /// CHECK:       <<Acc:i\d+>>         ParameterValue
   /// CHECK:       <<Var:i\d+>>         ParameterValue
-  /// CHECK:       <<MulAdd:i\d+>>      Arm64MultiplyAccumulate [<<Acc>>,<<Acc>>,<<Var>>] kind:Add
+  /// CHECK:       <<MulAdd:i\d+>>      MultiplyAccumulate [<<Acc>>,<<Acc>>,<<Var>>] kind:Add
   /// CHECK:                            Return [<<MulAdd>>]
 
   /// CHECK-START-ARM64: int Main.$opt$noinline$mulPlusOne(int, int) instruction_simplifier_arm64 (after)
@@ -186,6 +263,27 @@
   /// CHECK-START-ARM64: int Main.$opt$noinline$mulPlusOne(int, int) disassembly (after)
   /// CHECK:                            madd w{{\d+}}, w{{\d+}}, w{{\d+}}, w{{\d+}}
 
+  /// CHECK-START-ARM: int Main.$opt$noinline$mulPlusOne(int, int) instruction_simplifier_arm (before)
+  /// CHECK:       <<Acc:i\d+>>         ParameterValue
+  /// CHECK:       <<Var:i\d+>>         ParameterValue
+  /// CHECK:       <<Const1:i\d+>>      IntConstant 1
+  /// CHECK:       <<Add:i\d+>>         Add [<<Var>>,<<Const1>>]
+  /// CHECK:       <<Mul:i\d+>>         Mul [<<Acc>>,<<Add>>]
+  /// CHECK:                            Return [<<Mul>>]
+
+  /// CHECK-START-ARM: int Main.$opt$noinline$mulPlusOne(int, int) instruction_simplifier_arm (after)
+  /// CHECK:       <<Acc:i\d+>>         ParameterValue
+  /// CHECK:       <<Var:i\d+>>         ParameterValue
+  /// CHECK:       <<MulAdd:i\d+>>      MultiplyAccumulate [<<Acc>>,<<Acc>>,<<Var>>] kind:Add
+  /// CHECK:                            Return [<<MulAdd>>]
+
+  /// CHECK-START-ARM: int Main.$opt$noinline$mulPlusOne(int, int) instruction_simplifier_arm (after)
+  /// CHECK-NOT:                        Mul
+  /// CHECK-NOT:                        Add
+
+  /// CHECK-START-ARM: int Main.$opt$noinline$mulPlusOne(int, int) disassembly (after)
+  /// CHECK:                            mla r{{\d+}}, r{{\d+}}, r{{\d+}}, r{{\d+}}
+
   public static int $opt$noinline$mulPlusOne(int acc, int var) {
     if (doThrow) throw new Error();
     return acc * (var + 1);
@@ -207,7 +305,7 @@
   /// CHECK-START-ARM64: long Main.$opt$noinline$mulMinusOne(long, long) instruction_simplifier_arm64 (after)
   /// CHECK:       <<Acc:j\d+>>         ParameterValue
   /// CHECK:       <<Var:j\d+>>         ParameterValue
-  /// CHECK:       <<MulSub:j\d+>>      Arm64MultiplyAccumulate [<<Acc>>,<<Acc>>,<<Var>>] kind:Sub
+  /// CHECK:       <<MulSub:j\d+>>      MultiplyAccumulate [<<Acc>>,<<Acc>>,<<Var>>] kind:Sub
   /// CHECK:                            Return [<<MulSub>>]
 
   /// CHECK-START-ARM64: long Main.$opt$noinline$mulMinusOne(long, long) instruction_simplifier_arm64 (after)
@@ -217,11 +315,114 @@
   /// CHECK-START-ARM64: long Main.$opt$noinline$mulMinusOne(long, long) disassembly (after)
   /// CHECK:                            msub x{{\d+}}, x{{\d+}}, x{{\d+}}, x{{\d+}}
 
+  /// CHECK-START-ARM: long Main.$opt$noinline$mulMinusOne(long, long) instruction_simplifier_arm (before)
+  /// CHECK:       <<Acc:j\d+>>         ParameterValue
+  /// CHECK:       <<Var:j\d+>>         ParameterValue
+  /// CHECK:       <<Const1:j\d+>>      LongConstant 1
+  /// CHECK:       <<Sub:j\d+>>         Sub [<<Const1>>,<<Var>>]
+  /// CHECK:       <<Mul:j\d+>>         Mul [<<Acc>>,<<Sub>>]
+  /// CHECK:                            Return [<<Mul>>]
+
+  /// CHECK-START-ARM: long Main.$opt$noinline$mulMinusOne(long, long) instruction_simplifier_arm (after)
+  /// CHECK-NOT:                        MultiplyAccumulate
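+  // Unlike ARM64, 32-bit ARM has no 64-bit multiply-subtract, so the long
+  // Mul+Sub is expected to stay unmerged here.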
   public static long $opt$noinline$mulMinusOne(long acc, long var) {
     if (doThrow) throw new Error();
     return acc * (1 - var);
   }
 
+  /**
+   * Test basic merging of `MUL+NEG` into `MULNEG`.
+   */
+
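+  // -(left * right) is modelled as 0 - left * right, i.e. a MultiplyAccumulate
+  // with a zero accumulator (kind:Sub); on ARM64 this disassembles to mneg
+  // (an msub with the zero register).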
+  /// CHECK-START-ARM64: int Main.$opt$noinline$mulNeg(int, int) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Left:i\d+>>        ParameterValue
+  /// CHECK:       <<Right:i\d+>>       ParameterValue
+  /// CHECK:       <<Mul:i\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Neg:i\d+>>         Neg [<<Mul>>]
+  /// CHECK:                            Return [<<Neg>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$mulNeg(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Left:i\d+>>        ParameterValue
+  /// CHECK:       <<Right:i\d+>>       ParameterValue
+  /// CHECK:       <<Const0:i\d+>>      IntConstant 0
+  /// CHECK:       <<MulNeg:i\d+>>      MultiplyAccumulate [<<Const0>>,<<Left>>,<<Right>>] kind:Sub
+  /// CHECK:                            Return [<<MulNeg>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$mulNeg(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Mul
+  /// CHECK-NOT:                        Neg
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$mulNeg(int, int) disassembly (after)
+  /// CHECK:                            mneg w{{\d+}}, w{{\d+}}, w{{\d+}}
+
+  /// CHECK-START-ARM: int Main.$opt$noinline$mulNeg(int, int) instruction_simplifier_arm (before)
+  /// CHECK:       <<Left:i\d+>>        ParameterValue
+  /// CHECK:       <<Right:i\d+>>       ParameterValue
+  /// CHECK:       <<Mul:i\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Neg:i\d+>>         Neg [<<Mul>>]
+  /// CHECK:                            Return [<<Neg>>]
+
+  /// CHECK-START-ARM: int Main.$opt$noinline$mulNeg(int, int) instruction_simplifier_arm (after)
+  /// CHECK:       <<Left:i\d+>>        ParameterValue
+  /// CHECK:       <<Right:i\d+>>       ParameterValue
+  /// CHECK:       <<Mul:i\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Neg:i\d+>>         Neg [<<Mul>>]
+  /// CHECK:                            Return [<<Neg>>]
+
+  /// CHECK-START-ARM: int Main.$opt$noinline$mulNeg(int, int) instruction_simplifier_arm (after)
+  /// CHECK-NOT:                        MultiplyAccumulate
+
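+  // The ARM simplifier is expected to leave this untouched, as 32-bit ARM has
+  // no mneg-style instruction.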
+  public static int $opt$noinline$mulNeg(int left, int right) {
+    if (doThrow) throw new Error();
+    return - (left * right);
+  }
+
+  /**
+   * Test basic merging of `MUL+NEG` into `MULNEG`.
+   */
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$mulNeg(long, long) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Left:j\d+>>        ParameterValue
+  /// CHECK:       <<Right:j\d+>>       ParameterValue
+  /// CHECK:       <<Mul:j\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Neg:j\d+>>         Neg [<<Mul>>]
+  /// CHECK:                            Return [<<Neg>>]
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$mulNeg(long, long) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Left:j\d+>>        ParameterValue
+  /// CHECK:       <<Right:j\d+>>       ParameterValue
+  /// CHECK:       <<Const0:j\d+>>      LongConstant 0
+  /// CHECK:       <<MulNeg:j\d+>>      MultiplyAccumulate [<<Const0>>,<<Left>>,<<Right>>] kind:Sub
+  /// CHECK:                            Return [<<MulNeg>>]
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$mulNeg(long, long) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Mul
+  /// CHECK-NOT:                        Neg
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$mulNeg(long, long) disassembly (after)
+  /// CHECK:                            mneg x{{\d+}}, x{{\d+}}, x{{\d+}}
+
+  /// CHECK-START-ARM: long Main.$opt$noinline$mulNeg(long, long) instruction_simplifier_arm (before)
+  /// CHECK:       <<Left:j\d+>>        ParameterValue
+  /// CHECK:       <<Right:j\d+>>       ParameterValue
+  /// CHECK:       <<Mul:j\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Neg:j\d+>>         Neg [<<Mul>>]
+  /// CHECK:                            Return [<<Neg>>]
+
+  /// CHECK-START-ARM: long Main.$opt$noinline$mulNeg(long, long) instruction_simplifier_arm (after)
+  /// CHECK:       <<Left:j\d+>>        ParameterValue
+  /// CHECK:       <<Right:j\d+>>       ParameterValue
+  /// CHECK:       <<Mul:j\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Neg:j\d+>>         Neg [<<Mul>>]
+  /// CHECK:                            Return [<<Neg>>]
+
+  /// CHECK-START-ARM: long Main.$opt$noinline$mulNeg(long, long) instruction_simplifier_arm (after)
+  /// CHECK-NOT:                        MultiplyAccumulate
+
+  public static long $opt$noinline$mulNeg(long left, long right) {
+    if (doThrow) throw new Error();
+    return - (left * right);
+  }
 
   public static void main(String[] args) {
     assertIntEquals(7, $opt$noinline$mulAdd(1, 2, 3));
@@ -230,5 +431,7 @@
     assertLongEquals(20, $opt$noinline$multipleUses2(10, 11, 12));
     assertIntEquals(195, $opt$noinline$mulPlusOne(13, 14));
     assertLongEquals(-225, $opt$noinline$mulMinusOne(15, 16));
+    assertIntEquals(-306, $opt$noinline$mulNeg(17, 18));
+    assertLongEquals(-380, $opt$noinline$mulNeg(19, 20));
   }
 }
diff --git a/test/551-checker-shifter-operand/src/Main.java b/test/551-checker-shifter-operand/src/Main.java
index 8d73d69..9c86154 100644
--- a/test/551-checker-shifter-operand/src/Main.java
+++ b/test/551-checker-shifter-operand/src/Main.java
@@ -1,18 +1,18 @@
 /*
-* Copyright (C) 2015 The Android Open Source Project
-*
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-*
-*      http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 public class Main {
 
diff --git a/test/555-checker-regression-x86const/build b/test/555-checker-regression-x86const/build
index 09dcc36..92ddfc9 100644
--- a/test/555-checker-regression-x86const/build
+++ b/test/555-checker-regression-x86const/build
@@ -27,14 +27,12 @@
 mv classes/UnresolvedClass.class classes-ex
 
 if [ ${USE_JACK} = "true" ]; then
-  # Create .jack files from classes generated with javac.
-  ${JILL} classes --output classes.jack
-  ${JILL} classes-ex --output classes-ex.jack
+  jar cf classes.jill.jar -C classes .
+  jar cf classes-ex.jill.jar -C classes-ex .
 
-  # Create DEX files from .jack files.
-  ${JACK} --import classes.jack --output-dex .
+  ${JACK} --import classes.jill.jar --output-dex .
   zip $TEST_NAME.jar classes.dex
-  ${JACK} --import classes-ex.jack --output-dex .
+  ${JACK} --import classes-ex.jill.jar --output-dex .
   zip ${TEST_NAME}-ex.jar classes.dex
 else
   if [ ${NEED_DEX} = "true" ]; then
diff --git a/test/564-checker-negbitwise/expected.txt b/test/564-checker-negbitwise/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/564-checker-negbitwise/expected.txt
diff --git a/test/564-checker-negbitwise/info.txt b/test/564-checker-negbitwise/info.txt
new file mode 100644
index 0000000..28b9e9e
--- /dev/null
+++ b/test/564-checker-negbitwise/info.txt
@@ -0,0 +1 @@
+Test simplification of negated bitwise operations on ARM64.
diff --git a/test/564-checker-negbitwise/src/Main.java b/test/564-checker-negbitwise/src/Main.java
new file mode 100644
index 0000000..3de7be7
--- /dev/null
+++ b/test/564-checker-negbitwise/src/Main.java
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  // A dummy value to defeat inlining of these routines.
+  static boolean doThrow = false;
+
+  public static void assertIntEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void assertLongEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  /**
+   * Test merging of `NOT+AND` into `BIC`.
+   */
+
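+  // bic computes Rd = Rn & ~Rm, so no separate mvn is needed for the mask.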
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAnd(int, int) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Base:i\d+>>        ParameterValue
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<Not:i\d+>>         Not [<<Mask>>]
+  /// CHECK:       <<Op:i\d+>>          And [<<Base>>,<<Not>>]
+  /// CHECK:                            Return [<<Op>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAnd(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Base:i\d+>>        ParameterValue
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<NegOp:i\d+>>       Arm64BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:And
+  /// CHECK:                            Return [<<NegOp>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAnd(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Not
+  /// CHECK-NOT:                        And
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAnd(int, int) disassembly (after)
+  /// CHECK:                            bic w{{\d+}}, w{{\d+}}, w{{\d+}}
+
+  public static int $opt$noinline$notAnd(int base, int mask) {
+    if (doThrow) throw new Error();
+    return base & ~mask;
+  }
+
+  /**
+   * Test merging of `NOT+ORR` into `ORN`.
+   */
+
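+  // orn computes Rd = Rn | ~Rm.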
+  /// CHECK-START-ARM64: long Main.$opt$noinline$notOr(long, long) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Base:j\d+>>        ParameterValue
+  /// CHECK:       <<Mask:j\d+>>        ParameterValue
+  /// CHECK:       <<Not:j\d+>>         Not [<<Mask>>]
+  /// CHECK:       <<Op:j\d+>>          Or [<<Base>>,<<Not>>]
+  /// CHECK:                            Return [<<Op>>]
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$notOr(long, long) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Base:j\d+>>        ParameterValue
+  /// CHECK:       <<Mask:j\d+>>        ParameterValue
+  /// CHECK:       <<NegOp:j\d+>>       Arm64BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:Or
+  /// CHECK:                            Return [<<NegOp>>]
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$notOr(long, long) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Not
+  /// CHECK-NOT:                        Or
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$notOr(long, long) disassembly (after)
+  /// CHECK:                            orn x{{\d+}}, x{{\d+}}, x{{\d+}}
+
+  public static long $opt$noinline$notOr(long base, long mask) {
+    if (doThrow) throw new Error();
+    return base | ~mask;
+  }
+
+  /**
+   * Test merging of `NOT+EOR` into `EON`.
+   */
+
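+  // eon computes Rd = Rn ^ ~Rm.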
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXor(int, int) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Base:i\d+>>        ParameterValue
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<Not:i\d+>>         Not [<<Mask>>]
+  /// CHECK:       <<Op:i\d+>>          Xor [<<Base>>,<<Not>>]
+  /// CHECK:                            Return [<<Op>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXor(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Base:i\d+>>        ParameterValue
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<NegOp:i\d+>>       Arm64BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:Xor
+  /// CHECK:                            Return [<<NegOp>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXor(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Not
+  /// CHECK-NOT:                        Xor
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXor(int, int) disassembly (after)
+  /// CHECK:                            eon w{{\d+}}, w{{\d+}}, w{{\d+}}
+
+  public static int $opt$noinline$notXor(int base, int mask) {
+    if (doThrow) throw new Error();
+    return base ^ ~mask;
+  }
+
+  /**
+   * Check that the transformation is also done when the base is a constant.
+   */
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXorConstant(int) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<Constant:i\d+>>    IntConstant
+  /// CHECK:       <<Not:i\d+>>         Not [<<Mask>>]
+  /// CHECK:       <<Op:i\d+>>          Xor [<<Not>>,<<Constant>>]
+  /// CHECK:                            Return [<<Op>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXorConstant(int) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<Constant:i\d+>>    IntConstant
+  /// CHECK:       <<NegOp:i\d+>>       Arm64BitwiseNegatedRight [<<Constant>>,<<Mask>>] kind:Xor
+  /// CHECK:                            Return [<<NegOp>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXorConstant(int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Not
+  /// CHECK-NOT:                        Xor
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXorConstant(int) disassembly (after)
+  /// CHECK:                            mov <<Reg:w\d+>>, #0xf
+  /// CHECK:                            eon w{{\d+}}, <<Reg>>, w{{\d+}}
+
+  public static int $opt$noinline$notXorConstant(int mask) {
+    if (doThrow) throw new Error();
+    return 0xf ^ ~mask;
+  }
+
+  /**
+   * Check that no transformation is done when Not has multiple uses.
+   */
+
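+  // The Not still has another use, so merging it into the And would not
+  // remove any instruction.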
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAndMultipleUses(int, int) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Base:i\d+>>        ParameterValue
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<One:i\d+>>         IntConstant
+  /// CHECK:       <<Not:i\d+>>         Not [<<Mask>>]
+  /// CHECK:       <<Op1:i\d+>>         And [<<Not>>,<<One>>]
+  /// CHECK:       <<Op2:i\d+>>         And [<<Base>>,<<Not>>]
+  /// CHECK:       <<Add:i\d+>>         Add [<<Op1>>,<<Op2>>]
+  /// CHECK:                            Return [<<Add>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAndMultipleUses(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Base:i\d+>>        ParameterValue
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<One:i\d+>>         IntConstant
+  /// CHECK:       <<Not:i\d+>>         Not [<<Mask>>]
+  /// CHECK:       <<Op1:i\d+>>         And [<<Not>>,<<One>>]
+  /// CHECK:       <<Op2:i\d+>>         And [<<Base>>,<<Not>>]
+  /// CHECK:       <<Add:i\d+>>         Add [<<Op1>>,<<Op2>>]
+  /// CHECK:                            Return [<<Add>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAndMultipleUses(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Arm64BitwiseNegatedRight
+
+  public static int $opt$noinline$notAndMultipleUses(int base, int mask) {
+    if (doThrow) throw new Error();
+    int tmp = ~mask;
+    return (tmp & 0x1) + (base & tmp);
+  }
+
+  /**
+   * Check that no transformation is done when both inputs are Not's.
+   */
+
+  // We don't check the instructions before the pass, since if De Morgan's laws
+  // have been applied then Not/Not/Or is replaced by And/Not.
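+  // (For reference: ~a | ~b == ~(a & b), which leaves a single Not.)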
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$deMorganOr(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Arm64BitwiseNegatedRight
+
+  public static int $opt$noinline$deMorganOr(int a, int b) {
+    if (doThrow) throw new Error();
+    return ~a | ~b;
+  }
+
+  public static void main(String[] args) {
+    assertIntEquals(0xe,   $opt$noinline$notAnd(0xf, 0x1));
+    assertLongEquals(~0x0, $opt$noinline$notOr(0xf, 0x1));
+    assertIntEquals(~0xe,  $opt$noinline$notXor(0xf, 0x1));
+    assertIntEquals(~0xe,  $opt$noinline$notXorConstant(0x1));
+    assertIntEquals(0xe,   $opt$noinline$notAndMultipleUses(0xf, 0x1));
+    assertIntEquals(~0x1,  $opt$noinline$deMorganOr(0x3, 0x1));
+  }
+}
diff --git a/test/565-checker-condition-liveness/src/Main.java b/test/565-checker-condition-liveness/src/Main.java
index dc4cb76..acfcecd 100644
--- a/test/565-checker-condition-liveness/src/Main.java
+++ b/test/565-checker-condition-liveness/src/Main.java
@@ -28,10 +28,7 @@
   /// CHECK-EVAL:    <<UseInput>> == <<LivSel>> + 1
 
   public static int p(float arg) {
-    if (arg > 5.0f) {
-      return 0;
-    }
-    return -1;
+    return (arg > 5.0f) ? 0 : -1;
   }
 
   /// CHECK-START: void Main.main(java.lang.String[]) liveness (after)
diff --git a/test/565-checker-doublenegbitwise/src/Main.java b/test/565-checker-doublenegbitwise/src/Main.java
index 41af97b..e426b75 100644
--- a/test/565-checker-doublenegbitwise/src/Main.java
+++ b/test/565-checker-doublenegbitwise/src/Main.java
@@ -1,18 +1,18 @@
 /*
-* Copyright (C) 2016 The Android Open Source Project
-*
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-*
-*      http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 public class Main {
 
@@ -35,14 +35,11 @@
    * Test transformation of Not/Not/And into Or/Not.
    */
 
-  // Note: before the instruction_simplifier pass, Xor's are used instead of
-  // Not's (the simplification happens during the same pass).
   /// CHECK-START: int Main.$opt$noinline$andToOr(int, int) instruction_simplifier (before)
   /// CHECK:       <<P1:i\d+>>          ParameterValue
   /// CHECK:       <<P2:i\d+>>          ParameterValue
-  /// CHECK:       <<CstM1:i\d+>>       IntConstant -1
-  /// CHECK:       <<Not1:i\d+>>        Xor [<<P1>>,<<CstM1>>]
-  /// CHECK:       <<Not2:i\d+>>        Xor [<<P2>>,<<CstM1>>]
+  /// CHECK:       <<Not1:i\d+>>        Not [<<P1>>]
+  /// CHECK:       <<Not2:i\d+>>        Not [<<P2>>]
   /// CHECK:       <<And:i\d+>>         And [<<Not1>>,<<Not2>>]
   /// CHECK:                            Return [<<And>>]
 
@@ -106,14 +103,11 @@
    * Test transformation of Not/Not/Or into And/Not.
    */
 
-  // See note above.
-  // The second Xor has its arguments reversed for no obvious reason.
   /// CHECK-START: long Main.$opt$noinline$orToAnd(long, long) instruction_simplifier (before)
   /// CHECK:       <<P1:j\d+>>          ParameterValue
   /// CHECK:       <<P2:j\d+>>          ParameterValue
-  /// CHECK:       <<CstM1:j\d+>>       LongConstant -1
-  /// CHECK:       <<Not1:j\d+>>        Xor [<<P1>>,<<CstM1>>]
-  /// CHECK:       <<Not2:j\d+>>        Xor [<<CstM1>>,<<P2>>]
+  /// CHECK:       <<Not1:j\d+>>        Not [<<P1>>]
+  /// CHECK:       <<Not2:j\d+>>        Not [<<P2>>]
   /// CHECK:       <<Or:j\d+>>          Or [<<Not1>>,<<Not2>>]
   /// CHECK:                            Return [<<Or>>]
 
@@ -183,12 +177,11 @@
   /// CHECK-START: int Main.$opt$noinline$regressInputsAway(int, int) instruction_simplifier (before)
   /// CHECK:       <<P1:i\d+>>          ParameterValue
   /// CHECK:       <<P2:i\d+>>          ParameterValue
-  /// CHECK-DAG:   <<Cst1:i\d+>>        IntConstant 1
-  /// CHECK-DAG:   <<CstM1:i\d+>>       IntConstant -1
+  /// CHECK:       <<Cst1:i\d+>>        IntConstant 1
   /// CHECK:       <<AddP1:i\d+>>       Add [<<P1>>,<<Cst1>>]
-  /// CHECK:       <<Not1:i\d+>>        Xor [<<AddP1>>,<<CstM1>>]
+  /// CHECK:       <<Not1:i\d+>>        Not [<<AddP1>>]
   /// CHECK:       <<AddP2:i\d+>>       Add [<<P2>>,<<Cst1>>]
-  /// CHECK:       <<Not2:i\d+>>        Xor [<<AddP2>>,<<CstM1>>]
+  /// CHECK:       <<Not2:i\d+>>        Not [<<AddP2>>]
   /// CHECK:       <<Or:i\d+>>          Or [<<Not1>>,<<Not2>>]
   /// CHECK:                            Return [<<Or>>]
 
@@ -226,9 +219,8 @@
   /// CHECK-START: int Main.$opt$noinline$notXorToXor(int, int) instruction_simplifier (before)
   /// CHECK:       <<P1:i\d+>>          ParameterValue
   /// CHECK:       <<P2:i\d+>>          ParameterValue
-  /// CHECK:       <<CstM1:i\d+>>       IntConstant -1
-  /// CHECK:       <<Not1:i\d+>>        Xor [<<P1>>,<<CstM1>>]
-  /// CHECK:       <<Not2:i\d+>>        Xor [<<P2>>,<<CstM1>>]
+  /// CHECK:       <<Not1:i\d+>>        Not [<<P1>>]
+  /// CHECK:       <<Not2:i\d+>>        Not [<<P2>>]
   /// CHECK:       <<Xor:i\d+>>         Xor [<<Not1>>,<<Not2>>]
   /// CHECK:                            Return [<<Xor>>]
 
@@ -285,11 +277,10 @@
   /// CHECK-START: int Main.$opt$noinline$notMultipleUses(int, int) instruction_simplifier (before)
   /// CHECK:       <<P1:i\d+>>          ParameterValue
   /// CHECK:       <<P2:i\d+>>          ParameterValue
-  /// CHECK:       <<CstM1:i\d+>>       IntConstant -1
   /// CHECK:       <<One:i\d+>>         IntConstant 1
-  /// CHECK:       <<Not2:i\d+>>        Xor [<<P2>>,<<CstM1>>]
+  /// CHECK:       <<Not2:i\d+>>        Not [<<P2>>]
   /// CHECK:       <<And2:i\d+>>        And [<<Not2>>,<<One>>]
-  /// CHECK:       <<Not1:i\d+>>        Xor [<<P1>>,<<CstM1>>]
+  /// CHECK:       <<Not1:i\d+>>        Not [<<P1>>]
   /// CHECK:       <<And1:i\d+>>        And [<<Not1>>,<<Not2>>]
   /// CHECK:       <<Add:i\d+>>         Add [<<And2>>,<<And1>>]
   /// CHECK:                            Return [<<Add>>]
diff --git a/test/577-checker-fp2int/expected.txt b/test/577-checker-fp2int/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/577-checker-fp2int/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/577-checker-fp2int/info.txt b/test/577-checker-fp2int/info.txt
new file mode 100644
index 0000000..d22a0ea
--- /dev/null
+++ b/test/577-checker-fp2int/info.txt
@@ -0,0 +1 @@
+Unit test for float/double to raw bits conversions.
diff --git a/test/577-checker-fp2int/src/Main.java b/test/577-checker-fp2int/src/Main.java
new file mode 100644
index 0000000..ace956d
--- /dev/null
+++ b/test/577-checker-fp2int/src/Main.java
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  /// CHECK-START: int Main.f2int(float) instruction_simplifier (before)
+  /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:FloatFloatToIntBits
+  /// CHECK-DAG: Return [<<Result>>]
+  //
+  /// CHECK-START: int Main.f2int(float) instruction_simplifier (after)
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG: <<Raw:i\d+>> InvokeStaticOrDirect [<<Arg:f\d+>>{{(,[ij]\d+)?}}] intrinsic:FloatFloatToRawIntBits
+  /// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Arg>>,<<Arg>>]
+  /// CHECK-DAG: <<Result:i\d+>> Select [<<Raw>>,{{i\d+}},<<Cond>>]
+  /// CHECK-DAG: Return [<<Result>>]
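+  //
+  // floatToIntBits must return the canonical NaN (0x7fc00000), so the simplifier
+  // expands the call into the raw-bits intrinsic plus a NaN check (f != f)
+  // feeding a Select.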
+  private static int f2int(float f) {
+    return Float.floatToIntBits(f);
+  }
+
+  /// CHECK-START: long Main.d2long(double) instruction_simplifier (before)
+  /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect intrinsic:DoubleDoubleToLongBits
+  /// CHECK-DAG: Return [<<Result>>]
+  //
+  /// CHECK-START: long Main.d2long(double) instruction_simplifier (after)
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG: <<Raw:j\d+>> InvokeStaticOrDirect [<<Arg:d\d+>>{{(,[ij]\d+)?}}] intrinsic:DoubleDoubleToRawLongBits
+  /// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Arg>>,<<Arg>>]
+  /// CHECK-DAG: <<Result:j\d+>> Select [<<Raw>>,{{j\d+}},<<Cond>>]
+  /// CHECK-DAG: Return [<<Result>>]
+  private static long d2long(double d) {
+    return Double.doubleToLongBits(d);
+  }
+
+  public static void main(String args[]) {
+    // A few distinct numbers.
+    expectEquals32(0xff800000, f2int(Float.NEGATIVE_INFINITY));
+    expectEquals32(0xbf800000, f2int(-1.0f));
+    expectEquals32(0x80000000, f2int(-0.0f));
+    expectEquals32(0x00000000, f2int(+0.0f));
+    expectEquals32(0x3f800000, f2int(+1.0f));
+    expectEquals32(0x7f800000, f2int(Float.POSITIVE_INFINITY));
+
+    // A few others.
+    for (int i = 0; i <= 100; i++) {
+      expectEquals32(i, f2int(Float.intBitsToFloat(i)));
+    }
+
+    // A few NaN numbers.
+    float[] fvals = {
+      Float.intBitsToFloat(0x7f800001),
+      Float.intBitsToFloat(0x7fa00000),
+      Float.intBitsToFloat(0x7fc00000),
+      Float.intBitsToFloat(0x7fffffff),
+      Float.intBitsToFloat(0xff800001),
+      Float.intBitsToFloat(0xffa00000),
+      Float.intBitsToFloat(0xffc00000),
+      Float.intBitsToFloat(0xffffffff)
+    };
+    for (int i = 0; i < fvals.length; i++) {
+      expectEquals32(0x7fc00000, f2int(fvals[i]));
+    }
+
+    // A few distinct numbers.
+    expectEquals64(0xfff0000000000000L, d2long(Double.NEGATIVE_INFINITY));
+    expectEquals64(0xbff0000000000000L, d2long(-1.0d));
+    expectEquals64(0x8000000000000000L, d2long(-0.0d));
+    expectEquals64(0x0000000000000000L, d2long(+0.0d));
+    expectEquals64(0x3ff0000000000000L, d2long(+1.0d));
+    expectEquals64(0x7ff0000000000000L, d2long(Double.POSITIVE_INFINITY));
+
+    // A few others.
+    for (long l = 0; l <= 100; l++) {
+      expectEquals64(l, d2long(Double.longBitsToDouble(l)));
+    }
+
+    // A few NaN numbers.
+    double[] dvals = {
+      Double.longBitsToDouble(0x7ff0000000000001L),
+      Double.longBitsToDouble(0x7ff4000000000000L),
+      Double.longBitsToDouble(0x7ff8000000000000L),
+      Double.longBitsToDouble(0x7fffffffffffffffL),
+      Double.longBitsToDouble(0xfff0000000000001L),
+      Double.longBitsToDouble(0xfff4000000000000L),
+      Double.longBitsToDouble(0xfff8000000000000L),
+      Double.longBitsToDouble(0xffffffffffffffffL)
+    };
+    for (int i = 0; i < dvals.length; i++) {
+      expectEquals64(0x7ff8000000000000L, d2long(dvals[i]));
+    }
+
+    System.out.println("passed");
+  }
+
+  private static void expectEquals32(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: "
+          + Integer.toHexString(expected)
+          + ", found: "
+          + Integer.toHexString(result));
+    }
+  }
+
+  private static void expectEquals64(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: "
+          + Long.toHexString(expected)
+          + ", found: "
+          + Long.toHexString(result));
+    }
+  }
+}
diff --git a/test/578-bce-visit/expected.txt b/test/578-bce-visit/expected.txt
new file mode 100644
index 0000000..28fca2c
--- /dev/null
+++ b/test/578-bce-visit/expected.txt
@@ -0,0 +1,2 @@
+exception caught
+FUZZ result = 1001 16
diff --git a/test/578-bce-visit/info.txt b/test/578-bce-visit/info.txt
new file mode 100644
index 0000000..2462e1b
--- /dev/null
+++ b/test/578-bce-visit/info.txt
@@ -0,0 +1 @@
+Fuzz test that exposed a bug in the way bounds check elimination visits blocks.
diff --git a/test/578-bce-visit/src/Main.java b/test/578-bce-visit/src/Main.java
new file mode 100644
index 0000000..b0e920e
--- /dev/null
+++ b/test/578-bce-visit/src/Main.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Automatically generated fuzz test that exposed a bug in the way bounds
+ * check elimination visits basic blocks. If, after dynamic BCE, the same
+ * block was visited again, static length-based BCE would incorrectly
+ * feed information back to itself and remove a necessary bounds check.
+ */
+public class Main {
+
+  private static int[][][] mA = new int[10][10][10];
+
+  private static int mX = 17;
+
+  private static int doit() {
+    int l0 = (((++mA[7][2][8]) <= mA[0][1][3]) ? (++mA[9][0][5]) : ((( -mA[0][7][0]) * ((mX == mX) ? 180 : mX)) + (mA[7][8][8]++)));
+    mA[1][0][4] -= mX;
+    int l1 = (((l0 >= ( ~mA[6][7][5])) && ((921 <= l0) && (mA[3][9][6] > l0))) ? mX : (l0--));
+    int l2 = ( -384);
+    for (int i0 = 7 - 1; i0 >= 1; i0--) {
+      mA[6][0][0] -= ((((l0++) == ( -mX)) ? (((mA[3][i0][1] > 503) || (mX <= i0)) ? (--l0) : (l0--)) : mX) - ( ~(mX--)));
+      int l3 = 24;
+      int l4 = ((l2--) & mX);
+      for (int i1 = i0-2 - 1; i1 >= 3; i1--) {
+        for (int i2 = 2; i2 < i0; i2++) {
+          mA[i0][4][l3] >>= 1;
+        }
+      }
+    }
+    return 1;
+  }
+
+  public static void main(String[] args) {
+    int k = 1;
+    for (int i0 = 0; i0 < 10; i0++)
+    for (int i1 = 0; i1 < 10; i1++)
+    for (int i2 = 0; i2 < 10; i2++)
+      mA[i0][i1][i2] = k++;
+    try {
+      k = doit();
+    } catch (Exception e) {
+      System.out.println("exception caught");
+    }
+    System.out.println("FUZZ result = " + k + " " + mX);
+  }
+}
diff --git a/test/578-polymorphic-inlining/expected.txt b/test/578-polymorphic-inlining/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/578-polymorphic-inlining/expected.txt
diff --git a/test/578-polymorphic-inlining/info.txt b/test/578-polymorphic-inlining/info.txt
new file mode 100644
index 0000000..77ec49b
--- /dev/null
+++ b/test/578-polymorphic-inlining/info.txt
@@ -0,0 +1,2 @@
+Regression test for polymorphic inlining, which used to wrongly
+propagate the try/catch information to newly created blocks.
diff --git a/test/578-polymorphic-inlining/src/Main.java b/test/578-polymorphic-inlining/src/Main.java
new file mode 100644
index 0000000..22d33d0
--- /dev/null
+++ b/test/578-polymorphic-inlining/src/Main.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) {
+    for (int i = 0; i < 20000; ++i) {
+      $noinline$testInTryCatch(new Main(), i);
+      $noinline$testInTryCatch(new SubMain(), i);
+    }
+  }
+
+  public static void $noinline$testInTryCatch(Main m, int i) {
+    final int value;
+    try {
+      throw new Exception();
+    } catch (Exception e) {
+      // The polymorphic inlining of 'willInlineVoid' used to generate an
+      // incorrect graph by marking the inlined blocks as catch blocks.
+      m.willInlineVoid(i);
+      return;
+    }
+  }
+
+  public void willInlineVoid(int i) {
+    if (i == 0) {
+      $noinline$foo();
+    } else {
+      $noinline$foo();
+      $noinline$foo();
+    }
+  }
+
+  public static void $noinline$foo() {
+    if (doThrow) throw new Error("");
+  }
+
+  public static boolean doThrow;
+}
+
+class SubMain extends Main {
+  public void willInlineVoid(int i) {
+  }
+}
diff --git a/test/579-inline-infinite/expected.txt b/test/579-inline-infinite/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/579-inline-infinite/expected.txt
diff --git a/test/579-inline-infinite/info.txt b/test/579-inline-infinite/info.txt
new file mode 100644
index 0000000..6fb917c
--- /dev/null
+++ b/test/579-inline-infinite/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler: inlining a
+method containing an infinite loop used to cause a crash.
diff --git a/test/579-inline-infinite/src/Main.java b/test/579-inline-infinite/src/Main.java
new file mode 100644
index 0000000..f214ed4
--- /dev/null
+++ b/test/579-inline-infinite/src/Main.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Infinite implements Runnable {
+  public int field;
+
+  private final void $noinline$infinite() {
+    while(true) {
+      field++;
+    }
+  }
+
+  public void run() {
+    $noinline$infinite();
+  }
+}
+
+public class Main {
+  public static void main(String[] args) {
+    Thread thr = new Thread(new Infinite());
+    thr.setDaemon(true);
+    thr.start();
+    // This is a compiler test, so just finish.
+  }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 364be59..e05d4f5 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -220,6 +220,24 @@
         $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
         $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(ART_TEST_RUN_TEST_SKIP), $(ALL_ADDRESS_SIZES))
 
+
+# Disable 097-duplicate-method while under investigation (broken by the latest Jack release, b/27358065).
+# Disable 137-cfi (b/27391690).
+# Disable 536-checker-needs-access-check and 537-checker-inline-and-unverified (b/27425061)
+# Disable 577-profile-foreign-dex (b/27454772).
+TEST_ART_BROKEN_ALL_TARGET_TESTS := \
+  097-duplicate-method \
+  536-checker-needs-access-check \
+  537-checker-inline-and-unverified \
+  577-profile-foreign-dex \
+
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+    $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+    $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_ALL_TARGET_TESTS), \
+    $(ALL_ADDRESS_SIZES))
+
+TEST_ART_BROKEN_ALL_TARGET_TESTS :=
+
 # Tests that are timing sensitive and flaky on heavily loaded systems.
 TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \
   002-sleep \
@@ -533,6 +551,7 @@
   $(TEST_ART_BROKEN_INTERPRETER_RUN_TESTS)
 
 # Tests that should fail in the read barrier configuration with the Optimizing compiler (AOT).
+# 145: Test sometimes times out in read barrier configuration (b/27467554).
 # 484: Baker's fast path based read barrier compiler instrumentation generates code containing
 #      more parallel moves on x86, thus some Checker assertions may fail.
 # 527: On ARM64, the read barrier instrumentation does not support the HArm64IntermediateAddress
@@ -540,6 +559,7 @@
 # 537: Expects an array copy to be intrinsified on x86-64, but calling-on-slowpath intrinsics are
 #      not yet handled in the read barrier configuration.
 TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS := \
+  145-alloc-tracking-stress \
   484-checker-register-hints \
   527-checker-array-access-split \
   537-checker-arraycopy
diff --git a/test/run-test b/test/run-test
index f1875d7..d0f93b9 100755
--- a/test/run-test
+++ b/test/run-test
@@ -677,11 +677,7 @@
 # Tests named '<number>-checker-*' will also have their CFGs verified with
 # Checker when compiled with Optimizing on host.
 if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
-  # Jack does not necessarily generate the same DEX output than dx. Because these tests depend
-  # on a particular DEX output, keep building them with dx for now (b/19467889).
-  USE_JACK="false"
-
-  if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" ]; then
+  if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$USE_JACK" = "true" ]; then
     # Optimizing has read barrier support for certain architectures
     # only. On other architectures, compiling is disabled when read
     # barriers are enabled, meaning that we do not produce a CFG file
diff --git a/tools/Android.mk b/tools/Android.mk
index 9a96f7a..bc2fd8c 100644
--- a/tools/Android.mk
+++ b/tools/Android.mk
@@ -19,21 +19,14 @@
 # Copy the art shell script to the host's bin directory
 include $(CLEAR_VARS)
 LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_MODULE := art
-include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/art $(ACP)
-	@echo "Copy: $(PRIVATE_MODULE) ($@)"
-	$(copy-file-to-new-target)
-	$(hide) chmod 755 $@
+LOCAL_SRC_FILES := art
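+# BUILD_PREBUILT installs the source file directly, replacing the manual copy/chmod rule.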
+include $(BUILD_PREBUILT)
 
 # Copy the art shell script to the target's bin directory
 include $(CLEAR_VARS)
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_MODULE := art
-include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/art $(ACP)
-	@echo "Copy: $(PRIVATE_MODULE) ($@)"
-	$(copy-file-to-new-target)
-	$(hide) chmod 755 $@
+LOCAL_SRC_FILES := art
+include $(BUILD_PREBUILT)
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 6869b04..cfbafde 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -35,16 +35,10 @@
 # --- ahat script ----------------
 include $(CLEAR_VARS)
 LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_MODULE := ahat
-include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/ahat $(ACP)
-	@echo "Copy: $(PRIVATE_MODULE) ($@)"
-	$(copy-file-to-new-target)
-	$(hide) chmod 755 $@
-
-ahat: $(LOCAL_BUILT_MODULE)
+LOCAL_SRC_FILES := ahat
+include $(BUILD_PREBUILT)
 
 # --- ahat-tests.jar --------------
 include $(CLEAR_VARS)
diff --git a/tools/checker/common/logger.py b/tools/checker/common/logger.py
index 28bb458..f13eaf6 100644
--- a/tools/checker/common/logger.py
+++ b/tools/checker/common/logger.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 from __future__ import print_function
+import collections
 import sys
 
 class Logger(object):
@@ -21,7 +22,7 @@
     NoOutput, Error, Info = range(3)
 
   class Color(object):
-    Default, Blue, Gray, Purple, Red = range(5)
+    Default, Blue, Gray, Purple, Red, Green = range(6)
 
     @staticmethod
     def terminalCode(color, out=sys.stdout):
@@ -35,6 +36,8 @@
         return '\033[95m'
       elif color == Logger.Color.Red:
         return '\033[91m'
+      elif color == Logger.Color.Green:
+        return '\033[32m'
       else:
         return '\033[0m'
 
@@ -52,19 +55,34 @@
       out.flush()
 
   @staticmethod
-  def fail(msg, file=None, line=-1):
-    location = ""
-    if file:
-      location += file + ":"
-    if line > 0:
-      location += str(line) + ":"
-    if location:
-      location += " "
-
-    Logger.log(location, Logger.Level.Error, color=Logger.Color.Gray, newLine=False, out=sys.stderr)
+  def fail(msg, file=None, line=-1, lineText=None, variables=None):
     Logger.log("error: ", Logger.Level.Error, color=Logger.Color.Red, newLine=False, out=sys.stderr)
     Logger.log(msg, Logger.Level.Error, out=sys.stderr)
-    sys.exit(msg)
+
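+    # When the failing assertion's text is known, print its location and source line.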
+    if lineText:
+      loc = ""
+      if file:
+        loc += file + ":"
+      if line > 0:
+        loc += str(line) + ":"
+      if loc:
+        loc += " "
+      Logger.log(loc, Logger.Level.Error, color=Logger.Color.Gray, newLine=False, out=sys.stderr)
+      Logger.log(lineText, Logger.Level.Error, out=sys.stderr)
+
+    if variables:
+      longestName = 0
+      for var in variables:
+        longestName = max(longestName, len(var))
+
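+      # Print the variable bindings sorted by name, padding names so the values align.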
+      for var in collections.OrderedDict(sorted(variables.items())):
+        padding = ' ' * (longestName - len(var))
+        Logger.log(var, Logger.Level.Error, color=Logger.Color.Green, newLine=False, out=sys.stderr)
+        Logger.log(padding, Logger.Level.Error, newLine=False, out=sys.stderr)
+        Logger.log(" = ", Logger.Level.Error, newLine=False, out=sys.stderr)
+        Logger.log(variables[var], Logger.Level.Error, out=sys.stderr)
+
+    sys.exit(1)
 
   @staticmethod
   def startTest(name):
@@ -76,6 +94,6 @@
     Logger.log("PASS", color=Logger.Color.Blue)
 
   @staticmethod
-  def testFailed(msg, file=None, line=-1):
+  def testFailed(msg, assertion, variables):
     Logger.log("FAIL", color=Logger.Color.Red)
-    Logger.fail(msg, file, line)
+    Logger.fail(msg, assertion.fileName, assertion.lineNo, assertion.originalText, variables)
diff --git a/tools/checker/match/file.py b/tools/checker/match/file.py
index 3ded074..6ff19d5 100644
--- a/tools/checker/match/file.py
+++ b/tools/checker/match/file.py
@@ -23,9 +23,10 @@
 MatchInfo = namedtuple("MatchInfo", ["scope", "variables"])
 
 class MatchFailedException(Exception):
-  def __init__(self, assertion, lineNo):
+  def __init__(self, assertion, lineNo, variables):
     self.assertion = assertion
     self.lineNo = lineNo
+    self.variables = variables
 
 def splitIntoGroups(assertions):
   """ Breaks up a list of assertions, grouping instructions which should be
@@ -58,7 +59,7 @@
     newVariables = MatchLines(assertion, c1Pass.body[i], variables)
     if newVariables is not None:
       return MatchInfo(MatchScope(i, i), newVariables)
-  raise MatchFailedException(assertion, scope.start)
+  raise MatchFailedException(assertion, scope.start, variables)
 
 def matchDagGroup(assertions, c1Pass, scope, variables):
   """ Attempts to find matching `c1Pass` lines for a group of DAG assertions.
@@ -92,12 +93,12 @@
     for assertion in assertions:
       assert assertion.variant == TestAssertion.Variant.Not
       if MatchLines(assertion, line, variables) is not None:
-        raise MatchFailedException(assertion, i)
+        raise MatchFailedException(assertion, i, variables)
 
 def testEvalGroup(assertions, scope, variables):
   for assertion in assertions:
     if not EvaluateLine(assertion, variables):
-      raise MatchFailedException(assertion, scope.start)
+      raise MatchFailedException(assertion, scope.start, variables)
 
 def MatchTestCase(testCase, c1Pass):
   """ Runs a test case against a C1visualizer graph dump.
@@ -181,8 +182,8 @@
     except MatchFailedException as e:
       lineNo = c1Pass.startLineNo + e.lineNo
       if e.assertion.variant == TestAssertion.Variant.Not:
-        Logger.testFailed("NOT assertion matched line {}".format(lineNo),
-                          e.assertion.fileName, e.assertion.lineNo)
+        msg = "NOT assertion matched line {}"
       else:
-        Logger.testFailed("Assertion could not be matched starting from line {}".format(lineNo),
-                          e.assertion.fileName, e.assertion.lineNo)
+        msg = "Assertion could not be matched starting from line {}"
+      msg = msg.format(lineNo)
+      Logger.testFailed(msg, e.assertion, e.variables)
diff --git a/tools/checker/match/line.py b/tools/checker/match/line.py
index 08f001f..ed48a53 100644
--- a/tools/checker/match/line.py
+++ b/tools/checker/match/line.py
@@ -35,15 +35,13 @@
   if name in variables:
     return variables[name]
   else:
-    Logger.testFailed("Missing definition of variable \"{}\"".format(name),
-                      pos.fileName, pos.lineNo)
+    Logger.testFailed("Missing definition of variable \"{}\"".format(name), pos, variables)
 
 def setVariable(name, value, variables, pos):
   if name not in variables:
     return variables.copyWith(name, value)
   else:
-    Logger.testFailed("Multiple definitions of variable \"{}\"".format(name),
-                      pos.fileName, pos.lineNo)
+    Logger.testFailed("Multiple definitions of variable \"{}\"".format(name), pos, variables)
 
 def matchWords(checkerWord, stringWord, variables, pos):
   """ Attempts to match a list of TestExpressions against a string.
diff --git a/tools/dexfuzz/Android.mk b/tools/dexfuzz/Android.mk
index 1580bc3..473f6de 100644
--- a/tools/dexfuzz/Android.mk
+++ b/tools/dexfuzz/Android.mk
@@ -27,14 +27,10 @@
 # --- dexfuzz script ----------------
 include $(CLEAR_VARS)
 LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_MODULE := dexfuzz
-include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/dexfuzz $(ACP)
-	@echo "Copy: $(PRIVATE_MODULE) ($@)"
-	$(copy-file-to-new-target)
-	$(hide) chmod 755 $@
+LOCAL_SRC_FILES := dexfuzz
+include $(BUILD_PREBUILT)
 
 # --- dexfuzz script with core image dependencies ----------------
 fuzzer: $(LOCAL_BUILT_MODULE) $(HOST_CORE_IMG_OUTS)
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index e6394a9..46100ae 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -208,13 +208,6 @@
           "org.apache.harmony.tests.java.util.TimerTaskTest#test_scheduledExecutionTime"]
 },
 {
-  description: "'cat -' does not work anymore",
-  result: EXEC_FAILED,
-  bug: 26395656,
-  modes: [device],
-  names: ["org.apache.harmony.tests.java.lang.ProcessTest#test_getOutputStream"]
-},
-{
   description: "Missing resource in classpath",
   result: EXEC_FAILED,
   modes: [device],
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index e4af9fa..8422e20 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -51,7 +51,7 @@
 image_compiler_option=""
 debug="no"
 verbose="no"
-image="-Ximage:/data/art-test/core-jit.art"
+image="-Ximage:/data/art-test/core-optimizing-pic.art"
 vm_args=""
 # By default, we run the whole JDWP test suite.
 test="org.apache.harmony.jpda.tests.share.AllTests"
@@ -70,9 +70,6 @@
     device_dir=""
     # Vogar knows which VM to use on host.
     vm_command=""
-    # We only compile the image on the host. Note that not providing this option
-    # for target testing puts us below the adb command limit for vogar.
-    image_compiler_option="--vm-arg -Ximage-compiler-option --vm-arg --debuggable"
     shift
   elif [[ $1 == -Ximage:* ]]; then
     image="$1"
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
index d5b8989..45b60dc 100755
--- a/tools/setup-buildbot-device.sh
+++ b/tools/setup-buildbot-device.sh
@@ -34,6 +34,9 @@
 echo -e "${green}Uptime${nc}"
 adb shell uptime
 
+echo -e "${green}Battery info${nc}"
+adb shell dumpsys battery
+
 echo -e "${green}Kill stalled dalvikvm processes${nc}"
 processes=$(adb shell "ps" | grep dalvikvm | awk '{print $2}')
 for i in $processes; do adb shell kill -9 $i; done
diff --git a/tools/symbolize-buildbot-crashes.sh b/tools/symbolize-buildbot-crashes.sh
new file mode 100755
index 0000000..8dc4e27
--- /dev/null
+++ b/tools/symbolize-buildbot-crashes.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# We push art and its dependencies to '/data/local/tmp', but the 'stack'
+# script expects things to be in '/', so we just strip the
+# '/data/local/tmp' prefix.
+adb logcat -d | sed 's,/data/local/tmp,,g' | development/scripts/stack
+
+# Always return 0 to avoid having the buildbot complain about wrong stacks.
+exit 0