Merge "Sort output of a find command in makefile"
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 209bb5a..385f34a 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -211,11 +211,9 @@
     generate_debug_info_ = false;
   } else if (option == "--debuggable") {
     debuggable_ = true;
-    generate_debug_info_ = true;
   } else if (option == "--native-debuggable") {
     native_debuggable_ = true;
     debuggable_ = true;
-    generate_debug_info_ = true;
   } else if (option.starts_with("--top-k-profile-threshold=")) {
     ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold_, Usage);
   } else if (option == "--include-patch-information") {
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index f8032bb..f14bdc4 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -50,7 +50,7 @@
   static const size_t kDefaultNumDexMethodsThreshold = 900;
   static constexpr double kDefaultTopKProfileThreshold = 90.0;
   static const bool kDefaultNativeDebuggable = false;
-  static const bool kDefaultGenerateDebugInfo = kIsDebugBuild;
+  static const bool kDefaultGenerateDebugInfo = false;
   static const bool kDefaultIncludePatchInformation = false;
   static const size_t kDefaultInlineDepthLimit = 3;
   static const size_t kDefaultInlineMaxCodeUnits = 32;
diff --git a/compiler/dwarf/method_debug_info.h b/compiler/dwarf/method_debug_info.h
index a391e4d..e8ba914 100644
--- a/compiler/dwarf/method_debug_info.h
+++ b/compiler/dwarf/method_debug_info.h
@@ -30,8 +30,8 @@
   uint32_t access_flags_;
   const DexFile::CodeItem* code_item_;
   bool deduped_;
-  uint32_t low_pc_;
-  uint32_t high_pc_;
+  uintptr_t low_pc_;
+  uintptr_t high_pc_;
   CompiledMethod* compiled_method_;
 };
 
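A note on the widening above: for JIT-compiled methods, low_pc_ and high_pc_ now carry absolute code addresses rather than 32-bit offsets into an oat file's .text section, so the fields become uintptr_t and the DWARF writer narrows the (still small) differences back to 32 bits with dchecked_integral_cast. A minimal sketch of such a debug-checked narrowing cast, assuming roughly what base/casts.h provides rather than quoting it:

    template <typename Dest, typename Source>
    inline Dest dchecked_integral_cast(Source value) {
      // Debug-only check that the value round-trips through the destination type.
      DCHECK_EQ(static_cast<Source>(static_cast<Dest>(value)), value);
      return static_cast<Dest>(value);
    }
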
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index bb07cc2..a7461a5 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -148,6 +148,12 @@
       }
     }
 
+    // Returns true if the section was written to disk.
+    // (Used to check whether we have .text when writing JIT debug info)
+    bool Exists() const {
+      return finished_;
+    }
+
     // Get the location of this section in virtual memory.
     Elf_Addr GetAddress() const {
       CHECK(started_);
@@ -247,16 +253,18 @@
     }
 
     // Buffer symbol for this section.  It will be written later.
+    // If the symbol's section is null, it will be considered absolute (SHN_ABS).
+    // (We use this in JIT to reference code that is stored outside the debug ELF file.)
     void Add(Elf_Word name, const Section* section,
              Elf_Addr addr, bool is_relative, Elf_Word size,
              uint8_t binding, uint8_t type, uint8_t other = 0) {
-      CHECK(section != nullptr);
       Elf_Sym sym = Elf_Sym();
       sym.st_name = name;
       sym.st_value = addr + (is_relative ? section->GetAddress() : 0);
       sym.st_size = size;
       sym.st_other = other;
-      sym.st_shndx = section->GetSectionIndex();
+      sym.st_shndx = (section != nullptr ? section->GetSectionIndex()
+                                         : static_cast<Elf_Word>(SHN_ABS));
       sym.st_info = (binding << 4) + (type & 0xf);
       symbols_.push_back(sym);
     }
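
The relaxed Add() above supports two symbol flavours: the usual .text-relative symbols written into oat files, and absolute (SHN_ABS) symbols for the small in-memory ELF files generated for JIT code, whose .text section is never written out. A hedged usage sketch (the symbol name and size variables below are illustrative, not taken from this change):

    // Oat file: the value is relative to the .text section.
    symtab->Add(strtab->Write("SomeMethod"), builder->GetText(),
                offset_in_text, /* is_relative */ true, code_size, STB_GLOBAL, STT_FUNC);
    // JIT mini debug ELF: no written .text section, so the symbol is absolute.
    symtab->Add(strtab->Write("SomeMethod"), /* section */ nullptr,
                code_address, /* is_relative */ false, code_size, STB_GLOBAL, STT_FUNC);
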
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index 2bc8c89..dd50f69 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -22,16 +22,20 @@
 #include "base/casts.h"
 #include "base/stl_util.h"
 #include "compiled_method.h"
-#include "driver/compiler_driver.h"
 #include "dex_file-inl.h"
+#include "driver/compiler_driver.h"
 #include "dwarf/dedup_vector.h"
 #include "dwarf/headers.h"
 #include "dwarf/method_debug_info.h"
 #include "dwarf/register.h"
 #include "elf_builder.h"
+#include "linker/vector_output_stream.h"
+#include "mirror/array.h"
+#include "mirror/class-inl.h"
+#include "mirror/class.h"
 #include "oat_writer.h"
-#include "utils.h"
 #include "stack_map.h"
+#include "utils.h"
 
 namespace art {
 namespace dwarf {
@@ -219,6 +223,10 @@
   CHECK(format == DW_DEBUG_FRAME_FORMAT || format == DW_EH_FRAME_FORMAT);
   typedef typename ElfTypes::Addr Elf_Addr;
 
+  if (method_infos.empty()) {
+    return;
+  }
+
   std::vector<uint32_t> binary_search_table;
   std::vector<uintptr_t> patch_locations;
   if (format == DW_EH_FRAME_FORMAT) {
@@ -234,7 +242,9 @@
   {
     cfi_section->Start();
     const bool is64bit = Is64BitInstructionSet(builder->GetIsa());
-    const Elf_Addr text_address = builder->GetText()->GetAddress();
+    const Elf_Addr text_address = builder->GetText()->Exists()
+        ? builder->GetText()->GetAddress()
+        : 0;
     const Elf_Addr cfi_address = cfi_section->GetAddress();
     const Elf_Addr cie_address = cfi_address;
     Elf_Addr buffer_address = cfi_address;
@@ -305,8 +315,8 @@
   struct CompilationUnit {
     std::vector<const MethodDebugInfo*> methods_;
     size_t debug_line_offset_ = 0;
-    uint32_t low_pc_ = 0xFFFFFFFFU;
-    uint32_t high_pc_ = 0;
+    uintptr_t low_pc_ = std::numeric_limits<uintptr_t>::max();
+    uintptr_t high_pc_ = 0;
   };
 
   typedef std::vector<DexFile::LocalInfo> LocalInfos;
@@ -439,14 +449,17 @@
 
     void Write(const CompilationUnit& compilation_unit) {
       CHECK(!compilation_unit.methods_.empty());
-      const Elf_Addr text_address = owner_->builder_->GetText()->GetAddress();
+      const Elf_Addr text_address = owner_->builder_->GetText()->Exists()
+          ? owner_->builder_->GetText()->GetAddress()
+          : 0;
+      const uintptr_t cu_size = compilation_unit.high_pc_ - compilation_unit.low_pc_;
 
       info_.StartTag(DW_TAG_compile_unit);
       info_.WriteStrp(DW_AT_producer, owner_->WriteString("Android dex2oat"));
       info_.WriteData1(DW_AT_language, DW_LANG_Java);
       info_.WriteStrp(DW_AT_comp_dir, owner_->WriteString("$JAVA_SRC_ROOT"));
       info_.WriteAddr(DW_AT_low_pc, text_address + compilation_unit.low_pc_);
-      info_.WriteUdata(DW_AT_high_pc, compilation_unit.high_pc_ - compilation_unit.low_pc_);
+      info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast<uint32_t>(cu_size));
       info_.WriteSecOffset(DW_AT_stmt_list, compilation_unit.debug_line_offset_);
 
       const char* last_dex_class_desc = nullptr;
@@ -464,8 +477,16 @@
           if (last_dex_class_desc != nullptr) {
             EndClassTag(last_dex_class_desc);
           }
-          size_t offset = StartClassTag(dex_class_desc);
-          type_cache_.emplace(dex_class_desc, offset);
+          // Write a reference tag for the class we are about to declare.
+          size_t reference_tag_offset = info_.StartTag(DW_TAG_reference_type);
+          type_cache_.emplace(std::string(dex_class_desc), reference_tag_offset);
+          size_t type_attrib_offset = info_.size();
+          info_.WriteRef4(DW_AT_type, 0);
+          info_.EndTag();
+          // Declare the class that owns this method.
+          size_t class_offset = StartClassTag(dex_class_desc);
+          info_.UpdateUint32(type_attrib_offset, class_offset);
+          info_.WriteFlag(DW_AT_declaration, true);
           // Check that each class is defined only once.
           bool unique = owner_->defined_dex_classes_.insert(dex_class_desc).second;
           CHECK(unique) << "Redefinition of " << dex_class_desc;
@@ -476,7 +497,7 @@
         info_.StartTag(DW_TAG_subprogram);
         WriteName(dex->GetMethodName(dex_method));
         info_.WriteAddr(DW_AT_low_pc, text_address + mi->low_pc_);
-        info_.WriteUdata(DW_AT_high_pc, mi->high_pc_ - mi->low_pc_);
+        info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast<uint32_t>(mi->high_pc_-mi->low_pc_));
         uint8_t frame_base[] = { DW_OP_call_frame_cfa };
         info_.WriteExprLoc(DW_AT_frame_base, &frame_base, sizeof(frame_base));
         WriteLazyType(dex->GetReturnTypeDescriptor(dex_proto));
@@ -562,6 +583,92 @@
       owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
     }
 
+    void Write(const ArrayRef<mirror::Class*>& types) SHARED_REQUIRES(Locks::mutator_lock_) {
+      info_.StartTag(DW_TAG_compile_unit);
+      info_.WriteStrp(DW_AT_producer, owner_->WriteString("Android dex2oat"));
+      info_.WriteData1(DW_AT_language, DW_LANG_Java);
+
+      for (mirror::Class* type : types) {
+        if (type->IsPrimitive()) {
+          // For primitive types the definition and the declaration are the same.
+          if (type->GetPrimitiveType() != Primitive::kPrimVoid) {
+            WriteTypeDeclaration(type->GetDescriptor(nullptr));
+          }
+        } else if (type->IsArrayClass()) {
+          mirror::Class* element_type = type->GetComponentType();
+          uint32_t component_size = type->GetComponentSize();
+          uint32_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
+          uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
+
+          info_.StartTag(DW_TAG_array_type);
+          std::string descriptor_string;
+          WriteLazyType(element_type->GetDescriptor(&descriptor_string));
+          info_.WriteUdata(DW_AT_data_member_location, data_offset);
+          info_.StartTag(DW_TAG_subrange_type);
+          DCHECK_LT(length_offset, 32u);
+          uint8_t count[] = {
+            DW_OP_push_object_address,
+            static_cast<uint8_t>(DW_OP_lit0 + length_offset),
+            DW_OP_plus,
+            DW_OP_deref_size,
+            4  // Array length is always 32-bit wide.
+          };
+          info_.WriteExprLoc(DW_AT_count, &count, sizeof(count));
+          info_.EndTag();  // DW_TAG_subrange_type.
+          info_.EndTag();  // DW_TAG_array_type.
+        } else {
+          std::string descriptor_string;
+          const char* desc = type->GetDescriptor(&descriptor_string);
+          StartClassTag(desc);
+
+          if (!type->IsVariableSize()) {
+            info_.WriteUdata(DW_AT_byte_size, type->GetObjectSize());
+          }
+
+          // Base class.
+          mirror::Class* base_class = type->GetSuperClass();
+          if (base_class != nullptr) {
+            info_.StartTag(DW_TAG_inheritance);
+            WriteLazyType(base_class->GetDescriptor(&descriptor_string));
+            info_.WriteUdata(DW_AT_data_member_location, 0);
+            info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
+            info_.EndTag();  // DW_TAG_inheritance.
+          }
+
+          // Member variables.
+          for (uint32_t i = 0, count = type->NumInstanceFields(); i < count; ++i) {
+            ArtField* field = type->GetInstanceField(i);
+            info_.StartTag(DW_TAG_member);
+            WriteName(field->GetName());
+            WriteLazyType(field->GetTypeDescriptor());
+            info_.WriteUdata(DW_AT_data_member_location, field->GetOffset().Uint32Value());
+            uint32_t access_flags = field->GetAccessFlags();
+            if (access_flags & kAccPublic) {
+              info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
+            } else if (access_flags & kAccProtected) {
+              info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_protected);
+            } else if (access_flags & kAccPrivate) {
+              info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private);
+            }
+            info_.EndTag();  // DW_TAG_member.
+          }
+
+          EndClassTag(desc);
+        }
+      }
+
+      CHECK_EQ(info_.Depth(), 1);
+      FinishLazyTypes();
+      info_.EndTag();  // DW_TAG_compile_unit.
+      std::vector<uint8_t> buffer;
+      buffer.reserve(info_.data()->size() + KB);
+      const size_t offset = owner_->builder_->GetDebugInfo()->GetSize();
+      const size_t debug_abbrev_offset =
+          owner_->debug_abbrev_.Insert(debug_abbrev_.data(), debug_abbrev_.size());
+      WriteDebugInfoCU(debug_abbrev_offset, info_, offset, &buffer, &owner_->debug_info_patches_);
+      owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
+    }
+
     // Write table into .debug_loc which describes location of dex register.
     // The dex register might be valid only at some points and it might
     // move between machine registers and stack.
@@ -715,14 +822,14 @@
     // just define all types lazily at the end of compilation unit.
     void WriteLazyType(const char* type_descriptor) {
       if (type_descriptor != nullptr && type_descriptor[0] != 'V') {
-        lazy_types_.emplace(type_descriptor, info_.size());
+        lazy_types_.emplace(std::string(type_descriptor), info_.size());
         info_.WriteRef4(DW_AT_type, 0);
       }
     }
 
     void FinishLazyTypes() {
       for (const auto& lazy_type : lazy_types_) {
-        info_.UpdateUint32(lazy_type.second, WriteType(lazy_type.first));
+        info_.UpdateUint32(lazy_type.second, WriteTypeDeclaration(lazy_type.first));
       }
       lazy_types_.clear();
     }
@@ -747,30 +854,39 @@
 
     // Convert dex type descriptor to DWARF.
     // Returns offset in the compilation unit.
-    size_t WriteType(const char* desc) {
+    size_t WriteTypeDeclaration(const std::string& desc) {
+      DCHECK(!desc.empty());
       const auto& it = type_cache_.find(desc);
       if (it != type_cache_.end()) {
         return it->second;
       }
 
       size_t offset;
-      if (*desc == 'L') {
+      if (desc[0] == 'L') {
         // Class type. For example: Lpackage/name;
-        offset = StartClassTag(desc);
+        size_t class_offset = StartClassTag(desc.c_str());
         info_.WriteFlag(DW_AT_declaration, true);
-        EndClassTag(desc);
-      } else if (*desc == '[') {
+        EndClassTag(desc.c_str());
+        // Reference to the class type.
+        offset = info_.StartTag(DW_TAG_reference_type);
+        info_.WriteRef(DW_AT_type, class_offset);
+        info_.EndTag();
+      } else if (desc[0] == '[') {
         // Array type.
-        size_t element_type = WriteType(desc + 1);
-        offset = info_.StartTag(DW_TAG_array_type);
+        size_t element_type = WriteTypeDeclaration(desc.substr(1));
+        size_t array_type = info_.StartTag(DW_TAG_array_type);
+        info_.WriteFlag(DW_AT_declaration, true);
         info_.WriteRef(DW_AT_type, element_type);
         info_.EndTag();
+        offset = info_.StartTag(DW_TAG_reference_type);
+        info_.WriteRef4(DW_AT_type, array_type);
+        info_.EndTag();
       } else {
         // Primitive types.
         const char* name;
         uint32_t encoding;
         uint32_t byte_size;
-        switch (*desc) {
+        switch (desc[0]) {
         case 'B':
           name = "byte";
           encoding = DW_ATE_signed;
@@ -815,7 +931,7 @@
           LOG(FATAL) << "Void type should not be encoded";
           UNREACHABLE();
         default:
-          LOG(FATAL) << "Unknown dex type descriptor: " << desc;
+          LOG(FATAL) << "Unknown dex type descriptor: \"" << desc << "\"";
           UNREACHABLE();
         }
         offset = info_.StartTag(DW_TAG_base_type);
@@ -865,9 +981,10 @@
     // Temporary buffer to create and store the entries.
     DebugInfoEntryWriter<> info_;
     // Cache of already translated type descriptors.
-    std::map<const char*, size_t, CStringLess> type_cache_;  // type_desc -> definition_offset.
+    std::map<std::string, size_t> type_cache_;  // type_desc -> definition_offset.
     // 32-bit references which need to be resolved to a type later.
-    std::multimap<const char*, size_t, CStringLess> lazy_types_;  // type_desc -> patch_offset.
+    // A given type may be used multiple times.  Therefore we need a multimap.
+    std::multimap<std::string, size_t> lazy_types_;  // type_desc -> patch_offset.
   };
 
  public:
@@ -883,6 +1000,11 @@
     writer.Write(compilation_unit);
   }
 
+  void WriteTypes(const ArrayRef<mirror::Class*>& types) SHARED_REQUIRES(Locks::mutator_lock_) {
+    CompilationUnitWriter writer(this);
+    writer.Write(types);
+  }
+
   void End() {
     builder_->GetDebugInfo()->End();
     builder_->WritePatches(".debug_info.oat_patches",
@@ -924,7 +1046,9 @@
   // Returns the number of bytes written.
   size_t WriteCompilationUnit(CompilationUnit& compilation_unit) {
     const bool is64bit = Is64BitInstructionSet(builder_->GetIsa());
-    const Elf_Addr text_address = builder_->GetText()->GetAddress();
+    const Elf_Addr text_address = builder_->GetText()->Exists()
+        ? builder_->GetText()->GetAddress()
+        : 0;
 
     compilation_unit.debug_line_offset_ = builder_->GetDebugLine()->GetSize();
 
@@ -1102,9 +1226,27 @@
   std::vector<uintptr_t> debug_line_patches;
 };
 
+// Get all types loaded by the runtime.
+static std::vector<mirror::Class*> GetLoadedRuntimeTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
+  std::vector<mirror::Class*> result;
+  class CollectClasses : public ClassVisitor {
+   public:
+    virtual bool Visit(mirror::Class* klass) {
+      classes_->push_back(klass);
+      return true;
+    }
+    std::vector<mirror::Class*>* classes_;
+  };
+  CollectClasses visitor;
+  visitor.classes_ = &result;
+  Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
+  return result;
+}
+
 template<typename ElfTypes>
-void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
-                        const ArrayRef<const MethodDebugInfo>& method_infos) {
+static void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
+                               bool write_loaded_runtime_types,
+                               const ArrayRef<const MethodDebugInfo>& method_infos) {
   // Group the methods into compilation units based on source file.
   std::vector<CompilationUnit> compilation_units;
   const char* last_source_file = nullptr;
@@ -1122,7 +1264,7 @@
   }
 
   // Write .debug_line section.
-  {
+  if (!compilation_units.empty()) {
     DebugLineWriter<ElfTypes> line_writer(builder);
     line_writer.Start();
     for (auto& compilation_unit : compilation_units) {
@@ -1132,12 +1274,19 @@
   }
 
   // Write .debug_info section.
-  {
+  if (!compilation_units.empty() || write_loaded_runtime_types) {
     DebugInfoWriter<ElfTypes> info_writer(builder);
     info_writer.Start();
     for (const auto& compilation_unit : compilation_units) {
       info_writer.WriteCompilationUnit(compilation_unit);
     }
+    if (write_loaded_runtime_types) {
+      Thread* self = Thread::Current();
+      // The lock prevents the classes from being moved by the GC.
+      ReaderMutexLock mu(self, *Locks::mutator_lock_);
+      std::vector<mirror::Class*> types = GetLoadedRuntimeTypes();
+      info_writer.WriteTypes(ArrayRef<mirror::Class*>(types.data(), types.size()));
+    }
     info_writer.End();
   }
 }
@@ -1173,11 +1322,13 @@
       name += " [DEDUPED]";
     }
 
+    const auto* text = builder->GetText()->Exists() ? builder->GetText() : nullptr;
+    const bool is_relative = (text != nullptr);
     uint32_t low_pc = info.low_pc_;
     // Add in code delta, e.g., thumb bit 0 for Thumb2 code.
     low_pc += info.compiled_method_->CodeDelta();
-    symtab->Add(strtab->Write(name), builder->GetText(), low_pc,
-                true, info.high_pc_ - info.low_pc_, STB_GLOBAL, STT_FUNC);
+    symtab->Add(strtab->Write(name), text, low_pc,
+                is_relative, info.high_pc_ - info.low_pc_, STB_GLOBAL, STT_FUNC);
 
     // Conforming to aaelf, add $t mapping symbol to indicate start of a sequence of thumb2
     // instructions, so that disassembler tools can correctly disassemble.
@@ -1185,8 +1336,8 @@
     // requires it to match function symbol.  Just address 0 does not work.
     if (info.compiled_method_->GetInstructionSet() == kThumb2) {
       if (!generated_mapping_symbol || !kGenerateSingleArmMappingSymbol) {
-        symtab->Add(strtab->Write("$t"), builder->GetText(), info.low_pc_ & ~1,
-                    true, 0, STB_LOCAL, STT_NOTYPE);
+        symtab->Add(strtab->Write("$t"), text, info.low_pc_ & ~1,
+                    is_relative, 0, STB_LOCAL, STT_NOTYPE);
         generated_mapping_symbol = true;
       }
     }
@@ -1202,25 +1353,89 @@
 
 template <typename ElfTypes>
 void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
+                    bool write_loaded_runtime_types,
                     const ArrayRef<const MethodDebugInfo>& method_infos,
                     CFIFormat cfi_format) {
-  if (!method_infos.empty()) {
-    // Add methods to .symtab.
-    WriteDebugSymbols(builder, method_infos);
-    // Generate CFI (stack unwinding information).
-    WriteCFISection(builder, method_infos, cfi_format);
-    // Write DWARF .debug_* sections.
-    WriteDebugSections(builder, method_infos);
+  // Add methods to .symtab.
+  WriteDebugSymbols(builder, method_infos);
+  // Generate CFI (stack unwinding information).
+  WriteCFISection(builder, method_infos, cfi_format);
+  // Write DWARF .debug_* sections.
+  WriteDebugSections(builder, write_loaded_runtime_types, method_infos);
+}
+
+template <typename ElfTypes>
+static ArrayRef<const uint8_t> WriteDebugElfFileForMethodInternal(
+    const dwarf::MethodDebugInfo& method_info) {
+  const InstructionSet isa = method_info.compiled_method_->GetInstructionSet();
+  std::vector<uint8_t> buffer;
+  buffer.reserve(KB);
+  VectorOutputStream out("Debug ELF file", &buffer);
+  std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out));
+  builder->Start();
+  WriteDebugInfo(builder.get(),
+                 false,
+                 ArrayRef<const MethodDebugInfo>(&method_info, 1),
+                 DW_DEBUG_FRAME_FORMAT);
+  builder->End();
+  CHECK(builder->Good());
+  // Make a copy of the buffer.  We want to shrink it anyway.
+  uint8_t* result = new uint8_t[buffer.size()];
+  CHECK(result != nullptr);
+  memcpy(result, buffer.data(), buffer.size());
+  return ArrayRef<const uint8_t>(result, buffer.size());
+}
+
+ArrayRef<const uint8_t> WriteDebugElfFileForMethod(const dwarf::MethodDebugInfo& method_info) {
+  const InstructionSet isa = method_info.compiled_method_->GetInstructionSet();
+  if (Is64BitInstructionSet(isa)) {
+    return WriteDebugElfFileForMethodInternal<ElfTypes64>(method_info);
+  } else {
+    return WriteDebugElfFileForMethodInternal<ElfTypes32>(method_info);
+  }
+}
+
+template <typename ElfTypes>
+static ArrayRef<const uint8_t> WriteDebugElfFileForClassInternal(const InstructionSet isa,
+                                                                 mirror::Class* type)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  std::vector<uint8_t> buffer;
+  buffer.reserve(KB);
+  VectorOutputStream out("Debug ELF file", &buffer);
+  std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out));
+  builder->Start();
+
+  DebugInfoWriter<ElfTypes> info_writer(builder.get());
+  info_writer.Start();
+  info_writer.WriteTypes(ArrayRef<mirror::Class*>(&type, 1));
+  info_writer.End();
+
+  builder->End();
+  CHECK(builder->Good());
+  // Make a copy of the buffer.  We want to shrink it anyway.
+  uint8_t* result = new uint8_t[buffer.size()];
+  CHECK(result != nullptr);
+  memcpy(result, buffer.data(), buffer.size());
+  return ArrayRef<const uint8_t>(result, buffer.size());
+}
+
+ArrayRef<const uint8_t> WriteDebugElfFileForClass(const InstructionSet isa, mirror::Class* type) {
+  if (Is64BitInstructionSet(isa)) {
+    return WriteDebugElfFileForClassInternal<ElfTypes64>(isa, type);
+  } else {
+    return WriteDebugElfFileForClassInternal<ElfTypes32>(isa, type);
   }
 }
 
 // Explicit instantiations
 template void WriteDebugInfo<ElfTypes32>(
     ElfBuilder<ElfTypes32>* builder,
+    bool write_loaded_runtime_types,
     const ArrayRef<const MethodDebugInfo>& method_infos,
     CFIFormat cfi_format);
 template void WriteDebugInfo<ElfTypes64>(
     ElfBuilder<ElfTypes64>* builder,
+    bool write_loaded_runtime_types,
     const ArrayRef<const MethodDebugInfo>& method_infos,
     CFIFormat cfi_format);
 
diff --git a/compiler/elf_writer_debug.h b/compiler/elf_writer_debug.h
index 7ec0be1..91da00f 100644
--- a/compiler/elf_writer_debug.h
+++ b/compiler/elf_writer_debug.h
@@ -17,19 +17,30 @@
 #ifndef ART_COMPILER_ELF_WRITER_DEBUG_H_
 #define ART_COMPILER_ELF_WRITER_DEBUG_H_
 
-#include "elf_builder.h"
+#include "base/macros.h"
+#include "base/mutex.h"
 #include "dwarf/dwarf_constants.h"
-#include "oat_writer.h"
+#include "elf_builder.h"
 #include "utils/array_ref.h"
 
 namespace art {
+namespace mirror {
+class Class;
+}
 namespace dwarf {
+struct MethodDebugInfo;
 
 template <typename ElfTypes>
 void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
+                    bool write_loaded_runtime_types,
                     const ArrayRef<const MethodDebugInfo>& method_infos,
                     CFIFormat cfi_format);
 
+ArrayRef<const uint8_t> WriteDebugElfFileForMethod(const dwarf::MethodDebugInfo& method_info);
+
+ArrayRef<const uint8_t> WriteDebugElfFileForClass(const InstructionSet isa, mirror::Class* type)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
 }  // namespace dwarf
 }  // namespace art
 
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 7b1bdd7..a67f3bd 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -152,7 +152,7 @@
 void ElfWriterQuick<ElfTypes>::WriteDebugInfo(
     const ArrayRef<const dwarf::MethodDebugInfo>& method_infos) {
   if (compiler_options_->GetGenerateDebugInfo()) {
-    dwarf::WriteDebugInfo(builder_.get(), method_infos, kCFIFormat);
+    dwarf::WriteDebugInfo(builder_.get(), /* write_types */ true, method_infos, kCFIFormat);
   }
 }
 
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 7bc0635..c51e62e 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1511,7 +1511,7 @@
 }
 
 void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
-  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
 
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
   Primitive::Type type = instr->GetResultType();
@@ -1534,7 +1534,7 @@
 static constexpr size_t kMipsBitsPerWord = kMipsWordSize * kBitsPerByte;
 
 void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
-  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
   LocationSummary* locations = instr->GetLocations();
   Primitive::Type type = instr->GetType();
 
@@ -1544,28 +1544,49 @@
   int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0;
   uint32_t shift_mask = (type == Primitive::kPrimInt) ? kMaxIntShiftValue : kMaxLongShiftValue;
   uint32_t shift_value = rhs_imm & shift_mask;
-  // Is the INS (Insert Bit Field) instruction supported?
-  bool has_ins = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
+  // Are the INS (Insert Bit Field) and ROTR instructions supported?
+  bool has_ins_rotr = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
 
   switch (type) {
     case Primitive::kPrimInt: {
       Register dst = locations->Out().AsRegister<Register>();
       Register lhs = locations->InAt(0).AsRegister<Register>();
       if (use_imm) {
-        if (instr->IsShl()) {
+        if (shift_value == 0) {
+          if (dst != lhs) {
+            __ Move(dst, lhs);
+          }
+        } else if (instr->IsShl()) {
           __ Sll(dst, lhs, shift_value);
         } else if (instr->IsShr()) {
           __ Sra(dst, lhs, shift_value);
-        } else {
+        } else if (instr->IsUShr()) {
           __ Srl(dst, lhs, shift_value);
+        } else {
+          if (has_ins_rotr) {
+            __ Rotr(dst, lhs, shift_value);
+          } else {
+            __ Sll(TMP, lhs, (kMipsBitsPerWord - shift_value) & shift_mask);
+            __ Srl(dst, lhs, shift_value);
+            __ Or(dst, dst, TMP);
+          }
         }
       } else {
         if (instr->IsShl()) {
           __ Sllv(dst, lhs, rhs_reg);
         } else if (instr->IsShr()) {
           __ Srav(dst, lhs, rhs_reg);
-        } else {
+        } else if (instr->IsUShr()) {
           __ Srlv(dst, lhs, rhs_reg);
+        } else {
+          if (has_ins_rotr) {
+            __ Rotrv(dst, lhs, rhs_reg);
+          } else {
+            __ Subu(TMP, ZERO, rhs_reg);
+            __ Sllv(TMP, lhs, TMP);
+            __ Srlv(dst, lhs, rhs_reg);
+            __ Or(dst, dst, TMP);
+          }
         }
       }
       break;
@@ -1580,7 +1601,7 @@
           if (shift_value == 0) {
             codegen_->Move64(locations->Out(), locations->InAt(0));
           } else if (shift_value < kMipsBitsPerWord) {
-            if (has_ins) {
+            if (has_ins_rotr) {
               if (instr->IsShl()) {
                 __ Srl(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
                 __ Ins(dst_high, lhs_high, shift_value, kMipsBitsPerWord - shift_value);
@@ -1589,10 +1610,15 @@
                 __ Srl(dst_low, lhs_low, shift_value);
                 __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
                 __ Sra(dst_high, lhs_high, shift_value);
+              } else if (instr->IsUShr()) {
+                __ Srl(dst_low, lhs_low, shift_value);
+                __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
+                __ Srl(dst_high, lhs_high, shift_value);
               } else {
                 __ Srl(dst_low, lhs_low, shift_value);
                 __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
                 __ Srl(dst_high, lhs_high, shift_value);
+                __ Ins(dst_high, lhs_low, kMipsBitsPerWord - shift_value, shift_value);
               }
             } else {
               if (instr->IsShl()) {
@@ -1605,11 +1631,18 @@
                 __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
                 __ Srl(dst_low, lhs_low, shift_value);
                 __ Or(dst_low, dst_low, TMP);
-              } else {
+              } else if (instr->IsUShr()) {
                 __ Srl(dst_high, lhs_high, shift_value);
                 __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
                 __ Srl(dst_low, lhs_low, shift_value);
                 __ Or(dst_low, dst_low, TMP);
+              } else {
+                __ Srl(TMP, lhs_low, shift_value);
+                __ Sll(dst_low, lhs_high, kMipsBitsPerWord - shift_value);
+                __ Or(dst_low, dst_low, TMP);
+                __ Srl(TMP, lhs_high, shift_value);
+                __ Sll(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
+                __ Or(dst_high, dst_high, TMP);
               }
             }
           } else {
@@ -1620,9 +1653,29 @@
             } else if (instr->IsShr()) {
               __ Sra(dst_low, lhs_high, shift_value);
               __ Sra(dst_high, dst_low, kMipsBitsPerWord - 1);
-            } else {
+            } else if (instr->IsUShr()) {
               __ Srl(dst_low, lhs_high, shift_value);
               __ Move(dst_high, ZERO);
+            } else {
+              if (shift_value == 0) {
+                // 64-bit rotation by 32 is just a swap.
+                __ Move(dst_low, lhs_high);
+                __ Move(dst_high, lhs_low);
+              } else {
+                if (has_ins_rotr) {
+                  __ Srl(dst_low, lhs_high, shift_value);
+                  __ Ins(dst_low, lhs_low, kMipsBitsPerWord - shift_value, shift_value);
+                  __ Srl(dst_high, lhs_low, shift_value);
+                  __ Ins(dst_high, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
+                } else {
+                  __ Sll(TMP, lhs_low, kMipsBitsPerWord - shift_value);
+                  __ Srl(dst_low, lhs_high, shift_value);
+                  __ Or(dst_low, dst_low, TMP);
+                  __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
+                  __ Srl(dst_high, lhs_low, shift_value);
+                  __ Or(dst_high, dst_high, TMP);
+                }
+              }
             }
           }
       } else {
@@ -1649,7 +1702,7 @@
           __ Beqz(TMP, &done);
           __ Move(dst_low, dst_high);
           __ Sra(dst_high, dst_high, 31);
-        } else {
+        } else if (instr->IsUShr()) {
           __ Srlv(dst_high, lhs_high, rhs_reg);
           __ Nor(AT, ZERO, rhs_reg);
           __ Sll(TMP, lhs_high, 1);
@@ -1660,6 +1713,21 @@
           __ Beqz(TMP, &done);
           __ Move(dst_low, dst_high);
           __ Move(dst_high, ZERO);
+        } else {
+          __ Nor(AT, ZERO, rhs_reg);
+          __ Srlv(TMP, lhs_low, rhs_reg);
+          __ Sll(dst_low, lhs_high, 1);
+          __ Sllv(dst_low, dst_low, AT);
+          __ Or(dst_low, dst_low, TMP);
+          __ Srlv(TMP, lhs_high, rhs_reg);
+          __ Sll(dst_high, lhs_low, 1);
+          __ Sllv(dst_high, dst_high, AT);
+          __ Or(dst_high, dst_high, TMP);
+          __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+          __ Beqz(TMP, &done);
+          __ Move(TMP, dst_high);
+          __ Move(dst_high, dst_low);
+          __ Move(dst_low, TMP);
         }
         __ Bind(&done);
       }
@@ -4539,14 +4607,12 @@
   codegen_->GenerateFrameExit();
 }
 
-void LocationsBuilderMIPS::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unreachable";
-  UNREACHABLE();
+void LocationsBuilderMIPS::VisitRor(HRor* ror) {
+  HandleShift(ror);
 }
 
-void InstructionCodeGeneratorMIPS::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unreachable";
-  UNREACHABLE();
+void InstructionCodeGeneratorMIPS::VisitRor(HRor* ror) {
+  HandleShift(ror);
 }
 
 void LocationsBuilderMIPS::VisitShl(HShl* shl) {
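
For MIPS revisions without ROTR/ROTRV, the rotate is composed from two shifts and an OR (shift_value == 0 is handled separately by a plain move). A C-level equivalent of what the Sll/Srl/Or sequence computes in the 32-bit immediate case, shown only as an illustration:

    // Rotate right by n, 1 <= n <= 31 (n == 0 is handled by a move in the generated code).
    uint32_t RotateRight32(uint32_t x, uint32_t n) {
      return (x >> n) | (x << (32 - n));  // Srl into dst, Sll into TMP, then Or.
    }
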
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 7682ca7..9390fa6 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1247,7 +1247,7 @@
 }
 
 void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
-  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
 
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
   Primitive::Type type = instr->GetResultType();
@@ -1265,7 +1265,7 @@
 }
 
 void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
-  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
   LocationSummary* locations = instr->GetLocations();
   Primitive::Type type = instr->GetType();
 
@@ -1290,13 +1290,19 @@
           ? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
           : static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);
 
-        if (type == Primitive::kPrimInt) {
+        if (shift_value == 0) {
+          if (dst != lhs) {
+            __ Move(dst, lhs);
+          }
+        } else if (type == Primitive::kPrimInt) {
           if (instr->IsShl()) {
             __ Sll(dst, lhs, shift_value);
           } else if (instr->IsShr()) {
             __ Sra(dst, lhs, shift_value);
-          } else {
+          } else if (instr->IsUShr()) {
             __ Srl(dst, lhs, shift_value);
+          } else {
+            __ Rotr(dst, lhs, shift_value);
           }
         } else {
           if (shift_value < 32) {
@@ -1304,8 +1310,10 @@
               __ Dsll(dst, lhs, shift_value);
             } else if (instr->IsShr()) {
               __ Dsra(dst, lhs, shift_value);
-            } else {
+            } else if (instr->IsUShr()) {
               __ Dsrl(dst, lhs, shift_value);
+            } else {
+              __ Drotr(dst, lhs, shift_value);
             }
           } else {
             shift_value -= 32;
@@ -1313,8 +1321,10 @@
               __ Dsll32(dst, lhs, shift_value);
             } else if (instr->IsShr()) {
               __ Dsra32(dst, lhs, shift_value);
-            } else {
+            } else if (instr->IsUShr()) {
               __ Dsrl32(dst, lhs, shift_value);
+            } else {
+              __ Drotr32(dst, lhs, shift_value);
             }
           }
         }
@@ -1324,16 +1334,20 @@
             __ Sllv(dst, lhs, rhs_reg);
           } else if (instr->IsShr()) {
             __ Srav(dst, lhs, rhs_reg);
-          } else {
+          } else if (instr->IsUShr()) {
             __ Srlv(dst, lhs, rhs_reg);
+          } else {
+            __ Rotrv(dst, lhs, rhs_reg);
           }
         } else {
           if (instr->IsShl()) {
             __ Dsllv(dst, lhs, rhs_reg);
           } else if (instr->IsShr()) {
             __ Dsrav(dst, lhs, rhs_reg);
-          } else {
+          } else if (instr->IsUShr()) {
             __ Dsrlv(dst, lhs, rhs_reg);
+          } else {
+            __ Drotrv(dst, lhs, rhs_reg);
           }
         }
       }
@@ -3725,14 +3739,12 @@
   codegen_->GenerateFrameExit();
 }
 
-void LocationsBuilderMIPS64::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unreachable";
-  UNREACHABLE();
+void LocationsBuilderMIPS64::VisitRor(HRor* ror) {
+  HandleShift(ror);
 }
 
-void InstructionCodeGeneratorMIPS64::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unreachable";
-  UNREACHABLE();
+void InstructionCodeGeneratorMIPS64::VisitRor(HRor* ror) {
+  HandleShift(ror);
 }
 
 void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index c504ded..b90afb1 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -211,19 +211,6 @@
 
 // Try to replace a binary operation flanked by one UShr and one Shl with a bitfield rotation.
 bool InstructionSimplifierVisitor::TryReplaceWithRotate(HBinaryOperation* op) {
-  // This simplification is currently supported on x86, x86_64, ARM and ARM64.
-  // TODO: Implement it for MIPS/64.
-  const InstructionSet instruction_set = GetGraph()->GetInstructionSet();
-  switch (instruction_set) {
-    case kArm:
-    case kArm64:
-    case kThumb2:
-    case kX86:
-    case kX86_64:
-      break;
-    default:
-      return false;
-  }
   DCHECK(op->IsAdd() || op->IsXor() || op->IsOr());
   HInstruction* left = op->GetLeft();
   HInstruction* right = op->GetRight();
@@ -1261,19 +1248,6 @@
 void InstructionSimplifierVisitor::SimplifyRotate(HInvoke* invoke, bool is_left) {
   DCHECK(invoke->IsInvokeStaticOrDirect());
   DCHECK_EQ(invoke->GetOriginalInvokeType(), InvokeType::kStatic);
-  // This simplification is currently supported on x86, x86_64, ARM and ARM64.
-  // TODO: Implement it for MIPS/64.
-  const InstructionSet instruction_set = GetGraph()->GetInstructionSet();
-  switch (instruction_set) {
-    case kArm:
-    case kArm64:
-    case kThumb2:
-    case kX86:
-    case kX86_64:
-      break;
-    default:
-      return;
-  }
   HInstruction* value = invoke->InputAt(0);
   HInstruction* distance = invoke->InputAt(1);
   // Replace the invoke with an HRor.
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 1e6b3a1..b1fbf28 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -847,6 +847,9 @@
   }
 
   // Prevent reordering with prior memory operations.
+  // Emit a DMB ISH instruction instead of a DMB ISHST one, as the
+  // latter allows a preceding load to be delayed past the STXR
+  // instruction below.
   __ dmb(ISH);
 
   __ add(tmp_ptr, base, ShifterOperand(offset));
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index f723940..81cab86 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1035,7 +1035,11 @@
     __ Stlxr(tmp_32, value, MemOperand(tmp_ptr));
     __ Cbnz(tmp_32, &loop_head);
   } else {
-    __ Dmb(InnerShareable, BarrierWrites);
+    // Emit a `Dmb(InnerShareable, BarrierAll)` (DMB ISH) instruction
+    // instead of a `Dmb(InnerShareable, BarrierWrites)` (DMB ISHST)
+    // one, as the latter allows a preceding load to be delayed past
+    // the STXR instruction below.
+    __ Dmb(InnerShareable, BarrierAll);
     __ Bind(&loop_head);
     // TODO: When `type == Primitive::kPrimNot`, add a read barrier for
     // the reference stored in the object before attempting the CAS,
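
Both barrier changes address the same hazard: DMB ISHST orders only store-to-store, so a load issued before the barrier may still be reordered past the STXR that publishes the CAS result. A rough sketch of the schedule that the stronger DMB ISH rules out (illustrative, not code from this change):

    r = load field        ; expected to complete before the CAS becomes visible
    DMB ISHST             ; orders earlier stores only, not this load
    LDXR / ... / STXR     ; the store may become visible before the load completes
    ; With DMB ISH, the earlier load is ordered before the STXR as well.
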
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 6d4275d..8de9700 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2146,10 +2146,7 @@
                            IntrinsicExceptions exceptions) {
   intrinsic_ = intrinsic;
   IntrinsicOptimizations opt(this);
-  if (needs_env_or_cache == kNoEnvironmentOrCache) {
-    opt.SetDoesNotNeedDexCache();
-    opt.SetDoesNotNeedEnvironment();
-  }
+
   // Adjust method's side effects from intrinsic table.
   switch (side_effects) {
     case kNoSideEffects: SetSideEffects(SideEffects::None()); break;
@@ -2157,6 +2154,14 @@
     case kWriteSideEffects: SetSideEffects(SideEffects::AllWrites()); break;
     case kAllSideEffects: SetSideEffects(SideEffects::AllExceptGCDependency()); break;
   }
+
+  if (needs_env_or_cache == kNoEnvironmentOrCache) {
+    opt.SetDoesNotNeedDexCache();
+    opt.SetDoesNotNeedEnvironment();
+  } else {
+    // If we need an environment, that means there will be a call, which can trigger GC.
+    SetSideEffects(GetSideEffects().Union(SideEffects::CanTriggerGC()));
+  }
   // Adjust method's exception status from intrinsic table.
   switch (exceptions) {
     case kNoThrow: SetCanThrow(false); break;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index c06d164..b65d0f5 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1868,6 +1868,10 @@
     return false;
   }
 
+  virtual bool IsActualObject() const {
+    return GetType() == Primitive::kPrimNot;
+  }
+
   void SetReferenceTypeInfo(ReferenceTypeInfo rti);
 
   ReferenceTypeInfo GetReferenceTypeInfo() const {
@@ -2487,8 +2491,10 @@
 // Deoptimize to interpreter, upon checking a condition.
 class HDeoptimize : public HTemplateInstruction<1> {
  public:
+  // We set CanTriggerGC to prevent any intermediate address from being
+  // live at the point of the `HDeoptimize`.
   HDeoptimize(HInstruction* cond, uint32_t dex_pc)
-      : HTemplateInstruction(SideEffects::None(), dex_pc) {
+      : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc) {
     SetRawInputAt(0, cond);
   }
 
@@ -4526,8 +4532,10 @@
 
 class HNullCheck : public HExpression<1> {
  public:
+  // `HNullCheck` can trigger GC, as it may call the `NullPointerException`
+  // constructor.
   HNullCheck(HInstruction* value, uint32_t dex_pc)
-      : HExpression(value->GetType(), SideEffects::None(), dex_pc) {
+      : HExpression(value->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
     SetRawInputAt(0, value);
   }
 
@@ -4848,8 +4856,10 @@
 
 class HBoundsCheck : public HExpression<2> {
  public:
+  // `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException`
+  // constructor.
   HBoundsCheck(HInstruction* index, HInstruction* length, uint32_t dex_pc)
-      : HExpression(index->GetType(), SideEffects::None(), dex_pc) {
+      : HExpression(index->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
     DCHECK(index->GetType() == Primitive::kPrimInt);
     SetRawInputAt(0, index);
     SetRawInputAt(1, length);
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
index 18405f2..445cdab 100644
--- a/compiler/optimizing/nodes_arm64.h
+++ b/compiler/optimizing/nodes_arm64.h
@@ -107,6 +107,7 @@
 
   bool CanBeMoved() const OVERRIDE { return true; }
   bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
+  bool IsActualObject() const OVERRIDE { return false; }
 
   HInstruction* GetBaseAddress() const { return InputAt(0); }
   HInstruction* GetOffset() const { return InputAt(1); }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 3eb7274..988e32b 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -17,6 +17,7 @@
 #include "optimizing_compiler.h"
 
 #include <fstream>
+#include <memory>
 #include <stdint.h>
 
 #ifdef ART_ENABLE_CODEGEN_arm64
@@ -52,6 +53,8 @@
 #include "driver/compiler_driver-inl.h"
 #include "driver/compiler_options.h"
 #include "driver/dex_compilation_unit.h"
+#include "dwarf/method_debug_info.h"
+#include "elf_writer_debug.h"
 #include "elf_writer_quick.h"
 #include "graph_checker.h"
 #include "graph_visualizer.h"
@@ -60,6 +63,7 @@
 #include "inliner.h"
 #include "instruction_simplifier.h"
 #include "intrinsics.h"
+#include "jit/debugger_interface.h"
 #include "jit/jit_code_cache.h"
 #include "licm.h"
 #include "jni/quick/jni_compiler.h"
@@ -68,6 +72,7 @@
 #include "prepare_for_register_allocation.h"
 #include "reference_type_propagation.h"
 #include "register_allocator.h"
+#include "oat_quick_method_header.h"
 #include "sharpening.h"
 #include "side_effects_analysis.h"
 #include "ssa_builder.h"
@@ -968,6 +973,39 @@
     return false;
   }
 
+  if (GetCompilerDriver()->GetCompilerOptions().GetGenerateDebugInfo()) {
+    const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
+    const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
+    CompiledMethod compiled_method(
+        GetCompilerDriver(),
+        codegen->GetInstructionSet(),
+        ArrayRef<const uint8_t>(code_allocator.GetMemory()),
+        codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
+        codegen->GetCoreSpillMask(),
+        codegen->GetFpuSpillMask(),
+        ArrayRef<const SrcMapElem>(),
+        ArrayRef<const uint8_t>(),  // mapping_table.
+        ArrayRef<const uint8_t>(stack_map_data, stack_map_size),
+        ArrayRef<const uint8_t>(),  // native_gc_map.
+        ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
+        ArrayRef<const LinkerPatch>());
+    dwarf::MethodDebugInfo method_debug_info {
+        dex_file,
+        class_def_idx,
+        method_idx,
+        access_flags,
+        code_item,
+        false,  // deduped.
+        code_address,
+        code_address + code_allocator.GetSize(),
+        &compiled_method
+    };
+    ArrayRef<const uint8_t> elf_file = dwarf::WriteDebugElfFileForMethod(method_debug_info);
+    CreateJITCodeEntryForAddress(code_address,
+                                 std::unique_ptr<const uint8_t[]>(elf_file.data()),
+                                 elf_file.size());
+  }
+
   return true;
 }
 
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index d399bc2..eb0419b 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -1677,6 +1677,9 @@
 
       LocationSummary* locations = safepoint_position->GetLocations();
       if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
+        DCHECK(interval->GetDefinedBy()->IsActualObject())
+            << interval->GetDefinedBy()->DebugName()
+            << "@" << safepoint_position->GetInstruction()->DebugName();
         locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
       }
 
@@ -1689,6 +1692,9 @@
                       maximum_number_of_live_fp_registers_);
           }
           if (current->GetType() == Primitive::kPrimNot) {
+            DCHECK(interval->GetDefinedBy()->IsActualObject())
+                << interval->GetDefinedBy()->DebugName()
+                << "@" << safepoint_position->GetInstruction()->DebugName();
             locations->SetRegisterBit(source.reg());
           }
           break;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 32a237a..808643a 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -313,13 +313,12 @@
   UsageError("  -g");
   UsageError("  --generate-debug-info: Generate debug information for native debugging,");
   UsageError("      such as stack unwinding information, ELF symbols and DWARF sections.");
-  UsageError("      This generates all the available information. Unneeded parts can be");
-  UsageError("      stripped using standard command line tools such as strip or objcopy.");
-  UsageError("      (enabled by default in debug builds, disabled by default otherwise)");
+  UsageError("      If used without --native-debuggable, it will be best-effort only.");
+  UsageError("      This option does not affect the generated code. (disabled by default)");
   UsageError("");
   UsageError("  --no-generate-debug-info: Do not generate debug information for native debugging.");
   UsageError("");
-  UsageError("  --debuggable: Produce code debuggable with Java debugger. Implies -g.");
+  UsageError("  --debuggable: Produce code debuggable with Java debugger.");
   UsageError("");
   UsageError("  --native-debuggable: Produce code debuggable with native debugger (like LLDB).");
   UsageError("      Implies --debuggable.");
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index ee7b21c..f922687 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -56,7 +56,7 @@
   // R-type instructions.
   { kRTypeMask, 0, "sll", "DTA", },
   // 0, 1, movci
-  { kRTypeMask, 2, "srl", "DTA", },
+  { kRTypeMask | (0x1f << 21), 2, "srl", "DTA", },
   { kRTypeMask, 3, "sra", "DTA", },
   { kRTypeMask | (0x1f << 6), 4, "sllv", "DTS", },
   { kRTypeMask | (0x1f << 6), 6, "srlv", "DTS", },
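
The extra mask matters because SRL and ROTR share SPECIAL function code 2 and differ only in bits 25-21: all zeroes selects SRL, while a set bit 21 selects ROTR on R2 and above, so without masking those bits the disassembler would print ROTR as "srl". The patterns below restate that assumption about the MIPS32r2 encoding tables for illustration only:

    constexpr uint32_t kSrlPattern  = (0x00u << 26) | (0x00u << 21) | 0x02u;  // Bits 25-21 must be zero.
    constexpr uint32_t kRotrPattern = (0x00u << 26) | (0x01u << 21) | 0x02u;  // The "R" bit selects ROTR.
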
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 2cb2212..7170f73 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -378,8 +378,8 @@
           "memory");  // clobber.
 #elif defined(__mips__) && defined(__LP64__)
     __asm__ __volatile__ (
-        // Spill a0-a7 which we say we don't clobber. May contain args.
-        "daddiu $sp, $sp, -64\n\t"
+        // Spill a0-a7 and t0-t3 which we say we don't clobber. May contain args.
+        "daddiu $sp, $sp, -96\n\t"
         "sd $a0, 0($sp)\n\t"
         "sd $a1, 8($sp)\n\t"
         "sd $a2, 16($sp)\n\t"
@@ -388,6 +388,10 @@
         "sd $a5, 40($sp)\n\t"
         "sd $a6, 48($sp)\n\t"
         "sd $a7, 56($sp)\n\t"
+        "sd $t0, 64($sp)\n\t"
+        "sd $t1, 72($sp)\n\t"
+        "sd $t2, 80($sp)\n\t"
+        "sd $t3, 88($sp)\n\t"
 
         "daddiu $sp, $sp, -16\n\t"  // Reserve stack space, 16B aligned.
         "sd %[referrer], 0($sp)\n\t"
@@ -423,13 +427,17 @@
         "ld $a5, 40($sp)\n\t"
         "ld $a6, 48($sp)\n\t"
         "ld $a7, 56($sp)\n\t"
-        "daddiu $sp, $sp, 64\n\t"
+        "ld $t0, 64($sp)\n\t"
+        "ld $t1, 72($sp)\n\t"
+        "ld $t2, 80($sp)\n\t"
+        "ld $t3, 88($sp)\n\t"
+        "daddiu $sp, $sp, 96\n\t"
 
         "move %[result], $v0\n\t"   // Store the call result.
         : [result] "=r" (result)
         : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
           [referrer] "r"(referrer), [hidden] "r"(hidden)
-        : "at", "v0", "v1", "t0", "t1", "t2", "t3", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+        : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
           "t8", "t9", "k0", "k1", "fp", "ra",
           "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
           "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 969f5b9..cea7046 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -158,7 +158,7 @@
 
 template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
 using AllocationTrackingMultiMap = std::multimap<
-    Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>>;
+    Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>>;
 
 template<class Key, AllocatorTag kTag, class Compare = std::less<Key>>
 using AllocationTrackingSet = std::set<Key, Compare, TrackingAllocator<Key, kTag>>;
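
The std::pair<const Key, T> change matches what the container actually stores: the value_type of std::map and std::multimap has a const key, and an allocator declared for a mismatched value_type is rejected by stricter standard library implementations. A self-contained check illustrating the container's value_type:

    #include <map>
    #include <type_traits>
    #include <utility>
    static_assert(std::is_same<std::multimap<int, long>::value_type,
                               std::pair<const int, long>>::value,
                  "multimap stores pairs with a const key");
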
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 263f50d..f674a6f 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -75,6 +75,7 @@
   kReferenceQueueWeakReferencesLock,
   kReferenceQueueClearedReferencesLock,
   kReferenceProcessorLock,
+  kJitDebugInterfaceLock,
   kJitCodeCacheLock,
   kAllocSpaceLock,
   kBumpPointerSpaceBlockLock,
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 47e2e98..e7b4731 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -239,7 +239,7 @@
 }
 
 #if !defined(__clang__)
-#if defined(__arm__)
+#if defined(__arm__) && !defined(ART_USE_READ_BARRIER)
 // TODO: remove when all targets implemented.
 static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
 #else
@@ -247,7 +247,7 @@
 #endif
 #else
 // Clang 3.4 fails to build the goto interpreter implementation.
-#if defined(__arm__)
+#if defined(__arm__) && !defined(ART_USE_READ_BARRIER)
 static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
 #else
 static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImplKind;
@@ -294,7 +294,7 @@
         const instrumentation::Instrumentation* const instrumentation =
             Runtime::Current()->GetInstrumentation();
         while (true) {
-          if (instrumentation->IsActive()) {
+          if (instrumentation->IsActive() || !Runtime::Current()->IsStarted()) {
             // TODO: allow JIT profiling instrumentation.  Now, just punt on all instrumentation.
 #if !defined(__clang__)
             return ExecuteGotoImpl<false, false>(self, code_item, shadow_frame, result_register);
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 3c2898b..f08a1a9 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -16,6 +16,13 @@
 
 #include "debugger_interface.h"
 
+#include "base/logging.h"
+#include "base/mutex.h"
+#include "thread-inl.h"
+#include "thread.h"
+
+#include <unordered_map>
+
 namespace art {
 
 // -------------------------------------------------------------------
@@ -57,13 +64,19 @@
   JITDescriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
 }
 
-JITCodeEntry* CreateJITCodeEntry(const uint8_t *symfile_addr, uintptr_t symfile_size) {
+static Mutex g_jit_debug_mutex("JIT debug interface lock", kJitDebugInterfaceLock);
+
+static JITCodeEntry* CreateJITCodeEntryInternal(
+    std::unique_ptr<const uint8_t[]> symfile_addr,
+    uintptr_t symfile_size)
+    REQUIRES(g_jit_debug_mutex) {
+  DCHECK(symfile_addr.get() != nullptr);
+
   JITCodeEntry* entry = new JITCodeEntry;
-  entry->symfile_addr_ = symfile_addr;
+  entry->symfile_addr_ = symfile_addr.release();
   entry->symfile_size_ = symfile_size;
   entry->prev_ = nullptr;
 
-  // TODO: Do we need a lock here?
   entry->next_ = __jit_debug_descriptor.first_entry_;
   if (entry->next_ != nullptr) {
     entry->next_->prev_ = entry;
@@ -76,8 +89,7 @@
   return entry;
 }
 
-void DeleteJITCodeEntry(JITCodeEntry* entry) {
-  // TODO: Do we need a lock here?
+static void DeleteJITCodeEntryInternal(JITCodeEntry* entry) REQUIRES(g_jit_debug_mutex) {
   if (entry->prev_ != nullptr) {
     entry->prev_->next_ = entry->next_;
   } else {
@@ -91,7 +103,48 @@
   __jit_debug_descriptor.relevant_entry_ = entry;
   __jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
   __jit_debug_register_code();
+  delete[] entry->symfile_addr_;
   delete entry;
 }
 
+JITCodeEntry* CreateJITCodeEntry(std::unique_ptr<const uint8_t[]> symfile_addr,
+                                 uintptr_t symfile_size) {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, g_jit_debug_mutex);
+  return CreateJITCodeEntryInternal(std::move(symfile_addr), symfile_size);
+}
+
+void DeleteJITCodeEntry(JITCodeEntry* entry) {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, g_jit_debug_mutex);
+  DeleteJITCodeEntryInternal(entry);
+}
+
+// Mapping from address to entry.  It takes ownership of the entries
+// so that the user of the JIT interface does not have to store them.
+static std::unordered_map<uintptr_t, JITCodeEntry*> g_jit_code_entries;
+
+void CreateJITCodeEntryForAddress(uintptr_t address,
+                                  std::unique_ptr<const uint8_t[]> symfile_addr,
+                                  uintptr_t symfile_size) {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, g_jit_debug_mutex);
+  DCHECK_NE(address, 0u);
+  DCHECK(g_jit_code_entries.find(address) == g_jit_code_entries.end());
+  JITCodeEntry* entry = CreateJITCodeEntryInternal(std::move(symfile_addr), symfile_size);
+  g_jit_code_entries.emplace(address, entry);
+}
+
+bool DeleteJITCodeEntryForAddress(uintptr_t address) {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, g_jit_debug_mutex);
+  const auto& it = g_jit_code_entries.find(address);
+  if (it == g_jit_code_entries.end()) {
+    return false;
+  }
+  DeleteJITCodeEntryInternal(it->second);
+  g_jit_code_entries.erase(it);
+  return true;
+}
+
 }  // namespace art
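
The runtime side of the native-debugger support above relies on the GDB JIT compilation interface: the process keeps a doubly linked list of code entries hanging off a global __jit_debug_descriptor and calls __jit_debug_register_code() after every change so an attached debugger can re-read the list. The following is a minimal, stand-alone sketch of that handshake, not ART code; the type and symbol names follow the GDB documentation, and the symfile payload is a placeholder rather than a real in-memory ELF image.

// Minimal sketch of the GDB JIT interface handshake (assumption: names per the
// GDB docs; the symfile is a placeholder string, not a real ELF image).
#include <stdint.h>

extern "C" {
  typedef enum { JIT_NOACTION = 0, JIT_REGISTER_FN, JIT_UNREGISTER_FN } jit_actions_t;

  struct jit_code_entry {
    jit_code_entry* next_entry;
    jit_code_entry* prev_entry;
    const char* symfile_addr;
    uint64_t symfile_size;
  };

  struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    jit_code_entry* relevant_entry;
    jit_code_entry* first_entry;
  };

  // The debugger sets a breakpoint on this function; it must not be inlined away.
  void __attribute__((noinline)) __jit_debug_register_code() { __asm__(""); }

  // The version is set statically; the debugger may read it before main() runs.
  jit_descriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
}

int main() {
  static const char placeholder_symfile[] = "\177ELF";
  static jit_code_entry entry = { nullptr, nullptr, placeholder_symfile,
                                  sizeof(placeholder_symfile) };
  // Link the new entry at the head of the list and notify the debugger,
  // mirroring what CreateJITCodeEntryInternal() does above.
  entry.next_entry = __jit_debug_descriptor.first_entry;
  if (entry.next_entry != nullptr) {
    entry.next_entry->prev_entry = &entry;
  }
  __jit_debug_descriptor.first_entry = &entry;
  __jit_debug_descriptor.relevant_entry = &entry;
  __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
  __jit_debug_register_code();
  return 0;
}
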
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index a784ef5..74469a9 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
 
 #include <inttypes.h>
+#include <memory>
 
 namespace art {
 
@@ -26,11 +27,25 @@
 }
 
 // Notify native debugger about new JITed code by passing in-memory ELF.
-JITCodeEntry* CreateJITCodeEntry(const uint8_t *symfile_addr, uintptr_t symfile_size);
+// It takes ownership of the in-memory ELF file.
+JITCodeEntry* CreateJITCodeEntry(std::unique_ptr<const uint8_t[]> symfile_addr,
+                                 uintptr_t symfile_size);
 
 // Notify native debugger that JITed code has been removed.
+// It also releases the associated in-memory ELF file.
 void DeleteJITCodeEntry(JITCodeEntry* entry);
 
+// Notify native debugger about new JITed code by passing in-memory ELF.
+// The address is used only to uniquely identify the entry.
+// It takes ownership of the in-memory ELF file.
+void CreateJITCodeEntryForAddress(uintptr_t address,
+                                  std::unique_ptr<const uint8_t[]> symfile_addr,
+                                  uintptr_t symfile_size);
+
+// Notify native debugger that JITed code has been removed.
+// Returns false if entry for the given address was not found.
+bool DeleteJITCodeEntryForAddress(uintptr_t address);
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
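
The header now takes the symfile by std::unique_ptr and adds address-keyed variants so callers such as the JIT code cache can unregister by code address without holding on to JITCodeEntry pointers. Below is a rough, stand-alone sketch of that ownership and lookup pattern with simplified stand-in types; it is not the ART implementation.

// Sketch of the ownership/lookup pattern (assumption: simplified stand-in types).
#include <cstdint>
#include <memory>
#include <mutex>
#include <unordered_map>

namespace sketch {

struct CodeEntry {
  const uint8_t* symfile;   // owned by the registry once registered
  uintptr_t symfile_size;
};

std::mutex g_mutex;
std::unordered_map<uintptr_t, CodeEntry> g_entries;

void RegisterForAddress(uintptr_t address,
                        std::unique_ptr<const uint8_t[]> symfile,
                        uintptr_t symfile_size) {
  std::lock_guard<std::mutex> lock(g_mutex);
  // Ownership transfers into the registry, mirroring symfile_addr.release()
  // in CreateJITCodeEntryInternal().
  g_entries[address] = CodeEntry{symfile.release(), symfile_size};
}

bool UnregisterForAddress(uintptr_t address) {
  std::lock_guard<std::mutex> lock(g_mutex);
  auto it = g_entries.find(address);
  if (it == g_entries.end()) {
    return false;  // as DeleteJITCodeEntryForAddress() reports "not found"
  }
  delete[] it->second.symfile;  // the registry releases the in-memory ELF
  g_entries.erase(it);
  return true;
}

}  // namespace sketch

int main() {
  std::unique_ptr<const uint8_t[]> elf(new const uint8_t[16]());  // dummy image
  sketch::RegisterForAddress(0x1000, std::move(elf), 16);
  return sketch::UnregisterForAddress(0x1000) ? 0 : 1;
}
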
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index c260ca4..1ac57b1 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -21,6 +21,7 @@
 #include "art_method-inl.h"
 #include "base/stl_util.h"
 #include "base/time_utils.h"
+#include "debugger_interface.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/bitmap-inl.h"
 #include "jit/profiling_info.h"
@@ -215,6 +216,9 @@
   uintptr_t allocation = FromCodeToAllocation(code_ptr);
   const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
   const uint8_t* data = method_header->GetNativeGcMap();
+  // Notify native debugger that we are about to remove the code.
+  // It does nothing if we are not using native debugger.
+  DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));
   if (data != nullptr) {
     mspace_free(data_mspace_, const_cast<uint8_t*>(data));
   }
diff --git a/runtime/safe_map.h b/runtime/safe_map.h
index 4e62dda..a8b48ee 100644
--- a/runtime/safe_map.h
+++ b/runtime/safe_map.h
@@ -146,7 +146,7 @@
 
 template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
 class AllocationTrackingSafeMap : public SafeMap<
-    Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>> {
+    Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>> {
 };
 
 }  // namespace art
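
The one-character safe_map.h fix reflects a standard-library requirement: std::map<Key, T>::value_type is std::pair<const Key, T>, and the allocator supplied to the container is expected to be instantiated for exactly that type; strict implementations diagnose the mismatch. A minimal illustration, using std::allocator as a stand-in for TrackingAllocator:

// Why the allocator's value_type must name a const key (std::allocator used
// here as a stand-in for TrackingAllocator).
#include <functional>
#include <map>
#include <memory>
#include <utility>

// Matches std::map<int, long>::value_type, so this compiles everywhere.
using GoodMap =
    std::map<int, long, std::less<int>, std::allocator<std::pair<const int, long>>>;

// The pre-fix spelling; strict implementations reject the mismatched
// allocator value_type:
// using BadMap =
//     std::map<int, long, std::less<int>, std::allocator<std::pair<int, long>>>;

int main() {
  GoodMap m;
  m.emplace(1, 2L);
  return static_cast<int>(m.size()) - 1;  // 0 on success
}
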
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ae18819..fc1a445 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1145,6 +1145,7 @@
 void ThreadList::SuspendAllDaemonThreadsForShutdown() {
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::thread_list_lock_);
+  size_t daemons_left = 0;
   {  // Tell all the daemons it's time to suspend.
     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
     for (const auto& thread : list_) {
@@ -1153,12 +1154,21 @@
       CHECK(thread->IsDaemon()) << *thread;
       if (thread != self) {
         thread->ModifySuspendCount(self, +1, nullptr, false);
+        ++daemons_left;
       }
       // We are shutting down the runtime, set the JNI functions of all the JNIEnvs to be
       // the sleep forever one.
       thread->GetJniEnv()->SetFunctionsToRuntimeShutdownFunctions();
     }
   }
+  // If we have any daemons left, wait 200ms to give them a chance to get past points where they
+  // are about to access runtime state while not in a runnable state, for example in Monitor code
+  // or when waking up from a condition variable. TODO: Try and see if there is a better way to
+  // wait for daemon threads to be in a blocked state.
+  if (daemons_left > 0) {
+    static constexpr size_t kDaemonSleepTime = 200 * 1000;
+    usleep(kDaemonSleepTime);
+  }
   // Give the threads a chance to suspend, complaining if they're slow.
   bool have_complained = false;
   static constexpr size_t kTimeoutMicroseconds = 2000 * 1000;
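
The thread_list.cc hunk counts how many daemon threads were asked to suspend and, if any remain, sleeps briefly before the existing complain-and-wait loop starts. A simplified stand-alone sketch of that sequence (stand-in types, not ART code):

// Sketch of the shutdown grace period (assumption: Daemon is a stand-in type).
#include <unistd.h>
#include <cstddef>
#include <vector>

struct Daemon { bool suspend_requested = false; };

void SuspendDaemonsForShutdown(std::vector<Daemon>& daemons) {
  size_t daemons_left = 0;
  for (Daemon& d : daemons) {
    d.suspend_requested = true;  // stand-in for ModifySuspendCount(self, +1, ...)
    ++daemons_left;
  }
  if (daemons_left > 0) {
    // Give daemons that are mid-wakeup (e.g. leaving a condition-variable wait)
    // time to reach a blocked state before the runtime is torn down.
    static constexpr useconds_t kDaemonSleepTime = 200 * 1000;  // 200 ms
    usleep(kDaemonSleepTime);
  }
}

int main() {
  std::vector<Daemon> daemons(2);
  SuspendDaemonsForShutdown(daemons);
  return 0;
}
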
diff --git a/test/137-cfi/run b/test/137-cfi/run
index ecbbbc7..9c567b6 100755
--- a/test/137-cfi/run
+++ b/test/137-cfi/run
@@ -14,4 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-exec ${RUN} "$@"
+exec ${RUN} "$@" -Xcompiler-option --generate-debug-info
diff --git a/test/562-no-intermediate/expected.txt b/test/562-no-intermediate/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/562-no-intermediate/expected.txt
diff --git a/test/562-no-intermediate/info.txt b/test/562-no-intermediate/info.txt
new file mode 100644
index 0000000..4f21aeb
--- /dev/null
+++ b/test/562-no-intermediate/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler, checking that no
+intermediate address is kept live across a Java call.
diff --git a/test/562-no-intermediate/src/Main.java b/test/562-no-intermediate/src/Main.java
new file mode 100644
index 0000000..3b74d6f
--- /dev/null
+++ b/test/562-no-intermediate/src/Main.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  /// CHECK-START-ARM64: int Main.main(String[]) register_allocator (after)
+  /// CHECK-NOT: IntermediateAddress
+  public static void main(String[] args) {
+    array[index] += Math.cos(42);
+  }
+
+  static int index = 0;
+  static double[] array = new double[2];
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 53d0f10..ee6b7aa 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -267,16 +267,6 @@
 
 TEST_ART_BROKEN_PREBUILD_RUN_TESTS :=
 
-# b/26483935
-TEST_ART_BROKEN_HOST_RUN_TESTS := \
-  132-daemon-locks-shutdown \
-
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,host,$(RUN_TYPES),$(PREBUILD_TYPES), \
-    $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
-    $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_HOST_RUN_TESTS), $(ALL_ADDRESS_SIZES))
-
-TEST_ART_BROKEN_HOST_RUN_TESTS :=
-
 # 143-string-value tests for a LOG(E) tag, which is only supported on host.
 TEST_ART_BROKEN_TARGET_RUN_TESTS := \
   143-string-value \