Add custom SIGSEGV handler to help find heap corruption.

The new signal handler prints heap diagnostics when you get a SIGSEGV.
Added a fault message member in runtime which is modifiable by
Runtime::SetFaultMessage. When you get a SIGSEGV it will print out
whatever is stored in this string as well as the normal information.
This is useful for debugging heap corruption since it lets you see
which threads were in which methods when the last GC occurred.

Added some smarter object dumping logic when the faulting address is
in the heap.

Bug: 12934910

Change-Id: Ia72be2c39f70ad711cbd746d66fad2b617d5d29f
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index a4c9dea..4668a19 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -61,7 +61,8 @@
 namespace collector {
 
 static constexpr bool kProtectFromSpace = true;
-static constexpr bool kResetFromSpace = true;
+static constexpr bool kClearFromSpace = true;
+static constexpr bool kStoreStackTraces = false;
 
 // TODO: Unduplicate logic.
 void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
@@ -169,6 +170,19 @@
 }
 
 void SemiSpace::MarkingPhase() {
+  if (kStoreStackTraces) {
+    Locks::mutator_lock_->AssertExclusiveHeld(self_);
+    // Store the stack traces into the runtime fault string in case we get a heap corruption
+    // related crash later.
+    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
+    std::ostringstream oss;
+    Runtime* runtime = Runtime::Current();
+    runtime->GetThreadList()->DumpForSigQuit(oss);
+    runtime->GetThreadList()->DumpNativeStacks(oss);
+    runtime->SetFaultMessage(oss.str());
+    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
+  }
+
   if (generational_) {
     if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
         clear_soft_references_) {
@@ -353,19 +367,17 @@
     TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
     GetHeap()->UnBindBitmaps();
   }
-  // Release the memory used by the from space.
-  if (kResetFromSpace) {
-    // Clearing from space.
+  if (kClearFromSpace) {
+    // Release the memory used by the from space.
     from_space_->Clear();
   }
+  from_space_->Reset();
   // Protect the from space.
-  VLOG(heap)
-      << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
-      << reinterpret_cast<void*>(from_space_->Limit());
+  VLOG(heap) << "Protecting space " << *from_space_;
   if (kProtectFromSpace) {
-    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
+    from_space_->GetMemMap()->Protect(PROT_NONE);
   } else {
-    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
+    from_space_->GetMemMap()->Protect(PROT_READ);
   }
   if (saved_bytes_ > 0) {
     VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2e6d2c2..9ad21cf 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -318,6 +318,91 @@
   }
 }
 
+std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
+  if (!IsValidContinuousSpaceObjectAddress(klass)) {
+    return StringPrintf("<non heap address klass %p>", klass);
+  }
+  mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
+  if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
+    std::string result("[");
+    result += SafeGetClassDescriptor(component_type);
+    return result;
+  } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
+    return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
+  } else if (UNLIKELY(klass->IsProxyClass())) {
+    return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
+  } else {
+    mirror::DexCache* dex_cache = klass->GetDexCache();
+    if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
+      return StringPrintf("<non heap address dex_cache %p>", dex_cache);
+    }
+    const DexFile* dex_file = dex_cache->GetDexFile();
+    uint16_t class_def_idx = klass->GetDexClassDefIndex();
+    if (class_def_idx == DexFile::kDexNoIndex16) {
+      return "<class def not found>";
+    }
+    const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
+    const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
+    return dex_file->GetTypeDescriptor(type_id);
+  }
+}
+
+std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
+  if (obj == nullptr) {
+    return "null";
+  }
+  mirror::Class* klass = obj->GetClass<kVerifyNone>();
+  if (klass == nullptr) {
+    return "(class=null)";
+  }
+  std::string result(SafeGetClassDescriptor(klass));
+  if (obj->IsClass()) {
+    result += "<" + SafeGetClassDescriptor(obj->AsClass()) + ">";
+  }
+  return result;
+}
+
+void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
+  if (obj == nullptr) {
+    stream << "(obj=null)";
+    return;
+  }
+  if (IsAligned<kObjectAlignment>(obj)) {
+    space::Space* space = nullptr;
+    // Don't use find space since it only finds spaces which actually contain objects instead of
+    // spaces which may contain objects (e.g. cleared bump pointer spaces).
+    for (const auto& cur_space : continuous_spaces_) {
+      if (cur_space->HasAddress(obj)) {
+        space = cur_space;
+        break;
+      }
+    }
+    if (space == nullptr) {
+      if (allocator_mem_map_.get() == nullptr || !allocator_mem_map_->HasAddress(obj)) {
+        stream << "obj " << obj << " not a valid heap address";
+        return;
+      } else if (allocator_mem_map_.get() != nullptr) {
+        allocator_mem_map_->Protect(PROT_READ | PROT_WRITE);
+      }
+    }
+    // Unprotect all the spaces.
+    for (const auto& space : continuous_spaces_) {
+      mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
+    }
+    stream << "Object " << obj;
+    if (space != nullptr) {
+      stream << " in space " << *space;
+    }
+    mirror::Class* klass = obj->GetClass();
+    stream << "\nclass=" << klass;
+    if (klass != nullptr) {
+      stream << " type= " << SafePrettyTypeOf(obj);
+    }
+    // Re-protect the address we faulted on.
+    mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
+  }
+}
+
 bool Heap::IsCompilingBoot() const {
   for (const auto& space : continuous_spaces_) {
     if (space->IsImageSpace() || space->IsZygoteSpace()) {
@@ -809,16 +894,23 @@
   if (obj == nullptr) {
     return true;
   }
-  return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj);
+  return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
 }
 
 bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
   return FindContinuousSpaceFromObject(obj, true) != nullptr;
 }
 
-bool Heap::IsHeapAddress(const mirror::Object* obj) const {
-  // TODO: This might not work for large objects.
-  return FindSpaceFromObject(obj, true) != nullptr;
+bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
+  if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
+    return false;
+  }
+  for (const auto& space : continuous_spaces_) {
+    if (space->HasAddress(obj)) {
+      return true;
+    }
+  }
+  return false;
 }
 
 bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
@@ -1539,6 +1631,7 @@
 void Heap::SwapSemiSpaces() {
   // Swap the spaces so we allocate into the space which we just evacuated.
   std::swap(bump_pointer_space_, temp_space_);
+  bump_pointer_space_->Clear();
 }
 
 void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
@@ -1616,7 +1709,7 @@
     CHECK(temp_space_->IsEmpty());
     semi_space_collector_->SetFromSpace(bump_pointer_space_);
     semi_space_collector_->SetToSpace(temp_space_);
-    mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
+    temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
     collector = semi_space_collector_;
     gc_type = collector::kGcTypeFull;
   } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 2f227d0..b194d8d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -223,9 +223,6 @@
   bool IsValidObjectAddress(const mirror::Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Returns true if the address passed in is a heap address, doesn't need to be aligned.
-  bool IsHeapAddress(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
   // very slow.
   bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
@@ -519,6 +516,12 @@
 
   void DumpSpaces(std::ostream& stream = LOG(INFO));
 
+  // Dump object should only be used by the signal handler.
+  void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+  // Safe version of pretty type of which check to make sure objects are heap addresses.
+  std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
+  std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+
   // GC performance measuring
   void DumpGcPerformanceInfo(std::ostream& os);
 
@@ -600,6 +603,10 @@
   template <bool kGrow>
   bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
 
+  // Returns true if the address passed in is within the address range of a continuous space.
+  bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Pushes a list of cleared references out to the managed heap.
   void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 43674ea..fcd3b70 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -61,6 +61,9 @@
 void BumpPointerSpace::Clear() {
   // Release the pages back to the operating system.
   CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
+}
+
+void BumpPointerSpace::Reset() {
   // Reset the end of the space back to the beginning, we move the end forward as we allocate
   // objects.
   SetEnd(Begin());
@@ -75,8 +78,9 @@
 }
 
 void BumpPointerSpace::Dump(std::ostream& os) const {
-  os << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
-     << reinterpret_cast<void*>(Limit());
+  os << GetName() << " "
+      << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
+      << reinterpret_cast<void*>(Limit());
 }
 
 mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 476b833..2c9d35f 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -92,8 +92,11 @@
     return nullptr;
   }
 
-  // Clear the memory and reset the pointer to the start of the space.
-  void Clear() OVERRIDE LOCKS_EXCLUDED(block_lock_);
+  // Madvise the memory back to the OS.
+  void Clear() OVERRIDE;
+
+  // Reset the pointer to the start of the space.
+  void Reset() OVERRIDE LOCKS_EXCLUDED(block_lock_);
 
   void Dump(std::ostream& os) const;
 
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index caedaaf..b591486 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -281,12 +281,15 @@
 }
 
 void DlMallocSpace::Clear() {
-  // TODO: Delete and create new mspace here.
   madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
   GetLiveBitmap()->Clear();
   GetMarkBitmap()->Clear();
 }
 
+void DlMallocSpace::Reset() {
+  // TODO: Delete and create new mspace here.
+}
+
 #ifndef NDEBUG
 void DlMallocSpace::CheckMoreCoreForPrecondition() {
   lock_.AssertHeld(Thread::Current());
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 6ea10ad..4bf16ce 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -113,6 +113,7 @@
   uint64_t GetObjectsAllocated() OVERRIDE;
 
   void Clear() OVERRIDE;
+  void Reset() OVERRIDE;
 
   bool IsDlMallocSpace() const OVERRIDE {
     return true;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index fe8421d..fb621ea 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -304,12 +304,15 @@
 }
 
 void RosAllocSpace::Clear() {
-  // TODO: Delete and create new mspace here.
   madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
   GetLiveBitmap()->Clear();
   GetMarkBitmap()->Clear();
 }
 
+void RosAllocSpace::Reset() {
+  // TODO: Delete and create new mspace here.
+}
+
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index bd32196..5bc425d 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -80,6 +80,7 @@
   void SetFootprintLimit(size_t limit) OVERRIDE;
 
   void Clear() OVERRIDE;
+  void Reset() OVERRIDE;
   MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
                               byte* begin, byte* end, byte* limit, size_t growth_limit);
 
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 0f8f38a..37d7c80 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -399,6 +399,9 @@
   // Free all memory associated with this space.
   virtual void Clear() = 0;
 
+  // Reset the space back to an empty space.
+  virtual void Reset() = 0;
+
   accounting::SpaceBitmap* GetLiveBitmap() const {
     return live_bitmap_.get();
   }
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index a60ab38..d1c3d03 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -61,6 +61,10 @@
   LOG(FATAL) << "Unimplemented";
 }
 
+void ZygoteSpace::Reset() {
+  LOG(FATAL) << "Unimplemented";
+}
+
 ZygoteSpace::ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated)
     : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
                                  kGcRetentionPolicyFullCollect),
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 8cd1a9f..8880548 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -71,7 +71,8 @@
     return objects_allocated_;
   }
 
-  void Clear();
+  void Clear() OVERRIDE;
+  void Reset() OVERRIDE;
 
  protected:
   virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() {