Mathieu Chartier | 1ca6890 | 2017-04-18 11:26:22 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2017 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
#include "verification.h"

#include <deque>
#include <iomanip>
#include <set>
#include <sstream>
#include <utility>

#include "art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-refvisitor-inl.h"
Mathieu Chartier | 1ca6890 | 2017-04-18 11:26:22 -0700 | [diff] [blame] | 25 | |
| 26 | namespace art { |
| 27 | namespace gc { |
| 28 | |
| 29 | std::string Verification::DumpObjectInfo(const void* addr, const char* tag) const { |
| 30 | std::ostringstream oss; |
| 31 | oss << tag << "=" << addr; |
| 32 | if (IsValidHeapObjectAddress(addr)) { |
| 33 | mirror::Object* obj = reinterpret_cast<mirror::Object*>(const_cast<void*>(addr)); |
| 34 | mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>(); |
| 35 | oss << " klass=" << klass; |
| 36 | if (IsValidClass(klass)) { |
| 37 | oss << "(" << klass->PrettyClass() << ")"; |
| 38 | if (klass->IsArrayClass<kVerifyNone, kWithoutReadBarrier>()) { |
| 39 | oss << " length=" << obj->AsArray<kVerifyNone, kWithoutReadBarrier>()->GetLength(); |
| 40 | } |
| 41 | } else { |
| 42 | oss << " <invalid address>"; |
| 43 | } |
| 44 | space::Space* const space = heap_->FindSpaceFromAddress(addr); |
| 45 | if (space != nullptr) { |
| 46 | oss << " space=" << *space; |
| 47 | } |
| 48 | accounting::CardTable* card_table = heap_->GetCardTable(); |
| 49 | if (card_table->AddrIsInCardTable(addr)) { |
| 50 | oss << " card=" << static_cast<size_t>( |
| 51 | card_table->GetCard(reinterpret_cast<const mirror::Object*>(addr))); |
| 52 | } |
| 53 | // Dump adjacent RAM. |
| 54 | const uintptr_t uint_addr = reinterpret_cast<uintptr_t>(addr); |
| 55 | static constexpr size_t kBytesBeforeAfter = 2 * kObjectAlignment; |
| 56 | const uintptr_t dump_start = uint_addr - kBytesBeforeAfter; |
| 57 | const uintptr_t dump_end = uint_addr + kBytesBeforeAfter; |
| 58 | if (dump_start < dump_end && |
| 59 | IsValidHeapObjectAddress(reinterpret_cast<const void*>(dump_start)) && |
| 60 | IsValidHeapObjectAddress(reinterpret_cast<const void*>(dump_end - kObjectAlignment))) { |
| 61 | oss << " adjacent_ram="; |
| 62 | for (uintptr_t p = dump_start; p < dump_end; ++p) { |
| 63 | if (p == uint_addr) { |
| 64 | // Marker of where the object is. |
| 65 | oss << "|"; |
| 66 | } |
| 67 | uint8_t* ptr = reinterpret_cast<uint8_t*>(p); |
| 68 | oss << std::hex << std::setfill('0') << std::setw(2) << static_cast<uintptr_t>(*ptr); |
| 69 | } |
| 70 | } |
| 71 | } else { |
| 72 | oss << " <invalid address>"; |
| 73 | } |
| 74 | return oss.str(); |
| 75 | } |
| 76 | |
| 77 | void Verification::LogHeapCorruption(ObjPtr<mirror::Object> holder, |
| 78 | MemberOffset offset, |
| 79 | mirror::Object* ref, |
| 80 | bool fatal) const { |
| 81 | // Lowest priority logging first: |
| 82 | PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT); |
| 83 | MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true); |
| 84 | // Buffer the output in the string stream since it is more important than the stack traces |
| 85 | // and we want it to have log priority. The stack traces are printed from Runtime::Abort |
| 86 | // which is called from LOG(FATAL) but before the abort message. |
| 87 | std::ostringstream oss; |
| 88 | oss << "GC tried to mark invalid reference " << ref << std::endl; |
| 89 | oss << DumpObjectInfo(ref, "ref") << "\n"; |
Mathieu Chartier | 4ce0c76 | 2017-05-18 10:01:07 -0700 | [diff] [blame] | 90 | oss << DumpObjectInfo(holder.Ptr(), "holder"); |
Mathieu Chartier | 1ca6890 | 2017-04-18 11:26:22 -0700 | [diff] [blame] | 91 | if (holder != nullptr) { |
Mathieu Chartier | 1ca6890 | 2017-04-18 11:26:22 -0700 | [diff] [blame] | 92 | mirror::Class* holder_klass = holder->GetClass<kVerifyNone, kWithoutReadBarrier>(); |
| 93 | if (IsValidClass(holder_klass)) { |
| 94 | oss << "field_offset=" << offset.Uint32Value(); |
| 95 | ArtField* field = holder->FindFieldByOffset(offset); |
| 96 | if (field != nullptr) { |
| 97 | oss << " name=" << field->GetName(); |
| 98 | } |
| 99 | } |
| 100 | } |
| 101 | |
| 102 | if (fatal) { |
| 103 | LOG(FATAL) << oss.str(); |
| 104 | } else { |
| 105 | LOG(FATAL_WITHOUT_ABORT) << oss.str(); |
| 106 | } |
| 107 | } |
| 108 | |
| 109 | bool Verification::IsValidHeapObjectAddress(const void* addr, space::Space** out_space) const { |
| 110 | if (!IsAligned<kObjectAlignment>(addr)) { |
| 111 | return false; |
| 112 | } |
| 113 | space::Space* const space = heap_->FindSpaceFromAddress(addr); |
| 114 | if (space != nullptr) { |
| 115 | if (out_space != nullptr) { |
| 116 | *out_space = space; |
| 117 | } |
| 118 | return true; |
| 119 | } |
| 120 | return false; |
| 121 | } |
| 122 | |
| 123 | bool Verification::IsValidClass(const void* addr) const { |
| 124 | if (!IsValidHeapObjectAddress(addr)) { |
| 125 | return false; |
| 126 | } |
| 127 | mirror::Class* klass = reinterpret_cast<mirror::Class*>(const_cast<void*>(addr)); |
| 128 | mirror::Class* k1 = klass->GetClass<kVerifyNone, kWithoutReadBarrier>(); |
| 129 | if (!IsValidHeapObjectAddress(k1)) { |
| 130 | return false; |
| 131 | } |
| 132 | // k should be class class, take the class again to verify. |
| 133 | // Note that this check may not be valid for the no image space since the class class might move |
| 134 | // around from moving GC. |
| 135 | mirror::Class* k2 = k1->GetClass<kVerifyNone, kWithoutReadBarrier>(); |
| 136 | if (!IsValidHeapObjectAddress(k2)) { |
| 137 | return false; |
| 138 | } |
| 139 | return k1 == k2; |
| 140 | } |
| 141 | |
// Shared types for the BFS machinery below: the set of objects already
// visited, and the queue of (object, path-so-far-description) pairs to expand.
using ObjectSet = std::set<mirror::Object*>;
using WorkQueue = std::deque<std::pair<mirror::Object*, std::string>>;
| 144 | |
| 145 | // Use for visiting the GcRoots held live by ArtFields, ArtMethods, and ClassLoaders. |
| 146 | class Verification::BFSFindReachable { |
| 147 | public: |
| 148 | explicit BFSFindReachable(ObjectSet* visited) : visited_(visited) {} |
| 149 | |
| 150 | void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const |
| 151 | REQUIRES_SHARED(Locks::mutator_lock_) { |
| 152 | ArtField* field = obj->FindFieldByOffset(offset); |
| 153 | Visit(obj->GetFieldObject<mirror::Object>(offset), |
| 154 | field != nullptr ? field->GetName() : ""); |
| 155 | } |
| 156 | |
| 157 | void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const |
| 158 | REQUIRES_SHARED(Locks::mutator_lock_) { |
| 159 | if (!root->IsNull()) { |
| 160 | VisitRoot(root); |
| 161 | } |
| 162 | } |
| 163 | |
| 164 | void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const |
| 165 | REQUIRES_SHARED(Locks::mutator_lock_) { |
| 166 | Visit(root->AsMirrorPtr(), "!nativeRoot"); |
| 167 | } |
| 168 | |
| 169 | void Visit(mirror::Object* ref, const std::string& field_name) const |
| 170 | REQUIRES_SHARED(Locks::mutator_lock_) { |
| 171 | if (ref != nullptr && visited_->insert(ref).second) { |
| 172 | new_visited_.emplace_back(ref, field_name); |
| 173 | } |
| 174 | } |
| 175 | |
| 176 | const WorkQueue& NewlyVisited() const { |
| 177 | return new_visited_; |
| 178 | } |
| 179 | |
| 180 | private: |
| 181 | ObjectSet* visited_; |
| 182 | mutable WorkQueue new_visited_; |
| 183 | }; |
| 184 | |
| 185 | class Verification::CollectRootVisitor : public SingleRootVisitor { |
| 186 | public: |
| 187 | CollectRootVisitor(ObjectSet* visited, WorkQueue* work) : visited_(visited), work_(work) {} |
| 188 | |
| 189 | void VisitRoot(mirror::Object* obj, const RootInfo& info) |
| 190 | OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { |
| 191 | if (obj != nullptr && visited_->insert(obj).second) { |
| 192 | std::ostringstream oss; |
| 193 | oss << info.ToString() << " = " << obj << "(" << obj->PrettyTypeOf() << ")"; |
| 194 | work_->emplace_back(obj, oss.str()); |
| 195 | } |
| 196 | } |
| 197 | |
| 198 | private: |
| 199 | ObjectSet* const visited_; |
| 200 | WorkQueue* const work_; |
| 201 | }; |
| 202 | |
| 203 | std::string Verification::FirstPathFromRootSet(ObjPtr<mirror::Object> target) const { |
| 204 | Runtime* const runtime = Runtime::Current(); |
| 205 | std::set<mirror::Object*> visited; |
| 206 | std::deque<std::pair<mirror::Object*, std::string>> work; |
| 207 | { |
| 208 | CollectRootVisitor root_visitor(&visited, &work); |
| 209 | runtime->VisitRoots(&root_visitor, kVisitRootFlagAllRoots); |
| 210 | } |
| 211 | while (!work.empty()) { |
| 212 | auto pair = work.front(); |
| 213 | work.pop_front(); |
| 214 | if (pair.first == target) { |
| 215 | return pair.second; |
| 216 | } |
| 217 | BFSFindReachable visitor(&visited); |
| 218 | pair.first->VisitReferences(visitor, VoidFunctor()); |
| 219 | for (auto&& pair2 : visitor.NewlyVisited()) { |
| 220 | std::ostringstream oss; |
| 221 | mirror::Object* obj = pair2.first; |
| 222 | oss << pair.second << " -> " << obj << "(" << obj->PrettyTypeOf() << ")." << pair2.second; |
| 223 | work.emplace_back(obj, oss.str()); |
| 224 | } |
| 225 | } |
| 226 | return "<no path found>"; |
| 227 | } |
| 228 | |
Mathieu Chartier | 1ca6890 | 2017-04-18 11:26:22 -0700 | [diff] [blame] | 229 | } // namespace gc |
| 230 | } // namespace art |