/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger_interface.h"

#include <android-base/logging.h>

#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "dex/dex_file.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/jit_memory_region.h"
#include "runtime.h"
#include "thread-current-inl.h"
#include "thread.h"

#include <atomic>
#include <cstddef>

//
// Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
//
// See http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
//
// There are two ways for native tools to access the debug data safely:
//
// 1) Synchronously, by setting a breakpoint in the __*_debug_register_code
//    method, which is called after every modification of the linked list.
//    GDB does this, but it is complex to set up and it stops the process.
//
// 2) Asynchronously, by monitoring the action_seqlock_.
//   * The seqlock is a monotonically increasing counter which is incremented
//     before and after every modification of the linked list. An odd value of
//     the counter means the linked list is being modified (it is locked).
//   * The tool should read the value of the seqlock both before and after
//     copying the linked list. If the seqlock values match and are even,
//     the copy is consistent. Otherwise, the reader should try again.
//   * Note that using the data directly while it is being modified
//     might crash the tool. Therefore, the only safe way is to make
//     a copy and use the copy only after the seqlock has been checked.
//   * Note that the process might even free and munmap the data while
//     it is being copied, therefore the reader should either handle
//     SEGV or use OS calls to read the memory (e.g. process_vm_readv).
//   * The seqlock can be used to determine the number of modifications of
//     the linked list, which can be used to intelligently cache the data.
//     Note the possible overflow of the seqlock. It is intentionally
//     32-bit, since 64-bit atomics can be tricky on some architectures.
//   * The timestamps on the entry record the time when the entry was
//     created, which is relevant if the unwinding is not live and is
//     postponed until much later. All timestamps must be unique.
//   * Memory barriers are used to make it possible to reason about
//     the data even when it is being modified (e.g. the process crashed
//     while that data was locked, and thus it will never be unlocked).
//   * In particular, it should be possible to:
//     1) read the seqlock and then the linked list head pointer.
//     2) copy the entry and check that the seqlock has not changed.
//     3) copy the symfile and check that the seqlock has not changed.
//     4) go back to step 2 using the next pointer (if non-null).
//     This safely creates a copy of all symfiles, although other data
//     might be inconsistent/unusable (e.g. prev_, action_timestamp_).
//   * For full conformance with the C++ memory model, all seqlock
//     protected accesses should be atomic. We currently do this in the
//     more critical cases. The rest will have to be fixed before
//     attempting to run TSAN on this code.
//
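// As an illustration only (not part of the runtime), an in-process asynchronous
// reader following steps 1-4 above could look roughly like this. Here `descriptor`
// stands for __jit_debug_descriptor or __dex_debug_descriptor, SaveSymfile is a
// hypothetical consumer, and the memcpy stands in for reading raw target memory
// (an out-of-process tool would use process_vm_readv or handle SEGV instead of
// plain loads):
//
//   for (;;) {
//     uint32_t seq = descriptor.action_seqlock_.load(std::memory_order_acquire);
//     if ((seq & 1) != 0) {
//       continue;  // Odd value: a writer holds the lock; retry.
//     }
//     const JITCodeEntry* e = descriptor.head_.load(std::memory_order_acquire);   // Step 1.
//     bool ok = true;
//     while (ok && e != nullptr) {
//       JITCodeEntryPublic copy;
//       memcpy(&copy, e, sizeof(copy));                                            // Step 2.
//       ok = descriptor.action_seqlock_.load(std::memory_order_acquire) == seq;
//       if (!ok) break;
//       SaveSymfile(copy.symfile_addr_, copy.symfile_size_);                       // Step 3.
//       ok = descriptor.action_seqlock_.load(std::memory_order_acquire) == seq;
//       e = copy.next_.load(std::memory_order_relaxed);                            // Step 4.
//     }
//     if (ok) {
//       break;  // All copies were taken while the seqlock stayed even and unchanged.
//     }
//   }
//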

namespace art {

static Mutex g_jit_debug_lock("JIT native debug entries", kNativeDebugInterfaceLock);
static Mutex g_dex_debug_lock("DEX native debug entries", kNativeDebugInterfaceLock);

extern "C" {
  enum JITAction {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
  };

  // Public/stable binary interface.
  struct JITCodeEntryPublic {
    // Atomic to ensure the reader can always iterate over the linked list
    // (e.g. the process could crash in the middle of writing this field).
    std::atomic<JITCodeEntry*> next_;
    JITCodeEntry* prev_;           // For linked list deletion. Unused in readers.
    const uint8_t* symfile_addr_;  // Address of the in-memory ELF file.
    uint64_t symfile_size_;        // Beware of the offset (12 on x86; but 16 on ARM32).

    // Android-specific fields:
    uint64_t register_timestamp_;  // CLOCK_MONOTONIC time of entry registration.
  };

  // Implementation-specific fields (which can be used only in this file).
  struct JITCodeEntry : public JITCodeEntryPublic {
    // Unpacked entries: Code address of the symbol in the ELF file.
    // Packed entries: The start address of the covered memory range.
    const void* addr_ = nullptr;
    // Allow merging of ELF files to save space.
    // Packing drops advanced DWARF data, so it is not always desirable.
    bool allow_packing_ = false;
    // Whether this entry has been LZMA compressed.
    // Compression is expensive, so we don't always do it.
    bool is_compressed_ = false;
  };

  struct JITDescriptor {
    uint32_t version_ = 1;                      // NB: GDB supports only version 1.
    uint32_t action_flag_ = JIT_NOACTION;       // One of the JITAction enum values.
    JITCodeEntry* relevant_entry_ = nullptr;    // The entry affected by the action.
    std::atomic<JITCodeEntry*> head_{nullptr};  // Head of the linked list of all entries.

    // Android-specific fields:
    uint8_t magic_[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '1'};
    uint32_t flags_ = 0;  // Reserved for future use. Must be 0.
    uint32_t sizeof_descriptor = sizeof(JITDescriptor);
    uint32_t sizeof_entry = sizeof(JITCodeEntryPublic);
    std::atomic_uint32_t action_seqlock_{0};  // Incremented before and after any modification.
    uint64_t action_timestamp_ = 1;           // CLOCK_MONOTONIC time of last action.
  };

  // Check that std::atomic has the expected layout.
  static_assert(alignof(std::atomic_uint32_t) == alignof(uint32_t), "Weird alignment");
  static_assert(sizeof(std::atomic_uint32_t) == sizeof(uint32_t), "Weird size");
  static_assert(alignof(std::atomic<void*>) == alignof(void*), "Weird alignment");
  static_assert(sizeof(std::atomic<void*>) == sizeof(void*), "Weird size");

  // GDB may set a breakpoint here. We must ensure it is not removed or deduplicated.
  void __attribute__((noinline)) __jit_debug_register_code() {
    __asm__("");
  }

  // Alternatively, native tools may overwrite this field to execute a custom handler.
  void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;

  // The root data structure describing all JITed methods.
  JITDescriptor __jit_debug_descriptor GUARDED_BY(g_jit_debug_lock) {};

  // The following globals mirror the ones above, but are used to register dex files.
  void __attribute__((noinline)) __dex_debug_register_code() {
    __asm__("");
  }
  void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
  JITDescriptor __dex_debug_descriptor GUARDED_BY(g_dex_debug_lock) {};
}

struct DexNativeInfo {
  static constexpr bool kCopySymfileData = false;  // Just reference DEX files.
  static JITDescriptor& Descriptor() { return __dex_debug_descriptor; }
  static void NotifyNativeDebugger() { __dex_debug_register_code_ptr(); }
  static void* Alloc(size_t size) { return malloc(size); }
  static void Free(void* ptr) { free(ptr); }
};

struct JitNativeInfo {
  static constexpr bool kCopySymfileData = true;  // Copy debug info to JIT memory.
  static JITDescriptor& Descriptor() { return __jit_debug_descriptor; }
  static void NotifyNativeDebugger() { __jit_debug_register_code_ptr(); }
  static void* Alloc(size_t size) { return JitMemory()->AllocateData(size); }
  static void Free(void* ptr) { JitMemory()->FreeData(reinterpret_cast<uint8_t*>(ptr)); }

  static jit::JitMemoryRegion* JitMemory() ASSERT_CAPABILITY(Locks::jit_lock_) {
    Locks::jit_lock_->AssertHeld(Thread::Current());
    jit::JitCodeCache* jit_code_cache = Runtime::Current()->GetJitCodeCache();
    CHECK(jit_code_cache != nullptr);
    jit::JitMemoryRegion* memory = jit_code_cache->GetCurrentRegion();
    CHECK(memory->IsValid());
    return memory;
  }
};

ArrayRef<const uint8_t> GetJITCodeEntrySymFile(JITCodeEntry* entry) {
  return ArrayRef<const uint8_t>(entry->symfile_addr_, entry->symfile_size_);
}

// Mark the descriptor as "locked", so native tools know the data is being modified.
static void ActionSeqlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 0u) << "Already locked";
  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
  // Ensure that any writes within the locked section cannot be reordered before the increment.
  std::atomic_thread_fence(std::memory_order_release);
}

// Mark the descriptor as "unlocked", so native tools know the data is safe to read.
static void ActionSequnlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 1u) << "Already unlocked";
  // Ensure that any writes within the locked section cannot be reordered after the increment.
  std::atomic_thread_fence(std::memory_order_release);
  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
}

template<class NativeInfo>
static JITCodeEntry* CreateJITCodeEntryInternal(
    ArrayRef<const uint8_t> symfile,
    const void* addr = nullptr,
    bool allow_packing = false,
    bool is_compressed = false) {
  JITDescriptor& descriptor = NativeInfo::Descriptor();

  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
  uint8_t* copy = nullptr;
  if (NativeInfo::kCopySymfileData) {
    copy = reinterpret_cast<uint8_t*>(NativeInfo::Alloc(symfile.size()));
    if (copy == nullptr) {
      LOG(ERROR) << "Failed to allocate memory for native debug info";
      return nullptr;
    }
    memcpy(copy, symfile.data(), symfile.size());
    symfile = ArrayRef<const uint8_t>(copy, symfile.size());
  }

  // Ensure the timestamp is monotonically increasing even in the presence of a
  // low-granularity system timer. This ensures that each entry has a unique timestamp.
  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());

  JITCodeEntry* head = descriptor.head_.load(std::memory_order_relaxed);
  void* memory = NativeInfo::Alloc(sizeof(JITCodeEntry));
  if (memory == nullptr) {
    LOG(ERROR) << "Failed to allocate memory for native debug info";
    if (copy != nullptr) {
      NativeInfo::Free(copy);
    }
    return nullptr;
  }
  JITCodeEntry* entry = new (memory) JITCodeEntry();
  entry->symfile_addr_ = symfile.data();
  entry->symfile_size_ = symfile.size();
  entry->prev_ = nullptr;
  entry->next_.store(head, std::memory_order_relaxed);
  entry->register_timestamp_ = timestamp;
  entry->addr_ = addr;
  entry->allow_packing_ = allow_packing;
  entry->is_compressed_ = is_compressed;

  // We are going to modify the linked list, so take the seqlock.
  ActionSeqlock(descriptor);
  if (head != nullptr) {
    head->prev_ = entry;
  }
  descriptor.head_.store(entry, std::memory_order_relaxed);
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_REGISTER_FN;
  descriptor.action_timestamp_ = timestamp;
  ActionSequnlock(descriptor);

  NativeInfo::NotifyNativeDebugger();

  return entry;
}

template<class NativeInfo>
static void DeleteJITCodeEntryInternal(JITCodeEntry* entry) {
  CHECK(entry != nullptr);
  const uint8_t* symfile = entry->symfile_addr_;
  JITDescriptor& descriptor = NativeInfo::Descriptor();

  // Ensure the timestamp is monotonically increasing even in the presence of a
  // low-granularity system timer. This ensures that each entry has a unique timestamp.
  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());

  // We are going to modify the linked list, so take the seqlock.
  ActionSeqlock(descriptor);
  JITCodeEntry* next = entry->next_.load(std::memory_order_relaxed);
  if (entry->prev_ != nullptr) {
    entry->prev_->next_.store(next, std::memory_order_relaxed);
  } else {
    descriptor.head_.store(next, std::memory_order_relaxed);
  }
  if (next != nullptr) {
    next->prev_ = entry->prev_;
  }
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_UNREGISTER_FN;
  descriptor.action_timestamp_ = timestamp;
  ActionSequnlock(descriptor);

  NativeInfo::NotifyNativeDebugger();

  // Ensure that the clearing below cannot be reordered before the unlock above.
  std::atomic_thread_fence(std::memory_order_release);

  // Aggressively clear the entry as an extra check of the synchronisation.
  memset(entry, 0, sizeof(*entry));

  NativeInfo::Free(entry);
  if (NativeInfo::kCopySymfileData) {
    NativeInfo::Free(const_cast<uint8_t*>(symfile));
  }
}

void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  DCHECK(dexfile != nullptr);
  const ArrayRef<const uint8_t> symfile(dexfile->Begin(), dexfile->Size());
  CreateJITCodeEntryInternal<DexNativeInfo>(symfile);
}

void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  DCHECK(dexfile != nullptr);
  // We register dex files in the class linker and free them in DexFile_closeDexFile, but
  // there might be cases where we load the dex file without using it in the class linker.
  // On the other hand, a single dex file might also be used with different class-loaders.
  for (JITCodeEntry* entry = __dex_debug_descriptor.head_; entry != nullptr; ) {
    JITCodeEntry* next = entry->next_;  // Save next pointer before we free the memory.
    if (entry->symfile_addr_ == dexfile->Begin()) {
      DeleteJITCodeEntryInternal<DexNativeInfo>(entry);
    }
    entry = next;
  }
}

// Size of JIT code range covered by each packed JITCodeEntry.
static constexpr uint32_t kJitRepackGroupSize = 64 * KB;
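// For illustration only (the addresses are hypothetical): with 64 KB groups, methods whose
// code starts at 0x7000'0040 and 0x7000'ff00 both map to
// AlignDown(addr, kJitRepackGroupSize) == 0x7000'0000, so after repacking they are described
// by one merged JITCodeEntry covering that group.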

// Automatically call the repack method every 'n' new entries.
static constexpr uint32_t kJitRepackFrequency = 64;
static uint32_t g_jit_num_unpacked_entries = 0;

// Split the JIT code cache into groups of fixed size and create a single JITCodeEntry for each
// group. The start address of a method's code determines which group it belongs to. The end is
// irrelevant. New mini-debug-info entries will be merged if possible, and entries for GCed
// functions will be removed.
static void RepackEntries(bool compress, ArrayRef<const void*> removed)
    REQUIRES(g_jit_debug_lock) {
  DCHECK(std::is_sorted(removed.begin(), removed.end()));
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return;
  }

  // Collect entries that we want to pack.
  std::vector<JITCodeEntry*> entries;
  entries.reserve(2 * kJitRepackFrequency);
  for (JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr; it = it->next_) {
    if (it->allow_packing_) {
      if (!compress && it->is_compressed_ && removed.empty()) {
        continue;  // If we are not compressing, also avoid decompressing.
      }
      entries.push_back(it);
    }
  }
  auto cmp = [](JITCodeEntry* lhs, JITCodeEntry* rhs) { return lhs->addr_ < rhs->addr_; };
  std::sort(entries.begin(), entries.end(), cmp);  // Sort by address.

David Srbecky | 8fc2f95 | 2019-07-31 18:40:09 +0100 | [diff] [blame] | 361 | // Process the entries in groups (each spanning memory range of size kJitRepackGroupSize). |
  for (auto group_it = entries.begin(); group_it != entries.end();) {
    const void* group_ptr = AlignDown((*group_it)->addr_, kJitRepackGroupSize);
    const void* group_end = reinterpret_cast<const uint8_t*>(group_ptr) + kJitRepackGroupSize;

    // Find all entries in this group (each entry is an in-memory ELF file).
    auto begin = group_it;
    auto end = std::find_if(begin, entries.end(), [=](auto* e) { return e->addr_ >= group_end; });
    CHECK(end > begin);
    ArrayRef<JITCodeEntry*> elfs(&*begin, end - begin);

    // Find all symbols that have been removed in this memory range.
    auto removed_begin = std::lower_bound(removed.begin(), removed.end(), group_ptr);
    auto removed_end = std::lower_bound(removed.begin(), removed.end(), group_end);
    CHECK(removed_end >= removed_begin);
    ArrayRef<const void*> removed_subset(&*removed_begin, removed_end - removed_begin);

    // Bail out early if there is nothing to do for this group.
    if (elfs.size() == 1 && removed_subset.empty() && (*begin)->is_compressed_ == compress) {
      group_it = end;  // Go to next group.
      continue;
    }

    // Create a single new JITCodeEntry that covers this memory range.
    uint64_t start_time = MicroTime();
    size_t live_symbols;
    std::vector<uint8_t> packed = jit->GetJitCompiler()->PackElfFileForJIT(
        elfs, removed_subset, compress, &live_symbols);
    VLOG(jit)
        << "JIT mini-debug-info repacked"
        << " for " << group_ptr
        << " in " << MicroTime() - start_time << "us"
        << " elfs=" << elfs.size()
        << " dead=" << removed_subset.size()
        << " live=" << live_symbols
        << " size=" << packed.size() << (compress ? "(lzma)" : "");

    // Replace the old entries with the new one (with their lifetime temporally overlapping).
    CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(packed),
                                              /*addr_=*/ group_ptr,
                                              /*allow_packing_=*/ true,
                                              /*is_compressed_=*/ compress);
    for (auto it : elfs) {
      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
    }
    group_it = end;  // Go to next group.
  }
  g_jit_num_unpacked_entries = 0;
}

void AddNativeDebugInfoForJit(const void* code_ptr,
                              const std::vector<uint8_t>& symfile,
                              bool allow_packing) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  DCHECK_NE(symfile.size(), 0u);

  if (Runtime::Current()->IsZygote()) {
    return;  // TODO: Implement memory sharing with the zygote process.
  }

  CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(symfile),
                                            /*addr=*/ code_ptr,
                                            /*allow_packing=*/ allow_packing,
                                            /*is_compressed=*/ false);

  VLOG(jit)
      << "JIT mini-debug-info added"
      << " for " << code_ptr
      << " size=" << PrettySize(symfile.size());

  // Automatically repack entries on a regular basis to save space.
  // Pack (but don't compress) recent entries - this is cheap and reduces memory use by ~4x.
  // We delay compression until after GC since it is more expensive (and saves further ~4x).
  if (++g_jit_num_unpacked_entries >= kJitRepackFrequency) {
    RepackEntries(/*compress=*/ false, /*removed=*/ ArrayRef<const void*>());
  }
}

void RemoveNativeDebugInfoForJit(ArrayRef<const void*> removed) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  RepackEntries(/*compress=*/ true, removed);

  // Remove entries which are not allowed to be packed (each contains a single method).
  for (JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr; it = it->next_) {
    if (!it->allow_packing_ && std::binary_search(removed.begin(), removed.end(), it->addr_)) {
      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
    }
  }
}

size_t GetJitMiniDebugInfoMemUsage() {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  size_t size = 0;
  for (JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr; it = it->next_) {
    size += sizeof(JITCodeEntry) + it->symfile_size_;
  }
  return size;
}

Mutex* GetNativeDebugInfoLock() {
  return &g_jit_debug_lock;
}

}  // namespace art