/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger_interface.h"

#include <android-base/logging.h>

#include "base/array_ref.h"
#include "base/mutex.h"
#include "base/time_utils.h"
#include "dex/dex_file.h"
#include "thread-current-inl.h"
#include "thread.h"

#include <atomic>
#include <cstddef>
#include <map>

//
// Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
//
// See http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
//
// There are two ways for native tools to access the debug data safely:
//
// 1) Synchronously, by setting a breakpoint in the __*_debug_register_code
//    method, which is called after every modification of the linked list.
//    GDB does this, but it is complex to set up and it stops the process.
//
// 2) Asynchronously, by monitoring the action_seqlock_.
//   * The seqlock is a monotonically increasing counter which is incremented
//     before and after every modification of the linked list. An odd value of
//     the counter means the linked list is being modified (it is locked).
//   * The tool should read the value of the seqlock both before and after
//     copying the linked list. If the seqlock values match and are even,
//     the copy is consistent. Otherwise, the reader should try again.
//   * Note that using the data directly while it is being modified
//     might crash the tool. Therefore, the only safe way is to make
//     a copy and use the copy only after the seqlock has been checked.
//   * Note that the process might even free and munmap the data while
//     it is being copied, therefore the reader should either handle
//     SEGV or use OS calls to read the memory (e.g. process_vm_readv).
//   * The seqlock can be used to determine the number of modifications of
//     the linked list, which can be used to intelligently cache the data.
//     Note the possible overflow of the seqlock. It is intentionally
//     32-bit, since 64-bit atomics can be tricky on some architectures.
//   * The timestamps on the entry record the time when the entry was
//     created, which is relevant if the unwinding is not live and is
//     postponed until much later. All timestamps must be unique.
//   * Memory barriers are used to make it possible to reason about
//     the data even when it is being modified (e.g. the process crashed
//     while that data was locked, and thus it will never be unlocked).
//   * In particular, it should be possible to:
//     1) read the seqlock and then the linked list head pointer.
//     2) copy the entry and check that the seqlock has not changed.
//     3) copy the symfile and check that the seqlock has not changed.
//     4) go back to step 2 using the next pointer (if non-null).
//     This safely creates copies of all symfiles, although other data
//     might be inconsistent/unusable (e.g. prev_, action_timestamp_).
//     An illustrative reader-side sketch of this loop follows the
//     extern "C" block below.
//   * For full conformance with the C++ memory model, all seqlock-protected
//     accesses should be atomic. We currently do this in the more critical
//     cases. The rest will have to be fixed before attempting to run TSAN on
//     this code.
//

namespace art {
extern "C" {
  enum JITAction {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
  };

  struct JITCodeEntry {
    // Atomic to ensure the reader can always iterate over the linked list
    // (e.g. the process could crash in the middle of writing this field).
    std::atomic<JITCodeEntry*> next_;
    // Non-atomic. The reader should not use it. It is only used for deletion.
    JITCodeEntry* prev_;
    const uint8_t* symfile_addr_;
    uint64_t symfile_size_;  // Beware of the offset (12 on x86; but 16 on ARM32).

    // Android-specific fields:
    uint64_t register_timestamp_;  // CLOCK_MONOTONIC time of entry registration.
  };

  struct JITDescriptor {
    uint32_t version_ = 1;  // NB: GDB supports only version 1.
    uint32_t action_flag_ = JIT_NOACTION;  // One of the JITAction enum values.
    JITCodeEntry* relevant_entry_ = nullptr;  // The entry affected by the action.
    std::atomic<JITCodeEntry*> head_{nullptr};  // Head of the linked list of all entries.

    // Android-specific fields:
    uint8_t magic_[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '1'};
    uint32_t flags_ = 0;  // Reserved for future use. Must be 0.
    uint32_t sizeof_descriptor = sizeof(JITDescriptor);
    uint32_t sizeof_entry = sizeof(JITCodeEntry);
    std::atomic_uint32_t action_seqlock_{0};  // Incremented before and after any modification.
    uint64_t action_timestamp_ = 1;  // CLOCK_MONOTONIC time of last action.
  };

  // Check that std::atomic has the expected layout.
  static_assert(alignof(std::atomic_uint32_t) == alignof(uint32_t), "Weird alignment");
  static_assert(sizeof(std::atomic_uint32_t) == sizeof(uint32_t), "Weird size");
  static_assert(alignof(std::atomic<void*>) == alignof(void*), "Weird alignment");
  static_assert(sizeof(std::atomic<void*>) == sizeof(void*), "Weird size");

  // GDB may set a breakpoint here. We must ensure it is not removed or deduplicated.
  void __attribute__((noinline)) __jit_debug_register_code() {
    __asm__("");
  }

  // Alternatively, native tools may overwrite this field to execute a custom handler.
  void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;
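
  // For illustration, an attached tool that can inject code into this process
  // could repoint the field at its own hook, roughly as sketched below
  // (hypothetical names; ART itself never reassigns this pointer):
  //
  //   static void MyJitHook() { /* inspect __jit_debug_descriptor here */ }
  //   __jit_debug_register_code_ptr = MyJitHook;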

  // The root data structure describing all JITed methods.
  JITDescriptor __jit_debug_descriptor GUARDED_BY(*Locks::native_debug_interface_lock_) {};

  // The following globals mirror the ones above, but are used to register dex files.
  void __attribute__((noinline)) __dex_debug_register_code() {
    __asm__("");
  }
  void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
  JITDescriptor __dex_debug_descriptor GUARDED_BY(*Locks::native_debug_interface_lock_) {};
}
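
// An illustrative reader-side sketch of the asynchronous protocol described at
// the top of this file. It is never called by ART itself and it simplifies
// reality: it runs in-process, so unlike a real tool it cannot survive the
// symfile being freed and munmapped mid-copy (a real tool must catch SEGV or
// read the memory via OS calls such as process_vm_readv). The function name
// and the visitor callback are hypothetical.
__attribute__((unused))
static bool ForEachSymfileCopyForIllustration(
    const JITDescriptor& descriptor,
    void (*visit)(const uint8_t* symfile_copy, uint64_t size)) {
  // Step 1: read the seqlock and then the linked list head pointer.
  uint32_t seqlock = descriptor.action_seqlock_.load(std::memory_order_acquire);
  if ((seqlock & 1) != 0) {
    return false;  // Odd value: the list is locked. The caller should retry.
  }
  for (const JITCodeEntry* entry = descriptor.head_.load(std::memory_order_relaxed);
       entry != nullptr;) {
    // Step 2: copy the entry and check that the seqlock has not changed.
    const uint8_t* symfile_addr = entry->symfile_addr_;
    uint64_t symfile_size = entry->symfile_size_;
    const JITCodeEntry* next = entry->next_.load(std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_acquire);
    if (descriptor.action_seqlock_.load(std::memory_order_relaxed) != seqlock) {
      return false;  // Concurrent modification: the fields read above are suspect.
    }
    // Step 3: copy the symfile and check that the seqlock has not changed.
    uint8_t* copy = new uint8_t[symfile_size];
    memcpy(copy, symfile_addr, symfile_size);
    std::atomic_thread_fence(std::memory_order_acquire);
    if (descriptor.action_seqlock_.load(std::memory_order_relaxed) != seqlock) {
      delete[] copy;
      return false;  // Concurrent modification: the copied bytes are suspect.
    }
    visit(copy, symfile_size);
    delete[] copy;
    // Step 4: go back to step 2 using the next pointer (if non-null).
    entry = next;
  }
  return true;
}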

// Mark the descriptor as "locked", so native tools know the data is being modified.
static void ActionSeqlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 0u) << "Already locked";
  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
  // Ensure that any writes within the locked section cannot be reordered before the increment.
  std::atomic_thread_fence(std::memory_order_release);
}

// Mark the descriptor as "unlocked", so native tools know the data is safe to read.
static void ActionSequnlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 1u) << "Already unlocked";
  // Ensure that any writes within the locked section cannot be reordered after the increment.
  std::atomic_thread_fence(std::memory_order_release);
  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
}

static JITCodeEntry* CreateJITCodeEntryInternal(
    JITDescriptor& descriptor,
    void (*register_code_ptr)(),
    ArrayRef<const uint8_t> symfile,
    bool copy_symfile)
    REQUIRES(Locks::native_debug_interface_lock_) {
  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
  if (copy_symfile) {
    uint8_t* copy = new uint8_t[symfile.size()];
    CHECK(copy != nullptr);
    memcpy(copy, symfile.data(), symfile.size());
    symfile = ArrayRef<const uint8_t>(copy, symfile.size());
  }

  // Ensure the timestamp is monotonically increasing even in the presence of a
  // low-granularity system timer. This ensures each entry has a unique timestamp.
  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());

  JITCodeEntry* head = descriptor.head_.load(std::memory_order_relaxed);
  JITCodeEntry* entry = new JITCodeEntry;
  CHECK(entry != nullptr);
  entry->symfile_addr_ = symfile.data();
  entry->symfile_size_ = symfile.size();
  entry->prev_ = nullptr;
  entry->next_.store(head, std::memory_order_relaxed);
  entry->register_timestamp_ = timestamp;

  // We are going to modify the linked list, so take the seqlock.
  ActionSeqlock(descriptor);
  if (head != nullptr) {
    head->prev_ = entry;
  }
  descriptor.head_.store(entry, std::memory_order_relaxed);
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_REGISTER_FN;
  descriptor.action_timestamp_ = timestamp;
  ActionSequnlock(descriptor);

  (*register_code_ptr)();
  return entry;
}

static void DeleteJITCodeEntryInternal(
    JITDescriptor& descriptor,
    void (*register_code_ptr)(),
    JITCodeEntry* entry,
    bool free_symfile)
    REQUIRES(Locks::native_debug_interface_lock_) {
  CHECK(entry != nullptr);
  const uint8_t* symfile = entry->symfile_addr_;

  // Ensure the timestamp is monotonically increasing even in the presence of a
  // low-granularity system timer. This ensures each entry has a unique timestamp.
  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());

  // We are going to modify the linked list, so take the seqlock.
  ActionSeqlock(descriptor);
  JITCodeEntry* next = entry->next_.load(std::memory_order_relaxed);
  if (entry->prev_ != nullptr) {
    entry->prev_->next_.store(next, std::memory_order_relaxed);
  } else {
    descriptor.head_.store(next, std::memory_order_relaxed);
  }
  if (next != nullptr) {
    next->prev_ = entry->prev_;
  }
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_UNREGISTER_FN;
  descriptor.action_timestamp_ = timestamp;
  ActionSequnlock(descriptor);

  (*register_code_ptr)();

  // Ensure that the clear below cannot be reordered above the unlock above.
  std::atomic_thread_fence(std::memory_order_release);

  // Aggressively clear the entry as an extra check of the synchronisation.
  memset(entry, 0, sizeof(*entry));

  delete entry;
  if (free_symfile) {
    delete[] symfile;
  }
}

static std::map<const DexFile*, JITCodeEntry*> g_dex_debug_entries
    GUARDED_BY(*Locks::native_debug_interface_lock_);

void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, *Locks::native_debug_interface_lock_);
  DCHECK(dexfile != nullptr);
  // This is just a defensive check. The class linker should not register the dex file twice.
  if (g_dex_debug_entries.count(dexfile) == 0) {
    const ArrayRef<const uint8_t> symfile(dexfile->Begin(), dexfile->Size());
    JITCodeEntry* entry = CreateJITCodeEntryInternal(__dex_debug_descriptor,
                                                     __dex_debug_register_code_ptr,
                                                     symfile,
                                                     /*copy_symfile=*/ false);
    g_dex_debug_entries.emplace(dexfile, entry);
  }
}

void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, *Locks::native_debug_interface_lock_);
  auto it = g_dex_debug_entries.find(dexfile);
  // We register dex files in the class linker and free them in DexFile_closeDexFile, but
  // there might be cases where we load the dex file without using it in the class linker.
  if (it != g_dex_debug_entries.end()) {
    DeleteJITCodeEntryInternal(__dex_debug_descriptor,
                               __dex_debug_register_code_ptr,
                               /*entry=*/ it->second,
                               /*free_symfile=*/ false);
    g_dex_debug_entries.erase(it);
  }
}

// Mapping from handle to entry. Used to manage the lifetime of the entries.
static std::map<const void*, JITCodeEntry*> g_jit_debug_entries
    GUARDED_BY(*Locks::native_debug_interface_lock_);

void AddNativeDebugInfoForJit(Thread* self,
                              const void* code_ptr,
                              const std::vector<uint8_t>& symfile) {
  MutexLock mu(self, *Locks::native_debug_interface_lock_);
  DCHECK_NE(symfile.size(), 0u);

  JITCodeEntry* entry = CreateJITCodeEntryInternal(
      __jit_debug_descriptor,
      __jit_debug_register_code_ptr,
      ArrayRef<const uint8_t>(symfile),
      /*copy_symfile=*/ true);

  // We don't provide code_ptr for type debug info, which means we cannot free it later.
  // (This only happens when the --generate-debug-info flag is enabled for the purpose
  // of being debugged with gdb; it does not happen for debuggable apps by default.)
  if (code_ptr != nullptr) {
    bool ok = g_jit_debug_entries.emplace(code_ptr, entry).second;
    DCHECK(ok) << "Native debug entry already exists for " << std::hex << code_ptr;
  }
}

void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr) {
  MutexLock mu(self, *Locks::native_debug_interface_lock_);
  auto it = g_jit_debug_entries.find(code_ptr);
  // We generate JIT native debug info only if the right runtime flags are enabled,
  // but we try to remove it unconditionally whenever code is freed from the JIT cache.
  if (it != g_jit_debug_entries.end()) {
    DeleteJITCodeEntryInternal(__jit_debug_descriptor,
                               __jit_debug_register_code_ptr,
                               it->second,
                               /*free_symfile=*/ true);
    g_jit_debug_entries.erase(it);
  }
}

size_t GetJitMiniDebugInfoMemUsage() {
  MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
  size_t size = 0;
  for (auto entry : g_jit_debug_entries) {
    size += sizeof(JITCodeEntry) + entry.second->symfile_size_ + /*map entry*/ 4 * sizeof(void*);
  }
  return size;
}

}  // namespace art