/*
 * Copyright (C) 2011 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/atomics.h>
#include <sys/system_properties.h>
#include <sys/mman.h>

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>
#include <unwind.h>
#include <unistd.h>

#include "private/bionic_tls.h"
#include "debug_mapinfo.h"
#include "debug_stacktrace.h"
#include "private/libc_logging.h"

/*
 * ===========================================================================
 *      Deadlock prediction
 * ===========================================================================
 */
/*
The idea is to predict the possibility of deadlock by recording the order
in which locks are acquired. If we see an attempt to acquire a lock
out of order, we can identify the locks and offending code.

To make this work, we need to keep track of the locks held by each thread,
and create history trees for each lock. When a thread tries to acquire
a new lock, we walk through the "history children" of the lock, looking
for a match with locks the thread already holds. If we find a match,
it means the thread has made a request that could result in a deadlock.

To support recursive locks, we always allow re-locking a currently-held
lock, and maintain a recursion depth count.

An ASCII-art example, where letters represent locks:

        A
       /|\
      / | \
     B  |  D
      \ |
       \|
        C

The above is the tree we'd have after handling lock synchronization
sequences "ABC", "AC", "AD". A has three children, {B, C, D}. C is also
a child of B. (The lines represent pointers between parent and child.
Every node can have multiple parents and multiple children.)

If we hold AC, and want to lock B, we recursively search through B's
children to see if A or C appears. It does, so we reject the attempt.
(A straightforward way to implement it: add a link from C to B, then
determine whether the graph starting at B contains a cycle.)

If we hold AC and want to lock D, we would succeed, creating a new link
from C to D.

Updates to MutexInfo structs are only allowed for the thread that holds
the lock, so we actually do most of our deadlock prediction work after
the lock has been acquired.
*/
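/*
Illustrative sketch (the mutex names and interleaving below are hypothetical,
not taken from this file): this is the kind of lock-order inversion the
checker is meant to flag, even on a run that happens not to deadlock.

    // Thread 1                      // Thread 2
    pthread_mutex_lock(&A);          pthread_mutex_lock(&B);
    pthread_mutex_lock(&B);          pthread_mutex_lock(&A);

After thread 1 runs, the history tree records the edge A -> B. When thread 2
then acquires A while already holding B, the edge B -> A is added and the
graph walk below finds the cycle A -> B -> A, so the acquisition is reported
as an illegal lock order.
*/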

#if PTHREAD_DEBUG_ENABLED

// =============================================================================
// log functions
// =============================================================================

#define LOGD(format, ...)  \
    __libc_format_log(ANDROID_LOG_DEBUG, "pthread_debug", (format), ##__VA_ARGS__ )

#define LOGW(format, ...)  \
    __libc_format_log(ANDROID_LOG_WARN, "pthread_debug", (format), ##__VA_ARGS__ )

#define LOGE(format, ...)  \
    __libc_format_log(ANDROID_LOG_ERROR, "pthread_debug", (format), ##__VA_ARGS__ )

#define LOGI(format, ...)  \
    __libc_format_log(ANDROID_LOG_INFO, "pthread_debug", (format), ##__VA_ARGS__ )

static const char* const kStartBanner =
        "===============================================================";

static const char* const kEndBanner =
        "===============================================================";

extern const char* __progname;

#define STACK_TRACE_DEPTH 16

/****************************************************************************/

/*
 * level <= 0 : deadlock prediction disabled
 * level    1 : deadlock prediction enabled, w/o call stacks
 * level    2 : deadlock prediction enabled w/ call stacks
 */
#define CAPTURE_CALLSTACK 2
static int g_pthread_debug_level = 0;
static pid_t g_pthread_debug_disabled_thread = -1;
static pthread_mutex_t g_dbg_lock = PTHREAD_MUTEX_INITIALIZER;

/****************************************************************************/

/* some simple/lame malloc replacement
 * NOT thread-safe and leaks everything
 */
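/* For example (illustrative): DbgAllocLocked<HashEntry>() below just bumps
 * g_dbg_alloc_offset by sizeof(HashEntry) within the current mmap'ed block,
 * grabbing a fresh DBG_ALLOC_BLOCK_SIZE block once the current one is full.
 * Nothing is ever unmapped, which is acceptable for this debug-only bookkeeping.
 */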

#define DBG_ALLOC_BLOCK_SIZE PAGESIZE
static size_t g_dbg_alloc_offset = DBG_ALLOC_BLOCK_SIZE;
static char* g_dbg_alloc_ptr = NULL;

template <typename T>
static T* DbgAllocLocked(size_t count = 1) {
    size_t size = sizeof(T) * count;
    if ((g_dbg_alloc_offset + size) > DBG_ALLOC_BLOCK_SIZE) {
        g_dbg_alloc_offset = 0;
        g_dbg_alloc_ptr = reinterpret_cast<char*>(mmap(NULL, DBG_ALLOC_BLOCK_SIZE,
                                                       PROT_READ|PROT_WRITE,
                                                       MAP_ANON | MAP_PRIVATE, 0, 0));
        if (g_dbg_alloc_ptr == MAP_FAILED) {
            return NULL;
        }
    }
    void* addr = g_dbg_alloc_ptr + g_dbg_alloc_offset;
    g_dbg_alloc_offset += size;
    return reinterpret_cast<T*>(addr);
}

static void* debug_realloc(void *ptr, size_t size, size_t old_size) {
    void* addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
                      MAP_ANON | MAP_PRIVATE, 0, 0);
    if (addr != MAP_FAILED) {
        if (ptr) {
            memcpy(addr, ptr, old_size);
            munmap(ptr, old_size);
        }
    } else {
        addr = NULL;
    }
    return addr;
}

/*****************************************************************************/

struct MutexInfo;

typedef struct CallStack {
    uintptr_t depth;
    uintptr_t* addrs;
} CallStack;

typedef struct MutexInfo* MutexInfoListEntry;
typedef struct CallStack  CallStackListEntry;

typedef struct GrowingList {
    int alloc;
    int count;
    union {
        void*               data;
        MutexInfoListEntry* list;
        CallStackListEntry* stack;
    };
} GrowingList;

typedef GrowingList MutexInfoList;
typedef GrowingList CallStackList;

typedef struct MutexInfo {
    // thread currently holding the lock or 0
    pid_t               owner;

    // most-recently-locked doubly-linked list
    struct MutexInfo*   prev;
    struct MutexInfo*   next;

    // for reentrant locks
    int                 lockCount;
    // when looking for loops in the graph, marks visited nodes
    int                 historyMark;
    // the actual mutex
    pthread_mutex_t*    mutex;
    // list of locks directly acquired AFTER this one in the same thread
    MutexInfoList       children;
    // list of locks directly acquired BEFORE this one in the same thread
    MutexInfoList       parents;
    // list of call stacks when a new link is established to this lock from its parent
    CallStackList       stacks;
    // call stack when this lock was acquired last
    int                 stackDepth;
    uintptr_t           stackTrace[STACK_TRACE_DEPTH];
} MutexInfo;

static void growingListInit(GrowingList* list) {
    list->alloc = 0;
    list->count = 0;
    list->data = NULL;
}

static void growingListAdd(GrowingList* pList, size_t objSize) {
    if (pList->count == pList->alloc) {
        size_t oldsize = pList->alloc * objSize;
        pList->alloc += PAGESIZE / objSize;
        size_t size = pList->alloc * objSize;
        pList->data = debug_realloc(pList->data, size, oldsize);
    }
    pList->count++;
}

static void initMutexInfo(MutexInfo* object, pthread_mutex_t* mutex) {
    object->owner = 0;
    object->prev = 0;
    object->next = 0;
    object->lockCount = 0;
    object->historyMark = 0;
    object->mutex = mutex;
    growingListInit(&object->children);
    growingListInit(&object->parents);
    growingListInit(&object->stacks);
    object->stackDepth = 0;
}

typedef struct ThreadInfo {
    pid_t       pid;
    MutexInfo*  mrl;
} ThreadInfo;

static void initThreadInfo(ThreadInfo* object, pid_t pid) {
    object->pid = pid;
    object->mrl = NULL;
}

/****************************************************************************/

static MutexInfo* get_mutex_info(pthread_mutex_t *mutex);
static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object);
static void mutex_unlock_checked(MutexInfo* object);

/****************************************************************************/

extern "C" int pthread_mutex_lock_impl(pthread_mutex_t *mutex);
extern "C" int pthread_mutex_unlock_impl(pthread_mutex_t *mutex);

static int pthread_mutex_lock_unchecked(pthread_mutex_t *mutex) {
    return pthread_mutex_lock_impl(mutex);
}

static int pthread_mutex_unlock_unchecked(pthread_mutex_t *mutex) {
    return pthread_mutex_unlock_impl(mutex);
}

/****************************************************************************/

static void dup_backtrace(CallStack* stack, size_t count, uintptr_t const* addrs) {
    stack->depth = count;
    stack->addrs = DbgAllocLocked<uintptr_t>(count);
    memcpy(stack->addrs, addrs, count * sizeof(uintptr_t));
}

/****************************************************************************/

static int historyListHas(
        const MutexInfoList* list, MutexInfo const * obj) {
    int i;
    for (i=0; i<list->count; i++) {
        if (list->list[i] == obj) {
            return i;
        }
    }
    return -1;
}

static void historyListAdd(MutexInfoList* pList, MutexInfo* obj) {
    growingListAdd(pList, sizeof(MutexInfoListEntry));
    pList->list[pList->count - 1] = obj;
}

static int historyListRemove(MutexInfoList* pList, MutexInfo* obj) {
    int i;
    for (i = pList->count-1; i >= 0; i--) {
        if (pList->list[i] == obj) {
            break;
        }
    }
    if (i < 0) {
        // not found!
        return 0;
    }

    if (i != pList->count-1) {
        // copy the last entry to the new free slot
        pList->list[i] = pList->list[pList->count-1];
    }
    pList->count--;
    memset(&pList->list[pList->count], 0, sizeof(MutexInfoListEntry));
    return 1;
}

static void linkParentToChild(MutexInfo* parent, MutexInfo* child) {
    historyListAdd(&parent->children, child);
    historyListAdd(&child->parents, parent);
}

static void unlinkParentFromChild(MutexInfo* parent, MutexInfo* child) {
    historyListRemove(&parent->children, child);
    historyListRemove(&child->parents, parent);
}

/****************************************************************************/

static void callstackListAdd(CallStackList* pList,
        int count, uintptr_t const* addrs) {
    growingListAdd(pList, sizeof(CallStackListEntry));
    dup_backtrace(&pList->stack[pList->count - 1], count, addrs);
}

/****************************************************************************/

/*
 * Recursively traverse the object hierarchy starting at "obj". We mark
 * ourselves on entry and clear the mark on exit. If we ever encounter
 * a marked object, we have a cycle.
 *
 * Returns "true" if all is well, "false" if we found a cycle.
 */
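/*
 * Worked example (tying this back to the A/B/C/D sketch near the top of the
 * file): a thread that already holds A and C and now asks for B causes
 * mutex_lock_checked() to add the edge C -> B and then call
 * traverseTree(B, C). The walk marks B, follows B's existing child C,
 * marks C, follows C's newly added child B, and finds B already marked --
 * the cycle B -> C -> B -- so the attempt is logged as an illegal lock order.
 */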

static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
{
    /*
     * Have we been here before?
     */
    if (obj->historyMark) {
        int stackDepth;
        uintptr_t addrs[STACK_TRACE_DEPTH];

        /* Turn off prediction temporarily in this thread while logging */
        g_pthread_debug_disabled_thread = gettid();

        backtrace_startup();

        LOGW("%s\n", kStartBanner);
        LOGW("pid: %d, tid: %d >>> %s <<<", getpid(), gettid(), __progname);
        LOGW("Illegal lock attempt:\n");
        LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
        stackDepth = get_backtrace(addrs, STACK_TRACE_DEPTH);
        log_backtrace(addrs, stackDepth);

        LOGW("+++ Currently held locks in this thread (in reverse order):");
        MutexInfo* cur = obj;
        pid_t ourtid = gettid();
        int i;
        for (i=0 ; i<cur->parents.count ; i++) {
            MutexInfo* parent = cur->parents.list[i];
            if (parent->owner == ourtid) {
                LOGW("--- pthread_mutex_t at %p\n", parent->mutex);
                if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
                    log_backtrace(parent->stackTrace, parent->stackDepth);
                }
                cur = parent;
                break;
            }
        }

        LOGW("+++ Earlier, the following lock order (from last to first) was established\n");
        return 0;
    }

    obj->historyMark = 1;

    MutexInfoList* pList = &obj->children;
    int result = 1;
    int i;
    for (i = pList->count-1; i >= 0; i--) {
        MutexInfo* child = pList->list[i];
        if (!traverseTree(child, obj)) {
            LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
            if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
                int index = historyListHas(&obj->parents, objParent);
                if ((size_t)index < (size_t)obj->stacks.count) {
                    log_backtrace(obj->stacks.stack[index].addrs, obj->stacks.stack[index].depth);
                } else {
                    log_backtrace(obj->stackTrace, obj->stackDepth);
                }
            }
            result = 0;
            break;
        }
    }

    obj->historyMark = 0;
    return result;
}

/****************************************************************************/

static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
{
    pid_t tid = gettid();
    if (object->owner == tid) {
        object->lockCount++;
        return;
    }

    object->owner = tid;
    object->lockCount = 0;

    if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
        // always record the call stack when acquiring a lock.
        // it's not efficient, but is useful during diagnostics
        object->stackDepth = get_backtrace(object->stackTrace, STACK_TRACE_DEPTH);
    }

    // no other locks held in this thread -- no deadlock possible!
    if (mrl == NULL)
        return;

    // check if the lock we're trying to acquire is a direct descendant of
    // the most recently locked mutex in this thread, in which case we're
    // in a good situation -- no deadlock possible
    if (historyListHas(&mrl->children, object) >= 0)
        return;

    pthread_mutex_lock_unchecked(&g_dbg_lock);

    linkParentToChild(mrl, object);
    if (!traverseTree(object, mrl)) {
        backtrace_shutdown();
        LOGW("%s\n", kEndBanner);
        unlinkParentFromChild(mrl, object);
        // reenable pthread debugging for this thread
        g_pthread_debug_disabled_thread = -1;
    } else {
        // record the call stack for this link
        // NOTE: the call stack is added at the same index
        // as mrl in object->parents[]
        // ie: object->parents.count == object->stacks.count, which is
        // also the index.
        if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
            callstackListAdd(&object->stacks,
                             object->stackDepth, object->stackTrace);
        }
    }

    pthread_mutex_unlock_unchecked(&g_dbg_lock);
}

static void mutex_unlock_checked(MutexInfo* object)
{
    pid_t tid = gettid();
    if (object->owner == tid) {
        if (object->lockCount == 0) {
            object->owner = 0;
        } else {
            object->lockCount--;
        }
    }
}


// =============================================================================
// Hash Table functions
// =============================================================================

/****************************************************************************/

#define HASHTABLE_SIZE      256

typedef struct HashEntry HashEntry;
struct HashEntry {
    size_t slot;
    HashEntry* prev;
    HashEntry* next;
    void* data;
};

typedef struct HashTable HashTable;
struct HashTable {
    HashEntry* slots[HASHTABLE_SIZE];
};

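// A brief description of the two tables below: g_mutex_map maps a
// pthread_mutex_t* to its MutexInfo record, and g_thread_map maps a thread id
// (gettid()) to its ThreadInfo record. Both are populated lazily under
// g_dbg_lock, using the leak-everything allocator above.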
static HashTable g_mutex_map;
static HashTable g_thread_map;

/****************************************************************************/

static uint32_t get_hashcode(void const * key, size_t keySize)
{
    uint32_t h = keySize;
    char const* data = (char const*)key;
    size_t i;
    for (i = 0; i < keySize; i++) {
        h = h * 31 + *data;
        data++;
    }
    return (uint32_t)h;
}

static size_t get_index(uint32_t h)
{
    // We apply this secondary hashing discovered by Doug Lea to defend
    // against bad hashes.
    h += ~(h << 9);
    h ^= (((unsigned int) h) >> 14);
    h += (h << 4);
    h ^= (((unsigned int) h) >> 10);
    return (size_t)h & (HASHTABLE_SIZE - 1);
}

/****************************************************************************/

static void hashmap_init(HashTable* table) {
    memset(table, 0, sizeof(HashTable));
}

static void hashmap_removeEntry(HashTable* table, HashEntry* entry)
{
    HashEntry* prev = entry->prev;
    HashEntry* next = entry->next;
    if (prev != NULL) entry->prev->next = next;
    if (next != NULL) entry->next->prev = prev;
    if (prev == NULL) {
        // we are the head of the list. set the head to be next
        table->slots[entry->slot] = entry->next;
    }
}

static HashEntry* hashmap_lookup(HashTable* table,
        void const* key, size_t ksize,
        int (*equals)(void const* data, void const* key))
{
    const uint32_t hash = get_hashcode(key, ksize);
    const size_t slot = get_index(hash);

    HashEntry* entry = table->slots[slot];
    while (entry) {
        if (equals(entry->data, key)) {
            break;
        }
        entry = entry->next;
    }

    if (entry == NULL) {
        // create a new entry
        entry = DbgAllocLocked<HashEntry>();
        entry->data = NULL;
        entry->slot = slot;
        entry->prev = NULL;
        entry->next = table->slots[slot];
        if (entry->next != NULL) {
            entry->next->prev = entry;
        }
        table->slots[slot] = entry;
    }
    return entry;
}

/****************************************************************************/

static int MutexInfo_equals(void const* data, void const* key) {
    return ((MutexInfo const *)data)->mutex == *(pthread_mutex_t **)key;
}

static MutexInfo* get_mutex_info(pthread_mutex_t *mutex)
{
    pthread_mutex_lock_unchecked(&g_dbg_lock);

    HashEntry* entry = hashmap_lookup(&g_mutex_map,
            &mutex, sizeof(mutex),
            &MutexInfo_equals);
    if (entry->data == NULL) {
        MutexInfo* mutex_info = DbgAllocLocked<MutexInfo>();
        entry->data = mutex_info;
        initMutexInfo(mutex_info, mutex);
    }

    pthread_mutex_unlock_unchecked(&g_dbg_lock);

    return (MutexInfo *)entry->data;
}

/****************************************************************************/

static int ThreadInfo_equals(void const* data, void const* key) {
    return ((ThreadInfo const *)data)->pid == *(pid_t *)key;
}

static ThreadInfo* get_thread_info(pid_t pid)
{
    pthread_mutex_lock_unchecked(&g_dbg_lock);

    HashEntry* entry = hashmap_lookup(&g_thread_map,
            &pid, sizeof(pid),
            &ThreadInfo_equals);
    if (entry->data == NULL) {
        ThreadInfo* thread_info = DbgAllocLocked<ThreadInfo>();
        entry->data = thread_info;
        initThreadInfo(thread_info, pid);
    }

    pthread_mutex_unlock_unchecked(&g_dbg_lock);

    return (ThreadInfo *)entry->data;
}

static void push_most_recently_locked(MutexInfo* mrl) {
    ThreadInfo* tinfo = get_thread_info(gettid());
    mrl->next = NULL;
    mrl->prev = tinfo->mrl;
    tinfo->mrl = mrl;
}

static void remove_most_recently_locked(MutexInfo* mrl) {
    ThreadInfo* tinfo = get_thread_info(gettid());
    if (mrl->next) {
        (mrl->next)->prev = mrl->prev;
    }
    if (mrl->prev) {
        (mrl->prev)->next = mrl->next;
    }
    if (tinfo->mrl == mrl) {
        tinfo->mrl = mrl->next;
    }
}

static MutexInfo* get_most_recently_locked() {
    ThreadInfo* tinfo = get_thread_info(gettid());
    return tinfo->mrl;
}

/****************************************************************************/

/*
 * See if we were allowed to grab the lock at this time. We do it
 * *after* acquiring the lock, rather than before, so that we can
 * freely update the MutexInfo struct. This seems counter-intuitive,
 * but our goal is deadlock *prediction* not deadlock *prevention*.
 * (If we actually deadlock, the situation is easy to diagnose from
 * a thread dump, so there's no point making a special effort to do
 * the checks before the lock is held.)
 */

extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex)
{
    if (g_pthread_debug_level == 0) return;
    // prediction disabled for this thread
    if (g_pthread_debug_disabled_thread == gettid())
        return;
    MutexInfo* object = get_mutex_info(mutex);
    MutexInfo* mrl = get_most_recently_locked();
    mutex_lock_checked(mrl, object);
    push_most_recently_locked(object);
}

/*
 * pthread_debug_mutex_unlock_check() must be called with the mutex
 * still held (ie: before calling the real unlock)
 */

extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex)
{
    if (g_pthread_debug_level == 0) return;
    // prediction disabled for this thread
    if (g_pthread_debug_disabled_thread == gettid())
        return;
    MutexInfo* object = get_mutex_info(mutex);
    remove_most_recently_locked(object);
    mutex_unlock_checked(object);
}

#endif // PTHREAD_DEBUG_ENABLED

// Called from libc_init_dynamic() just after system properties have been initialized.
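// Usage note (assuming a debuggable device where system properties can be set
// by hand): the checker is enabled by setting the system property read below
// before the target process starts, e.g.
//     adb shell setprop debug.libc.pthread 2
// where level 2 requests deadlock prediction with call stacks (see the level
// comment and CAPTURE_CALLSTACK above). The property is only read here, once,
// at process startup.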
extern "C" __LIBC_HIDDEN__ void pthread_debug_init() {
#if PTHREAD_DEBUG_ENABLED
    char env[PROP_VALUE_MAX];
    if (__system_property_get("debug.libc.pthread", env)) {
        int level = atoi(env);
        if (level) {
            LOGI("pthread deadlock detection level %d enabled for pid %d (%s)",
                 level, getpid(), __progname);
            hashmap_init(&g_mutex_map);
            g_pthread_debug_level = level;
        }
    }
#endif
}