blob: 79a193d6cbb6de3f0ab0cd7eef90df9464b9dfd3 [file] [log] [blame]
Mathias Agopian7c0c3792011-09-05 23:54:55 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include <sys/types.h>
30#include <sys/atomics.h>
31#include <sys/system_properties.h>
32#include <sys/mman.h>
33
Mathias Agopian7c0c3792011-09-05 23:54:55 -070034#include <stdint.h>
35#include <stdio.h>
36#include <stdlib.h>
37#include <errno.h>
38#include <pthread.h>
39#include <unwind.h>
40#include <unistd.h>
41
Elliott Hugheseb847bc2013-10-09 15:50:50 -070042#include "private/bionic_tls.h"
Elliott Hughes1e980b62013-01-17 18:36:06 -080043#include "debug_mapinfo.h"
44#include "debug_stacktrace.h"
Elliott Hugheseb847bc2013-10-09 15:50:50 -070045#include "private/libc_logging.h"
Mathias Agopian7c0c3792011-09-05 23:54:55 -070046
47/*
48 * ===========================================================================
49 * Deadlock prediction
50 * ===========================================================================
51 */
52/*
53The idea is to predict the possibility of deadlock by recording the order
54in which locks are acquired. If we see an attempt to acquire a lock
55out of order, we can identify the locks and offending code.
56
57To make this work, we need to keep track of the locks held by each thread,
58and create history trees for each lock. When a thread tries to acquire
59a new lock, we walk through the "history children" of the lock, looking
60for a match with locks the thread already holds. If we find a match,
61it means the thread has made a request that could result in a deadlock.
62
63To support recursive locks, we always allow re-locking a currently-held
64lock, and maintain a recursion depth count.
65
66An ASCII-art example, where letters represent locks:
67
68 A
69 /|\
70 / | \
71 B | D
72 \ |
73 \|
74 C
75
76The above is the tree we'd have after handling lock synchronization
77sequences "ABC", "AC", "AD". A has three children, {B, C, D}. C is also
78a child of B. (The lines represent pointers between parent and child.
79Every node can have multiple parents and multiple children.)
80
81If we hold AC, and want to lock B, we recursively search through B's
82children to see if A or C appears. It does, so we reject the attempt.
83(A straightforward way to implement it: add a link from C to B, then
84determine whether the graph starting at B contains a cycle.)
85
86If we hold AC and want to lock D, we would succeed, creating a new link
87from C to D.
88
89Updates to MutexInfo structs are only allowed for the thread that holds
90the lock, so we actually do most of our deadlock prediction work after
91the lock has been acquired.
92*/
93
94// =============================================================================
95// log functions
96// =============================================================================
97
98#define LOGD(format, ...) \
Elliott Hughes1e980b62013-01-17 18:36:06 -080099 __libc_format_log(ANDROID_LOG_DEBUG, "pthread_debug", (format), ##__VA_ARGS__ )
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700100
101#define LOGW(format, ...) \
Elliott Hughes1e980b62013-01-17 18:36:06 -0800102 __libc_format_log(ANDROID_LOG_WARN, "pthread_debug", (format), ##__VA_ARGS__ )
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700103
104#define LOGE(format, ...) \
Elliott Hughes1e980b62013-01-17 18:36:06 -0800105 __libc_format_log(ANDROID_LOG_ERROR, "pthread_debug", (format), ##__VA_ARGS__ )
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700106
107#define LOGI(format, ...) \
Elliott Hughes1e980b62013-01-17 18:36:06 -0800108 __libc_format_log(ANDROID_LOG_INFO, "pthread_debug", (format), ##__VA_ARGS__ )
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700109
110static const char* const kStartBanner =
111 "===============================================================";
112
113static const char* const kEndBanner =
114 "===============================================================";
115
Elliott Hughese4ccf5a2013-02-07 12:06:44 -0800116extern const char* __progname;
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700117
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700118#define STACK_TRACE_DEPTH 16
119
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700120/****************************************************************************/
121
122/*
123 * level <= 0 : deadlock prediction disabled
124 * level 1 : deadlock prediction enabled, w/o call stacks
125 * level 2 : deadlock prediction enabled w/ call stacks
126 */
127#define CAPTURE_CALLSTACK 2
128static int sPthreadDebugLevel = 0;
129static pid_t sPthreadDebugDisabledThread = -1;
130static pthread_mutex_t sDbgLock = PTHREAD_MUTEX_INITIALIZER;
131
132/****************************************************************************/
133
134/* some simple/lame malloc replacement
135 * NOT thread-safe and leaks everything
136 */
137
138#define DBG_ALLOC_BLOCK_SIZE PAGESIZE
139static size_t sDbgAllocOffset = DBG_ALLOC_BLOCK_SIZE;
140static char* sDbgAllocPtr = NULL;
141
Elliott Hughes1e980b62013-01-17 18:36:06 -0800142template <typename T>
143static T* DbgAllocLocked(size_t count = 1) {
144 size_t size = sizeof(T) * count;
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700145 if ((sDbgAllocOffset + size) > DBG_ALLOC_BLOCK_SIZE) {
146 sDbgAllocOffset = 0;
Elliott Hughes1e980b62013-01-17 18:36:06 -0800147 sDbgAllocPtr = reinterpret_cast<char*>(mmap(NULL, DBG_ALLOC_BLOCK_SIZE,
148 PROT_READ|PROT_WRITE,
149 MAP_ANON | MAP_PRIVATE, 0, 0));
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700150 if (sDbgAllocPtr == MAP_FAILED) {
151 return NULL;
152 }
153 }
154 void* addr = sDbgAllocPtr + sDbgAllocOffset;
155 sDbgAllocOffset += size;
Elliott Hughes1e980b62013-01-17 18:36:06 -0800156 return reinterpret_cast<T*>(addr);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700157}
158
/*
 * mmap-based realloc used by the growing lists: maps a fresh anonymous
 * region, copies the old contents over, and unmaps the old region.
 * Returns NULL on allocation failure (the old mapping is left intact).
 */
static void* debug_realloc(void *ptr, size_t size, size_t old_size) {
    // fd must be -1 with MAP_ANON for portability.
    void* addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
                      MAP_ANON | MAP_PRIVATE, -1, 0);
    if (addr == MAP_FAILED) {
        return NULL;
    }
    if (ptr != NULL) {
        // Copy only what fits: blindly copying old_size bytes would overrun
        // the new mapping when shrinking across a page boundary.
        memcpy(addr, ptr, (old_size < size) ? old_size : size);
        munmap(ptr, old_size);
    }
    return addr;
}
172
173/*****************************************************************************/
174
struct MutexInfo;

// A recorded backtrace: "depth" valid entries in "addrs".
typedef struct CallStack {
    uintptr_t depth;
    uintptr_t* addrs;
} CallStack;

typedef struct MutexInfo* MutexInfoListEntry;
typedef struct CallStack CallStackListEntry;

// Generic growable array backed by debug_realloc(). The union lets the same
// storage be viewed as MutexInfo pointers or as CallStacks.
typedef struct GrowingList {
    int alloc;   // slots allocated
    int count;   // slots in use
    union {
        void* data;
        MutexInfoListEntry* list;
        CallStackListEntry* stack;
    };
} GrowingList;

typedef GrowingList MutexInfoList;
typedef GrowingList CallStackList;
197
// Per-mutex node in the lock-order graph used for deadlock prediction.
typedef struct MutexInfo {
    // thread currently holding the lock or 0
    pid_t owner;

    // most-recently-locked doubly-linked list
    struct MutexInfo* prev;
    struct MutexInfo* next;

    // for reentrant locks
    int lockCount;
    // when looking for loops in the graph, marks visited nodes
    int historyMark;
    // the actual mutex
    pthread_mutex_t* mutex;
    // list of locks directly acquired AFTER this one in the same thread
    MutexInfoList children;
    // list of locks directly acquired BEFORE this one in the same thread
    MutexInfoList parents;
    // list of call stacks when a new link is established to this lock from its parent
    CallStackList stacks;
    // call stack when this lock was acquired last
    int stackDepth;
    uintptr_t stackTrace[STACK_TRACE_DEPTH];
} MutexInfo;
222
223static void growingListInit(GrowingList* list) {
224 list->alloc = 0;
225 list->count = 0;
226 list->data = NULL;
227}
228
// Appends one (uninitialized) slot of objSize bytes to the list, growing
// the backing store a page's worth of objects at a time when full.
static void growingListAdd(GrowingList* pList, size_t objSize) {
    if (pList->count == pList->alloc) {
        size_t oldsize = pList->alloc * objSize;
        // Grow by however many objects fit in one page.
        // NOTE(review): assumes objSize <= PAGESIZE; a larger objSize would
        // add zero capacity here -- confirm all element types are small.
        pList->alloc += PAGESIZE / objSize;
        size_t size = pList->alloc * objSize;
        // NOTE(review): a debug_realloc() failure (NULL) is not checked;
        // the caller's subsequent store through ->list would crash.
        pList->data = debug_realloc(pList->data, size, oldsize);
    }
    pList->count++;
}
238
239static void initMutexInfo(MutexInfo* object, pthread_mutex_t* mutex) {
240 object->owner = 0;
241 object->prev = 0;
242 object->next = 0;
243 object->lockCount = 0;
244 object->historyMark = 0;
245 object->mutex = mutex;
246 growingListInit(&object->children);
247 growingListInit(&object->parents);
248 growingListInit(&object->stacks);
249 object->stackDepth = 0;
250}
251
// Per-thread bookkeeping: the thread id and the head of that thread's
// most-recently-locked list of mutexes.
typedef struct ThreadInfo {
    pid_t pid;
    MutexInfo* mrl;  // most recently locked mutex, or NULL if none recorded
} ThreadInfo;

// Initializes a freshly allocated ThreadInfo for the given thread id.
static void initThreadInfo(ThreadInfo* object, pid_t pid) {
    object->pid = pid;
    object->mrl = NULL;
}
261
262/****************************************************************************/
263
264static MutexInfo* get_mutex_info(pthread_mutex_t *mutex);
265static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object);
266static void mutex_unlock_checked(MutexInfo* object);
267
268/****************************************************************************/
269
// The real (unchecked) lock implementations, defined elsewhere in libc.
extern "C" int pthread_mutex_lock_impl(pthread_mutex_t *mutex);
extern "C" int pthread_mutex_unlock_impl(pthread_mutex_t *mutex);

// Lock/unlock without re-entering the deadlock-prediction hooks; used for
// sDbgLock and other internal locking.
static int pthread_mutex_lock_unchecked(pthread_mutex_t *mutex) {
    return pthread_mutex_lock_impl(mutex);
}

static int pthread_mutex_unlock_unchecked(pthread_mutex_t *mutex) {
    return pthread_mutex_unlock_impl(mutex);
}
280
281/****************************************************************************/
282
// Copies "count" return addresses into "stack", allocating the address
// array from the leak-forever debug allocator. Caller must hold sDbgLock.
// NOTE(review): DbgAllocLocked() may return NULL; memcpy would then crash.
static void dup_backtrace(CallStack* stack, size_t count, uintptr_t const* addrs) {
    stack->depth = count;
    stack->addrs = DbgAllocLocked<uintptr_t>(count);
    memcpy(stack->addrs, addrs, count * sizeof(uintptr_t));
}
288
289/****************************************************************************/
290
291static int historyListHas(
292 const MutexInfoList* list, MutexInfo const * obj) {
293 int i;
294 for (i=0; i<list->count; i++) {
295 if (list->list[i] == obj) {
296 return i;
297 }
298 }
299 return -1;
300}
301
// Appends obj to the list, growing the backing store as needed.
static void historyListAdd(MutexInfoList* pList, MutexInfo* obj) {
    growingListAdd(pList, sizeof(MutexInfoListEntry));
    pList->list[pList->count - 1] = obj;
}
306
307static int historyListRemove(MutexInfoList* pList, MutexInfo* obj) {
308 int i;
309 for (i = pList->count-1; i >= 0; i--) {
310 if (pList->list[i] == obj) {
311 break;
312 }
313 }
314 if (i < 0) {
315 // not found!
316 return 0;
317 }
318
319 if (i != pList->count-1) {
320 // copy the last entry to the new free slot
321 pList->list[i] = pList->list[pList->count-1];
322 }
323 pList->count--;
324 memset(&pList->list[pList->count], 0, sizeof(MutexInfoListEntry));
325 return 1;
326}
327
// Records that "child" was acquired while "parent" was held; the edge is
// stored on both nodes so it can be walked in either direction.
static void linkParentToChild(MutexInfo* parent, MutexInfo* child) {
    historyListAdd(&parent->children, child);
    historyListAdd(&child->parents, parent);
}

// Removes the parent<->child edge added by linkParentToChild().
static void unlinkParentFromChild(MutexInfo* parent, MutexInfo* child) {
    historyListRemove(&parent->children, child);
    historyListRemove(&child->parents, parent);
}
337
338/****************************************************************************/
339
// Appends a copy of the given backtrace to the list of link call stacks.
static void callstackListAdd(CallStackList* pList,
        int count, uintptr_t const* addrs) {
    growingListAdd(pList, sizeof(CallStackListEntry));
    dup_backtrace(&pList->stack[pList->count - 1], count, addrs);
}
345
346/****************************************************************************/
347
348/*
349 * Recursively traverse the object hierarchy starting at "obj". We mark
350 * ourselves on entry and clear the mark on exit. If we ever encounter
351 * a marked object, we have a cycle.
352 *
353 * Returns "true" if all is well, "false" if we found a cycle.
354 */
355
/*
 * Depth-first walk of the lock-order graph rooted at "obj", using
 * historyMark to detect revisits. Called with sDbgLock held.
 * Returns 1 ("true") if no cycle was found, 0 ("false") if a cycle --
 * i.e. a potential deadlock -- was detected; diagnostics are logged
 * before returning 0.
 */
static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
{
    /*
     * Have we been here before?
     */
    if (obj->historyMark) {
        int stackDepth;
        uintptr_t addrs[STACK_TRACE_DEPTH];

        /* Turn off prediction temporarily in this thread while logging */
        sPthreadDebugDisabledThread = gettid();

        backtrace_startup();

        LOGW("%s\n", kStartBanner);
        LOGW("pid: %d, tid: %d >>> %s <<<", getpid(), gettid(), __progname);
        LOGW("Illegal lock attempt:\n");
        LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
        stackDepth = get_backtrace(addrs, STACK_TRACE_DEPTH);
        log_backtrace(addrs, stackDepth);

        LOGW("+++ Currently held locks in this thread (in reverse order):");
        // Walk back through parents owned by this thread, logging each held
        // lock (with its acquisition stack at debug level >= 2).
        MutexInfo* cur = obj;
        pid_t ourtid = gettid();
        int i;
        for (i=0 ; i<cur->parents.count ; i++) {
            MutexInfo* parent = cur->parents.list[i];
            if (parent->owner == ourtid) {
                LOGW("--- pthread_mutex_t at %p\n", parent->mutex);
                if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
                    log_backtrace(parent->stackTrace, parent->stackDepth);
                }
                cur = parent;
                // NOTE(review): only the first owned parent is logged per
                // node; "cur = parent" suggests an intended outer loop --
                // confirm whether deeper ancestors should be reported too.
                break;
            }
        }

        LOGW("+++ Earlier, the following lock order (from last to first) was established\n");
        return 0;
    }

    obj->historyMark = 1;

    // Recurse into this lock's children; a failure below means a cycle was
    // found further down, so log the link we came through.
    MutexInfoList* pList = &obj->children;
    int result = 1;
    int i;
    for (i = pList->count-1; i >= 0; i--) {
        MutexInfo* child = pList->list[i];
        if (!traverseTree(child, obj)) {
            LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
            if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
                // The stack recorded when the objParent->obj link was made
                // lives at the same index as objParent in obj->parents.
                int index = historyListHas(&obj->parents, objParent);
                if ((size_t)index < (size_t)obj->stacks.count) {
                    log_backtrace(obj->stacks.stack[index].addrs, obj->stacks.stack[index].depth);
                } else {
                    // No per-link stack (e.g. capture disabled at link
                    // time); fall back to the last acquisition stack.
                    log_backtrace(obj->stackTrace, obj->stackDepth);
                }
            }
            result = 0;
            break;
        }
    }

    // Clear the mark on exit so sibling traversals start clean.
    obj->historyMark = 0;
    return result;
}
422
423/****************************************************************************/
424
/*
 * Records the acquisition of "object" by the current thread. "mrl" is the
 * mutex most recently locked by this thread, or NULL if none. Establishes
 * a new edge in the lock-order graph when needed and runs cycle detection
 * over it to predict deadlocks. Called after the real lock is held (see
 * the file comment: prediction, not prevention).
 */
static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
{
    pid_t tid = gettid();
    if (object->owner == tid) {
        // Recursive acquisition of a lock we already hold: always legal.
        // (lockCount counts the extra recursion levels, not the first one.)
        object->lockCount++;
        return;
    }

    object->owner = tid;
    object->lockCount = 0;

    if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
        // always record the call stack when acquiring a lock.
        // it's not efficient, but is useful during diagnostics
        object->stackDepth = get_backtrace(object->stackTrace, STACK_TRACE_DEPTH);
    }

    // no other locks held in this thread -- no deadlock possible!
    if (mrl == NULL)
        return;

    // check if the lock we're trying to acquire is a direct descendant of
    // the most recently locked mutex in this thread, in which case we're
    // in a good situation -- no deadlock possible
    if (historyListHas(&mrl->children, object) >= 0)
        return;

    pthread_mutex_lock_unchecked(&sDbgLock);

    linkParentToChild(mrl, object);
    if (!traverseTree(object, mrl)) {
        // Cycle found: traverseTree already logged it. Remove the offending
        // edge so the same rejected ordering is not reported repeatedly.
        backtrace_shutdown();
        LOGW("%s\n", kEndBanner);
        unlinkParentFromChild(mrl, object);
        // reenable pthread debugging for this thread
        sPthreadDebugDisabledThread = -1;
    } else {
        // record the call stack for this link
        // NOTE: the call stack is added at the same index
        // as mrl in object->parents[]
        // ie: object->parents.count == object->stacks.count, which is
        // also the index.
        if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
            callstackListAdd(&object->stacks,
                object->stackDepth, object->stackTrace);
        }
    }

    pthread_mutex_unlock_unchecked(&sDbgLock);
}
475
476static void mutex_unlock_checked(MutexInfo* object)
477{
478 pid_t tid = gettid();
479 if (object->owner == tid) {
480 if (object->lockCount == 0) {
481 object->owner = 0;
482 } else {
483 object->lockCount--;
484 }
485 }
486}
487
488
489// =============================================================================
490// Hash Table functions
491// =============================================================================
492
493/****************************************************************************/
494
// Number of slots per table; must stay a power of two because get_index()
// masks with (HASHTABLE_SIZE - 1).
#define HASHTABLE_SIZE 256

// Doubly-linked chain node for one hash slot.
typedef struct HashEntry HashEntry;
struct HashEntry {
    size_t slot;      // owning slot index, cached for hashmap_removeEntry()
    HashEntry* prev;
    HashEntry* next;
    void* data;       // MutexInfo* or ThreadInfo*, depending on the table
};

typedef struct HashTable HashTable;
struct HashTable {
    HashEntry* slots[HASHTABLE_SIZE];
};

// pthread_mutex_t* -> MutexInfo* and tid -> ThreadInfo* maps.
static HashTable sMutexMap;
static HashTable sThreadMap;
512
513/****************************************************************************/
514
// Simple multiplicative (x31) hash over the raw bytes of the key,
// seeded with the key size.
static uint32_t get_hashcode(void const * key, size_t keySize)
{
    char const* bytes = (char const*)key;
    uint32_t h = keySize;
    for (size_t i = 0; i < keySize; i++) {
        h = h * 31 + bytes[i];
    }
    return h;
}
526
527static size_t get_index(uint32_t h)
528{
529 // We apply this secondary hashing discovered by Doug Lea to defend
530 // against bad hashes.
531 h += ~(h << 9);
532 h ^= (((unsigned int) h) >> 14);
533 h += (h << 4);
534 h ^= (((unsigned int) h) >> 10);
535 return (size_t)h & (HASHTABLE_SIZE - 1);
536}
537
538/****************************************************************************/
539
// Empties the table (all chains NULL) -- same as its zero-initialized state.
static void hashmap_init(HashTable* table) {
    memset(table, 0, sizeof(HashTable));
}
543
544static void hashmap_removeEntry(HashTable* table, HashEntry* entry)
545{
546 HashEntry* prev = entry->prev;
547 HashEntry* next = entry->next;
548 if (prev != NULL) entry->prev->next = next;
549 if (next != NULL) entry->next->prev = prev;
550 if (prev == NULL) {
551 // we are the head of the list. set the head to be next
552 table->slots[entry->slot] = entry->next;
553 }
554}
555
/*
 * Finds the entry matching "key" in the table, creating and chaining in a
 * new entry (with data == NULL) if none exists. "equals" compares an
 * entry's data against the key. Caller must hold sDbgLock (the allocator
 * is not thread-safe).
 * NOTE(review): a DbgAllocLocked() failure (NULL) is not checked here; the
 * stores through "entry" would crash.
 */
static HashEntry* hashmap_lookup(HashTable* table,
        void const* key, size_t ksize,
        int (*equals)(void const* data, void const* key))
{
    const uint32_t hash = get_hashcode(key, ksize);
    const size_t slot = get_index(hash);

    HashEntry* entry = table->slots[slot];
    while (entry) {
        if (equals(entry->data, key)) {
            break;
        }
        entry = entry->next;
    }

    if (entry == NULL) {
        // create a new entry and push it onto the head of the chain
        entry = DbgAllocLocked<HashEntry>();
        entry->data = NULL;
        entry->slot = slot;
        entry->prev = NULL;
        entry->next = table->slots[slot];
        if (entry->next != NULL) {
            entry->next->prev = entry;
        }
        table->slots[slot] = entry;
    }
    return entry;
}
585
586/****************************************************************************/
587
// Hash comparator: does this MutexInfo track the given pthread_mutex_t*?
static int MutexInfo_equals(void const* data, void const* key) {
    return ((MutexInfo const *)data)->mutex == *(pthread_mutex_t **)key;
}

/*
 * Returns the MutexInfo tracking "mutex", creating and initializing one on
 * first sight. Takes sDbgLock internally.
 */
static MutexInfo* get_mutex_info(pthread_mutex_t *mutex)
{
    pthread_mutex_lock_unchecked(&sDbgLock);

    HashEntry* entry = hashmap_lookup(&sMutexMap,
            &mutex, sizeof(mutex),
            &MutexInfo_equals);
    if (entry->data == NULL) {
        // First time this mutex is seen: allocate and initialize its record.
        MutexInfo* mutex_info = DbgAllocLocked<MutexInfo>();
        entry->data = mutex_info;
        initMutexInfo(mutex_info, mutex);
    }

    pthread_mutex_unlock_unchecked(&sDbgLock);

    return (MutexInfo *)entry->data;
}
609
610/****************************************************************************/
611
// Hash comparator: does this ThreadInfo describe the given thread id?
static int ThreadInfo_equals(void const* data, void const* key) {
    return ((ThreadInfo const *)data)->pid == *(pid_t *)key;
}

/*
 * Returns the ThreadInfo for "pid", creating and initializing one on first
 * sight. Takes sDbgLock internally.
 */
static ThreadInfo* get_thread_info(pid_t pid)
{
    pthread_mutex_lock_unchecked(&sDbgLock);

    HashEntry* entry = hashmap_lookup(&sThreadMap,
            &pid, sizeof(pid),
            &ThreadInfo_equals);
    if (entry->data == NULL) {
        // First time this thread is seen: allocate and initialize its record.
        ThreadInfo* thread_info = DbgAllocLocked<ThreadInfo>();
        entry->data = thread_info;
        initThreadInfo(thread_info, pid);
    }

    pthread_mutex_unlock_unchecked(&sDbgLock);

    return (ThreadInfo *)entry->data;
}
633
634static void push_most_recently_locked(MutexInfo* mrl) {
635 ThreadInfo* tinfo = get_thread_info(gettid());
636 mrl->next = NULL;
637 mrl->prev = tinfo->mrl;
638 tinfo->mrl = mrl;
639}
640
641static void remove_most_recently_locked(MutexInfo* mrl) {
642 ThreadInfo* tinfo = get_thread_info(gettid());
643 if (mrl->next) {
644 (mrl->next)->prev = mrl->prev;
645 }
646 if (mrl->prev) {
647 (mrl->prev)->next = mrl->next;
648 }
649 if (tinfo->mrl == mrl) {
650 tinfo->mrl = mrl->next;
651 }
652}
653
// Returns the mutex most recently locked by the calling thread, or NULL.
static MutexInfo* get_most_recently_locked() {
    ThreadInfo* tinfo = get_thread_info(gettid());
    return tinfo->mrl;
}
658
659/****************************************************************************/
660
/* pthread_debug_init() is called from libc_init_dynamic() just
 * after system properties have been initialized. It reads the
 * debug.libc.pthread property to decide the deadlock-prediction level
 * (see the level table above CAPTURE_CALLSTACK).
 */

extern "C" __LIBC_HIDDEN__ void pthread_debug_init() {
    char env[PROP_VALUE_MAX];
    if (__system_property_get("debug.libc.pthread", env)) {
        int level = atoi(env);
        if (level) {
            LOGI("pthread deadlock detection level %d enabled for pid %d (%s)",
                level, getpid(), __progname);
            hashmap_init(&sMutexMap);
            // sThreadMap is not explicitly initialized here; as a static it
            // is already zero-filled, which matches hashmap_init().
            sPthreadDebugLevel = level;
        }
    }
}
677
678/*
679 * See if we were allowed to grab the lock at this time. We do it
680 * *after* acquiring the lock, rather than before, so that we can
681 * freely update the MutexInfo struct. This seems counter-intuitive,
682 * but our goal is deadlock *prediction* not deadlock *prevention*.
683 * (If we actually deadlock, the situation is easy to diagnose from
684 * a thread dump, so there's no point making a special effort to do
685 * the checks before the lock is held.)
686 */
687
/*
 * Hook called after the mutex has actually been acquired (see the comment
 * above: prediction, not prevention). Checks the acquisition against the
 * recorded lock order and pushes the mutex onto this thread's held list.
 */
extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex)
{
    if (sPthreadDebugLevel == 0) return;
    // prediction disabled for this thread
    if (sPthreadDebugDisabledThread == gettid())
        return;
    MutexInfo* object = get_mutex_info(mutex);
    MutexInfo* mrl = get_most_recently_locked();
    mutex_lock_checked(mrl, object);
    push_most_recently_locked(object);
}
699
700/*
701 * pthread_debug_mutex_unlock_check() must be called with the mutex
702 * still held (ie: before calling the real unlock)
703 */
704
/*
 * Hook called with the mutex still held (ie: before the real unlock).
 * Removes the mutex from this thread's held list and updates ownership /
 * recursion bookkeeping.
 */
extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex)
{
    if (sPthreadDebugLevel == 0) return;
    // prediction disabled for this thread
    if (sPthreadDebugDisabledThread == gettid())
        return;
    MutexInfo* object = get_mutex_info(mutex);
    remove_most_recently_locked(object);
    mutex_unlock_checked(object);
}
714}