blob: f8f0c5953584434e8de3b53b9eb46997964379c5 [file] [log] [blame]
Mathias Agopian7c0c3792011-09-05 23:54:55 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
#include <sys/types.h>
#include <sys/atomics.h>
#include <sys/system_properties.h>
#include <sys/mman.h>

//#include <dlfcn.h>
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <unwind.h>

#include "bionic_tls.h"
#include "debug_mapinfo.h"
#include "debug_stacktrace.h"
#include "logd.h"

#include <private/debug_format.h>
Mathias Agopian7c0c3792011-09-05 23:54:55 -070049
50/*
51 * ===========================================================================
52 * Deadlock prediction
53 * ===========================================================================
54 */
55/*
56The idea is to predict the possibility of deadlock by recording the order
57in which locks are acquired. If we see an attempt to acquire a lock
58out of order, we can identify the locks and offending code.
59
60To make this work, we need to keep track of the locks held by each thread,
61and create history trees for each lock. When a thread tries to acquire
62a new lock, we walk through the "history children" of the lock, looking
63for a match with locks the thread already holds. If we find a match,
64it means the thread has made a request that could result in a deadlock.
65
66To support recursive locks, we always allow re-locking a currently-held
67lock, and maintain a recursion depth count.
68
69An ASCII-art example, where letters represent locks:
70
71 A
72 /|\
73 / | \
74 B | D
75 \ |
76 \|
77 C
78
79The above is the tree we'd have after handling lock synchronization
80sequences "ABC", "AC", "AD". A has three children, {B, C, D}. C is also
81a child of B. (The lines represent pointers between parent and child.
82Every node can have multiple parents and multiple children.)
83
84If we hold AC, and want to lock B, we recursively search through B's
85children to see if A or C appears. It does, so we reject the attempt.
86(A straightforward way to implement it: add a link from C to B, then
87determine whether the graph starting at B contains a cycle.)
88
89If we hold AC and want to lock D, we would succeed, creating a new link
90from C to D.
91
92Updates to MutexInfo structs are only allowed for the thread that holds
93the lock, so we actually do most of our deadlock prediction work after
94the lock has been acquired.
95*/
96
97// =============================================================================
98// log functions
99// =============================================================================
100
101#define LOGD(format, ...) \
Elliott Hughes1e980b62013-01-17 18:36:06 -0800102 __libc_format_log(ANDROID_LOG_DEBUG, "pthread_debug", (format), ##__VA_ARGS__ )
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700103
104#define LOGW(format, ...) \
Elliott Hughes1e980b62013-01-17 18:36:06 -0800105 __libc_format_log(ANDROID_LOG_WARN, "pthread_debug", (format), ##__VA_ARGS__ )
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700106
107#define LOGE(format, ...) \
Elliott Hughes1e980b62013-01-17 18:36:06 -0800108 __libc_format_log(ANDROID_LOG_ERROR, "pthread_debug", (format), ##__VA_ARGS__ )
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700109
110#define LOGI(format, ...) \
Elliott Hughes1e980b62013-01-17 18:36:06 -0800111 __libc_format_log(ANDROID_LOG_INFO, "pthread_debug", (format), ##__VA_ARGS__ )
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700112
113static const char* const kStartBanner =
114 "===============================================================";
115
116static const char* const kEndBanner =
117 "===============================================================";
118
Elliott Hughese4ccf5a2013-02-07 12:06:44 -0800119extern const char* __progname;
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700120
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700121#define STACK_TRACE_DEPTH 16
122
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700123/****************************************************************************/
124
125/*
126 * level <= 0 : deadlock prediction disabled
127 * level 1 : deadlock prediction enabled, w/o call stacks
128 * level 2 : deadlock prediction enabled w/ call stacks
129 */
130#define CAPTURE_CALLSTACK 2
131static int sPthreadDebugLevel = 0;
132static pid_t sPthreadDebugDisabledThread = -1;
133static pthread_mutex_t sDbgLock = PTHREAD_MUTEX_INITIALIZER;
134
135/****************************************************************************/
136
137/* some simple/lame malloc replacement
138 * NOT thread-safe and leaks everything
139 */
140
141#define DBG_ALLOC_BLOCK_SIZE PAGESIZE
142static size_t sDbgAllocOffset = DBG_ALLOC_BLOCK_SIZE;
143static char* sDbgAllocPtr = NULL;
144
Elliott Hughes1e980b62013-01-17 18:36:06 -0800145template <typename T>
146static T* DbgAllocLocked(size_t count = 1) {
147 size_t size = sizeof(T) * count;
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700148 if ((sDbgAllocOffset + size) > DBG_ALLOC_BLOCK_SIZE) {
149 sDbgAllocOffset = 0;
Elliott Hughes1e980b62013-01-17 18:36:06 -0800150 sDbgAllocPtr = reinterpret_cast<char*>(mmap(NULL, DBG_ALLOC_BLOCK_SIZE,
151 PROT_READ|PROT_WRITE,
152 MAP_ANON | MAP_PRIVATE, 0, 0));
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700153 if (sDbgAllocPtr == MAP_FAILED) {
154 return NULL;
155 }
156 }
157 void* addr = sDbgAllocPtr + sDbgAllocOffset;
158 sDbgAllocOffset += size;
Elliott Hughes1e980b62013-01-17 18:36:06 -0800159 return reinterpret_cast<T*>(addr);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700160}
161
162static void* debug_realloc(void *ptr, size_t size, size_t old_size) {
163 void* addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
164 MAP_ANON | MAP_PRIVATE, 0, 0);
165 if (addr != MAP_FAILED) {
166 if (ptr) {
167 memcpy(addr, ptr, old_size);
168 munmap(ptr, old_size);
169 }
170 } else {
171 addr = NULL;
172 }
173 return addr;
174}
175
176/*****************************************************************************/
177
178struct MutexInfo;
179
180typedef struct CallStack {
Elliott Hughes239e7a02013-01-25 17:13:45 -0800181 uintptr_t depth;
182 uintptr_t* addrs;
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700183} CallStack;
184
185typedef struct MutexInfo* MutexInfoListEntry;
186typedef struct CallStack CallStackListEntry;
187
188typedef struct GrowingList {
189 int alloc;
190 int count;
191 union {
192 void* data;
193 MutexInfoListEntry* list;
194 CallStackListEntry* stack;
195 };
196} GrowingList;
197
198typedef GrowingList MutexInfoList;
199typedef GrowingList CallStackList;
200
201typedef struct MutexInfo {
202 // thread currently holding the lock or 0
203 pid_t owner;
204
205 // most-recently-locked doubly-linked list
206 struct MutexInfo* prev;
207 struct MutexInfo* next;
208
209 // for reentrant locks
210 int lockCount;
211 // when looking for loops in the graph, marks visited nodes
212 int historyMark;
213 // the actual mutex
214 pthread_mutex_t* mutex;
215 // list of locks directly acquired AFTER this one in the same thread
216 MutexInfoList children;
217 // list of locks directly acquired BEFORE this one in the same thread
218 MutexInfoList parents;
219 // list of call stacks when a new link is established to this lock form its parent
220 CallStackList stacks;
221 // call stack when this lock was acquired last
222 int stackDepth;
Elliott Hughes239e7a02013-01-25 17:13:45 -0800223 uintptr_t stackTrace[STACK_TRACE_DEPTH];
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700224} MutexInfo;
225
226static void growingListInit(GrowingList* list) {
227 list->alloc = 0;
228 list->count = 0;
229 list->data = NULL;
230}
231
232static void growingListAdd(GrowingList* pList, size_t objSize) {
233 if (pList->count == pList->alloc) {
234 size_t oldsize = pList->alloc * objSize;
235 pList->alloc += PAGESIZE / objSize;
236 size_t size = pList->alloc * objSize;
237 pList->data = debug_realloc(pList->data, size, oldsize);
238 }
239 pList->count++;
240}
241
242static void initMutexInfo(MutexInfo* object, pthread_mutex_t* mutex) {
243 object->owner = 0;
244 object->prev = 0;
245 object->next = 0;
246 object->lockCount = 0;
247 object->historyMark = 0;
248 object->mutex = mutex;
249 growingListInit(&object->children);
250 growingListInit(&object->parents);
251 growingListInit(&object->stacks);
252 object->stackDepth = 0;
253}
254
255typedef struct ThreadInfo {
256 pid_t pid;
257 MutexInfo* mrl;
258} ThreadInfo;
259
260static void initThreadInfo(ThreadInfo* object, pid_t pid) {
261 object->pid = pid;
262 object->mrl = NULL;
263}
264
265/****************************************************************************/
266
267static MutexInfo* get_mutex_info(pthread_mutex_t *mutex);
268static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object);
269static void mutex_unlock_checked(MutexInfo* object);
270
271/****************************************************************************/
272
Elliott Hughesf90b95e2013-01-22 11:20:45 -0800273extern "C" int pthread_mutex_lock_impl(pthread_mutex_t *mutex);
274extern "C" int pthread_mutex_unlock_impl(pthread_mutex_t *mutex);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700275
276static int pthread_mutex_lock_unchecked(pthread_mutex_t *mutex) {
277 return pthread_mutex_lock_impl(mutex);
278}
279
280static int pthread_mutex_unlock_unchecked(pthread_mutex_t *mutex) {
281 return pthread_mutex_unlock_impl(mutex);
282}
283
284/****************************************************************************/
285
Elliott Hughes239e7a02013-01-25 17:13:45 -0800286static void dup_backtrace(CallStack* stack, size_t count, uintptr_t const* addrs) {
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700287 stack->depth = count;
Elliott Hughes239e7a02013-01-25 17:13:45 -0800288 stack->addrs = DbgAllocLocked<uintptr_t>(count);
289 memcpy(stack->addrs, addrs, count * sizeof(uintptr_t));
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700290}
291
292/****************************************************************************/
293
294static int historyListHas(
295 const MutexInfoList* list, MutexInfo const * obj) {
296 int i;
297 for (i=0; i<list->count; i++) {
298 if (list->list[i] == obj) {
299 return i;
300 }
301 }
302 return -1;
303}
304
305static void historyListAdd(MutexInfoList* pList, MutexInfo* obj) {
306 growingListAdd(pList, sizeof(MutexInfoListEntry));
307 pList->list[pList->count - 1] = obj;
308}
309
310static int historyListRemove(MutexInfoList* pList, MutexInfo* obj) {
311 int i;
312 for (i = pList->count-1; i >= 0; i--) {
313 if (pList->list[i] == obj) {
314 break;
315 }
316 }
317 if (i < 0) {
318 // not found!
319 return 0;
320 }
321
322 if (i != pList->count-1) {
323 // copy the last entry to the new free slot
324 pList->list[i] = pList->list[pList->count-1];
325 }
326 pList->count--;
327 memset(&pList->list[pList->count], 0, sizeof(MutexInfoListEntry));
328 return 1;
329}
330
331static void linkParentToChild(MutexInfo* parent, MutexInfo* child) {
332 historyListAdd(&parent->children, child);
333 historyListAdd(&child->parents, parent);
334}
335
336static void unlinkParentFromChild(MutexInfo* parent, MutexInfo* child) {
337 historyListRemove(&parent->children, child);
338 historyListRemove(&child->parents, parent);
339}
340
341/****************************************************************************/
342
343static void callstackListAdd(CallStackList* pList,
Elliott Hughes239e7a02013-01-25 17:13:45 -0800344 int count, uintptr_t const* addrs) {
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700345 growingListAdd(pList, sizeof(CallStackListEntry));
346 dup_backtrace(&pList->stack[pList->count - 1], count, addrs);
347}
348
349/****************************************************************************/
350
351/*
352 * Recursively traverse the object hierarchy starting at "obj". We mark
353 * ourselves on entry and clear the mark on exit. If we ever encounter
354 * a marked object, we have a cycle.
355 *
356 * Returns "true" if all is well, "false" if we found a cycle.
357 */
358
359static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
360{
361 /*
362 * Have we been here before?
363 */
364 if (obj->historyMark) {
365 int stackDepth;
Elliott Hughes239e7a02013-01-25 17:13:45 -0800366 uintptr_t addrs[STACK_TRACE_DEPTH];
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700367
368 /* Turn off prediction temporarily in this thread while logging */
369 sPthreadDebugDisabledThread = gettid();
370
Elliott Hughes35b621c2013-01-28 16:27:36 -0800371 backtrace_startup();
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700372
373 LOGW("%s\n", kStartBanner);
374 LOGW("pid: %d, tid: %d >>> %s <<<", getpid(), gettid(), __progname);
375 LOGW("Illegal lock attempt:\n");
376 LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
377 stackDepth = get_backtrace(addrs, STACK_TRACE_DEPTH);
Elliott Hughes35b621c2013-01-28 16:27:36 -0800378 log_backtrace(addrs, stackDepth);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700379
380 LOGW("+++ Currently held locks in this thread (in reverse order):");
381 MutexInfo* cur = obj;
382 pid_t ourtid = gettid();
383 int i;
384 for (i=0 ; i<cur->parents.count ; i++) {
385 MutexInfo* parent = cur->parents.list[i];
386 if (parent->owner == ourtid) {
387 LOGW("--- pthread_mutex_t at %p\n", parent->mutex);
388 if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
Elliott Hughes35b621c2013-01-28 16:27:36 -0800389 log_backtrace(parent->stackTrace, parent->stackDepth);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700390 }
391 cur = parent;
392 break;
393 }
394 }
395
396 LOGW("+++ Earlier, the following lock order (from last to first) was established\n");
397 return 0;
398 }
399
400 obj->historyMark = 1;
401
402 MutexInfoList* pList = &obj->children;
403 int result = 1;
404 int i;
405 for (i = pList->count-1; i >= 0; i--) {
406 MutexInfo* child = pList->list[i];
407 if (!traverseTree(child, obj)) {
408 LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
409 if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
410 int index = historyListHas(&obj->parents, objParent);
411 if ((size_t)index < (size_t)obj->stacks.count) {
Elliott Hughes35b621c2013-01-28 16:27:36 -0800412 log_backtrace(obj->stacks.stack[index].addrs, obj->stacks.stack[index].depth);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700413 } else {
Elliott Hughes35b621c2013-01-28 16:27:36 -0800414 log_backtrace(obj->stackTrace, obj->stackDepth);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700415 }
416 }
417 result = 0;
418 break;
419 }
420 }
421
422 obj->historyMark = 0;
423 return result;
424}
425
426/****************************************************************************/
427
428static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
429{
430 pid_t tid = gettid();
431 if (object->owner == tid) {
432 object->lockCount++;
433 return;
434 }
435
436 object->owner = tid;
437 object->lockCount = 0;
438
439 if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
440 // always record the call stack when acquiring a lock.
441 // it's not efficient, but is useful during diagnostics
442 object->stackDepth = get_backtrace(object->stackTrace, STACK_TRACE_DEPTH);
443 }
444
445 // no other locks held in this thread -- no deadlock possible!
446 if (mrl == NULL)
447 return;
448
449 // check if the lock we're trying to acquire is a direct descendant of
450 // the most recently locked mutex in this thread, in which case we're
451 // in a good situation -- no deadlock possible
452 if (historyListHas(&mrl->children, object) >= 0)
453 return;
454
455 pthread_mutex_lock_unchecked(&sDbgLock);
456
457 linkParentToChild(mrl, object);
458 if (!traverseTree(object, mrl)) {
Elliott Hughes35b621c2013-01-28 16:27:36 -0800459 backtrace_shutdown();
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700460 LOGW("%s\n", kEndBanner);
461 unlinkParentFromChild(mrl, object);
462 // reenable pthread debugging for this thread
463 sPthreadDebugDisabledThread = -1;
464 } else {
465 // record the call stack for this link
466 // NOTE: the call stack is added at the same index
467 // as mrl in object->parents[]
468 // ie: object->parents.count == object->stacks.count, which is
469 // also the index.
470 if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
471 callstackListAdd(&object->stacks,
472 object->stackDepth, object->stackTrace);
473 }
474 }
475
476 pthread_mutex_unlock_unchecked(&sDbgLock);
477}
478
479static void mutex_unlock_checked(MutexInfo* object)
480{
481 pid_t tid = gettid();
482 if (object->owner == tid) {
483 if (object->lockCount == 0) {
484 object->owner = 0;
485 } else {
486 object->lockCount--;
487 }
488 }
489}
490
491
492// =============================================================================
493// Hash Table functions
494// =============================================================================
495
496/****************************************************************************/
497
498#define HASHTABLE_SIZE 256
499
500typedef struct HashEntry HashEntry;
501struct HashEntry {
502 size_t slot;
503 HashEntry* prev;
504 HashEntry* next;
505 void* data;
506};
507
508typedef struct HashTable HashTable;
509struct HashTable {
510 HashEntry* slots[HASHTABLE_SIZE];
511};
512
513static HashTable sMutexMap;
514static HashTable sThreadMap;
515
516/****************************************************************************/
517
518static uint32_t get_hashcode(void const * key, size_t keySize)
519{
520 uint32_t h = keySize;
521 char const* data = (char const*)key;
522 size_t i;
523 for (i = 0; i < keySize; i++) {
524 h = h * 31 + *data;
525 data++;
526 }
527 return (uint32_t)h;
528}
529
530static size_t get_index(uint32_t h)
531{
532 // We apply this secondary hashing discovered by Doug Lea to defend
533 // against bad hashes.
534 h += ~(h << 9);
535 h ^= (((unsigned int) h) >> 14);
536 h += (h << 4);
537 h ^= (((unsigned int) h) >> 10);
538 return (size_t)h & (HASHTABLE_SIZE - 1);
539}
540
541/****************************************************************************/
542
543static void hashmap_init(HashTable* table) {
544 memset(table, 0, sizeof(HashTable));
545}
546
547static void hashmap_removeEntry(HashTable* table, HashEntry* entry)
548{
549 HashEntry* prev = entry->prev;
550 HashEntry* next = entry->next;
551 if (prev != NULL) entry->prev->next = next;
552 if (next != NULL) entry->next->prev = prev;
553 if (prev == NULL) {
554 // we are the head of the list. set the head to be next
555 table->slots[entry->slot] = entry->next;
556 }
557}
558
559static HashEntry* hashmap_lookup(HashTable* table,
560 void const* key, size_t ksize,
561 int (*equals)(void const* data, void const* key))
562{
563 const uint32_t hash = get_hashcode(key, ksize);
564 const size_t slot = get_index(hash);
565
566 HashEntry* entry = table->slots[slot];
567 while (entry) {
568 if (equals(entry->data, key)) {
569 break;
570 }
571 entry = entry->next;
572 }
573
574 if (entry == NULL) {
575 // create a new entry
Elliott Hughes1e980b62013-01-17 18:36:06 -0800576 entry = DbgAllocLocked<HashEntry>();
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700577 entry->data = NULL;
578 entry->slot = slot;
579 entry->prev = NULL;
580 entry->next = table->slots[slot];
581 if (entry->next != NULL) {
582 entry->next->prev = entry;
583 }
584 table->slots[slot] = entry;
585 }
586 return entry;
587}
588
589/****************************************************************************/
590
591static int MutexInfo_equals(void const* data, void const* key) {
592 return ((MutexInfo const *)data)->mutex == *(pthread_mutex_t **)key;
593}
594
595static MutexInfo* get_mutex_info(pthread_mutex_t *mutex)
596{
597 pthread_mutex_lock_unchecked(&sDbgLock);
598
599 HashEntry* entry = hashmap_lookup(&sMutexMap,
600 &mutex, sizeof(mutex),
601 &MutexInfo_equals);
602 if (entry->data == NULL) {
Elliott Hughes1e980b62013-01-17 18:36:06 -0800603 MutexInfo* mutex_info = DbgAllocLocked<MutexInfo>();
604 entry->data = mutex_info;
605 initMutexInfo(mutex_info, mutex);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700606 }
607
608 pthread_mutex_unlock_unchecked(&sDbgLock);
609
610 return (MutexInfo *)entry->data;
611}
612
613/****************************************************************************/
614
615static int ThreadInfo_equals(void const* data, void const* key) {
616 return ((ThreadInfo const *)data)->pid == *(pid_t *)key;
617}
618
619static ThreadInfo* get_thread_info(pid_t pid)
620{
621 pthread_mutex_lock_unchecked(&sDbgLock);
622
623 HashEntry* entry = hashmap_lookup(&sThreadMap,
624 &pid, sizeof(pid),
625 &ThreadInfo_equals);
626 if (entry->data == NULL) {
Elliott Hughes1e980b62013-01-17 18:36:06 -0800627 ThreadInfo* thread_info = DbgAllocLocked<ThreadInfo>();
628 entry->data = thread_info;
629 initThreadInfo(thread_info, pid);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700630 }
631
632 pthread_mutex_unlock_unchecked(&sDbgLock);
633
634 return (ThreadInfo *)entry->data;
635}
636
637static void push_most_recently_locked(MutexInfo* mrl) {
638 ThreadInfo* tinfo = get_thread_info(gettid());
639 mrl->next = NULL;
640 mrl->prev = tinfo->mrl;
641 tinfo->mrl = mrl;
642}
643
644static void remove_most_recently_locked(MutexInfo* mrl) {
645 ThreadInfo* tinfo = get_thread_info(gettid());
646 if (mrl->next) {
647 (mrl->next)->prev = mrl->prev;
648 }
649 if (mrl->prev) {
650 (mrl->prev)->next = mrl->next;
651 }
652 if (tinfo->mrl == mrl) {
653 tinfo->mrl = mrl->next;
654 }
655}
656
657static MutexInfo* get_most_recently_locked() {
658 ThreadInfo* tinfo = get_thread_info(gettid());
659 return tinfo->mrl;
660}
661
662/****************************************************************************/
663
664/* pthread_debug_init() is called from libc_init_dynamic() just
665 * after system properties have been initialized
666 */
667
Elliott Hughes1e980b62013-01-17 18:36:06 -0800668extern "C" __LIBC_HIDDEN__ void pthread_debug_init() {
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700669 char env[PROP_VALUE_MAX];
670 if (__system_property_get("debug.libc.pthread", env)) {
671 int level = atoi(env);
672 if (level) {
673 LOGI("pthread deadlock detection level %d enabled for pid %d (%s)",
674 level, getpid(), __progname);
675 hashmap_init(&sMutexMap);
676 sPthreadDebugLevel = level;
677 }
678 }
679}
680
681/*
682 * See if we were allowed to grab the lock at this time. We do it
683 * *after* acquiring the lock, rather than before, so that we can
684 * freely update the MutexInfo struct. This seems counter-intuitive,
685 * but our goal is deadlock *prediction* not deadlock *prevention*.
686 * (If we actually deadlock, the situation is easy to diagnose from
687 * a thread dump, so there's no point making a special effort to do
688 * the checks before the lock is held.)
689 */
690
Elliott Hughes1e980b62013-01-17 18:36:06 -0800691extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex)
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700692{
693 if (sPthreadDebugLevel == 0) return;
694 // prediction disabled for this thread
695 if (sPthreadDebugDisabledThread == gettid())
696 return;
697 MutexInfo* object = get_mutex_info(mutex);
698 MutexInfo* mrl = get_most_recently_locked();
699 mutex_lock_checked(mrl, object);
700 push_most_recently_locked(object);
701}
702
703/*
704 * pthread_debug_mutex_unlock_check() must be called with the mutex
705 * still held (ie: before calling the real unlock)
706 */
707
Elliott Hughes1e980b62013-01-17 18:36:06 -0800708extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex)
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700709{
710 if (sPthreadDebugLevel == 0) return;
711 // prediction disabled for this thread
712 if (sPthreadDebugDisabledThread == gettid())
713 return;
714 MutexInfo* object = get_mutex_info(mutex);
715 remove_most_recently_locked(object);
716 mutex_unlock_checked(object);
717}