blob: 94c23de35df5f8c5afdf57afb989c5b4aa5eec62 [file] [log] [blame]
Mathias Agopian7c0c3792011-09-05 23:54:55 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include <sys/types.h>
30#include <sys/atomics.h>
31#include <sys/system_properties.h>
32#include <sys/mman.h>
33
Elliott Hughes1e980b62013-01-17 18:36:06 -080034//#include <dlfcn.h>
Mathias Agopian7c0c3792011-09-05 23:54:55 -070035#include <stdint.h>
36#include <stdio.h>
37#include <stdlib.h>
38#include <errno.h>
39#include <pthread.h>
40#include <unwind.h>
41#include <unistd.h>
42
Mathias Agopian7c0c3792011-09-05 23:54:55 -070043#include "bionic_tls.h"
Elliott Hughes1e980b62013-01-17 18:36:06 -080044#include "debug_mapinfo.h"
45#include "debug_stacktrace.h"
Elliott Hughes8f2a5a02013-03-15 15:30:25 -070046#include "libc_logging.h"
Mathias Agopian7c0c3792011-09-05 23:54:55 -070047
48/*
49 * ===========================================================================
50 * Deadlock prediction
51 * ===========================================================================
52 */
53/*
54The idea is to predict the possibility of deadlock by recording the order
55in which locks are acquired. If we see an attempt to acquire a lock
56out of order, we can identify the locks and offending code.
57
58To make this work, we need to keep track of the locks held by each thread,
59and create history trees for each lock. When a thread tries to acquire
60a new lock, we walk through the "history children" of the lock, looking
61for a match with locks the thread already holds. If we find a match,
62it means the thread has made a request that could result in a deadlock.
63
64To support recursive locks, we always allow re-locking a currently-held
65lock, and maintain a recursion depth count.
66
67An ASCII-art example, where letters represent locks:
68
69 A
70 /|\
71 / | \
72 B | D
73 \ |
74 \|
75 C
76
77The above is the tree we'd have after handling lock synchronization
78sequences "ABC", "AC", "AD". A has three children, {B, C, D}. C is also
79a child of B. (The lines represent pointers between parent and child.
80Every node can have multiple parents and multiple children.)
81
82If we hold AC, and want to lock B, we recursively search through B's
83children to see if A or C appears. It does, so we reject the attempt.
84(A straightforward way to implement it: add a link from C to B, then
85determine whether the graph starting at B contains a cycle.)
86
87If we hold AC and want to lock D, we would succeed, creating a new link
88from C to D.
89
90Updates to MutexInfo structs are only allowed for the thread that holds
91the lock, so we actually do most of our deadlock prediction work after
92the lock has been acquired.
93*/
94
95// =============================================================================
96// log functions
97// =============================================================================
98
// =============================================================================
// log functions: all prediction diagnostics go to the system log under the
// "pthread_debug" tag at the indicated priority.
// =============================================================================

#define LOGD(format, ...)  \
    __libc_format_log(ANDROID_LOG_DEBUG, "pthread_debug", (format), ##__VA_ARGS__ )

#define LOGW(format, ...)  \
    __libc_format_log(ANDROID_LOG_WARN, "pthread_debug", (format), ##__VA_ARGS__ )

#define LOGE(format, ...)  \
    __libc_format_log(ANDROID_LOG_ERROR, "pthread_debug", (format), ##__VA_ARGS__ )

#define LOGI(format, ...)  \
    __libc_format_log(ANDROID_LOG_INFO, "pthread_debug", (format), ##__VA_ARGS__ )

// Delimiters printed around an "illegal lock attempt" report.
static const char* const kStartBanner =
        "===============================================================";

static const char* const kEndBanner =
        "===============================================================";

// Process name, for identifying the offender in log output.
extern const char* __progname;

// Maximum number of frames captured per call stack.
#define STACK_TRACE_DEPTH 16
120
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700121/****************************************************************************/
122
/*
 * level <= 0 : deadlock prediction disabled
 * level 1 : deadlock prediction enabled, w/o call stacks
 * level 2 : deadlock prediction enabled w/ call stacks
 */
#define CAPTURE_CALLSTACK 2
// Runtime debug level; set once from the debug.libc.pthread system property.
static int sPthreadDebugLevel = 0;
// While a detected cycle is being logged, prediction is suppressed for that
// thread (by tid); -1 means no thread is suppressed.
static pid_t sPthreadDebugDisabledThread = -1;
// Serializes access to the allocator, hash tables, and lock-order graph.
static pthread_mutex_t sDbgLock = PTHREAD_MUTEX_INITIALIZER;
132
133/****************************************************************************/
134
/* some simple/lame malloc replacement
 * NOT thread-safe and leaks everything
 */

#define DBG_ALLOC_BLOCK_SIZE PAGESIZE
// Offset of the next free byte within the current block. Starts "full" so
// the first DbgAllocLocked() call maps a fresh block.
static size_t sDbgAllocOffset = DBG_ALLOC_BLOCK_SIZE;
// Current block of mapped memory carved up by DbgAllocLocked().
static char* sDbgAllocPtr = NULL;
142
Elliott Hughes1e980b62013-01-17 18:36:06 -0800143template <typename T>
144static T* DbgAllocLocked(size_t count = 1) {
145 size_t size = sizeof(T) * count;
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700146 if ((sDbgAllocOffset + size) > DBG_ALLOC_BLOCK_SIZE) {
147 sDbgAllocOffset = 0;
Elliott Hughes1e980b62013-01-17 18:36:06 -0800148 sDbgAllocPtr = reinterpret_cast<char*>(mmap(NULL, DBG_ALLOC_BLOCK_SIZE,
149 PROT_READ|PROT_WRITE,
150 MAP_ANON | MAP_PRIVATE, 0, 0));
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700151 if (sDbgAllocPtr == MAP_FAILED) {
152 return NULL;
153 }
154 }
155 void* addr = sDbgAllocPtr + sDbgAllocOffset;
156 sDbgAllocOffset += size;
Elliott Hughes1e980b62013-01-17 18:36:06 -0800157 return reinterpret_cast<T*>(addr);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700158}
159
// Grow a buffer by mapping a fresh anonymous region of |size| bytes, copying
// the first |old_size| bytes from |ptr| (if any), and unmapping the old
// region. Returns NULL if the new mapping fails (old region is left intact).
static void* debug_realloc(void *ptr, size_t size, size_t old_size) {
    void* fresh = mmap(NULL, size, PROT_READ|PROT_WRITE,
                       MAP_ANON | MAP_PRIVATE, 0, 0);
    if (fresh == MAP_FAILED) {
        return NULL;
    }
    if (ptr != NULL) {
        memcpy(fresh, ptr, old_size);
        munmap(ptr, old_size);
    }
    return fresh;
}
173
174/*****************************************************************************/
175
struct MutexInfo;

// A captured call stack: |depth| return addresses in |addrs| (storage comes
// from DbgAllocLocked and is never freed).
typedef struct CallStack {
    uintptr_t depth;
    uintptr_t* addrs;
} CallStack;

typedef struct MutexInfo* MutexInfoListEntry;
typedef struct CallStack CallStackListEntry;

// A simple growable array. Backing storage comes from debug_realloc() and
// grows in PAGESIZE steps; the union views the same storage as either mutex
// pointers or call stacks depending on which list this is.
typedef struct GrowingList {
    int alloc;   // entries allocated
    int count;   // entries in use
    union {
        void* data;
        MutexInfoListEntry* list;
        CallStackListEntry* stack;
    };
} GrowingList;

typedef GrowingList MutexInfoList;
typedef GrowingList CallStackList;

// Per-mutex bookkeeping for deadlock prediction.
typedef struct MutexInfo {
    // thread currently holding the lock or 0
    pid_t owner;

    // most-recently-locked doubly-linked list
    struct MutexInfo* prev;
    struct MutexInfo* next;

    // for reentrant locks: number of re-locks beyond the first acquisition
    int lockCount;
    // when looking for loops in the graph, marks visited nodes
    int historyMark;
    // the actual mutex
    pthread_mutex_t* mutex;
    // list of locks directly acquired AFTER this one in the same thread
    MutexInfoList children;
    // list of locks directly acquired BEFORE this one in the same thread
    MutexInfoList parents;
    // list of call stacks when a new link is established to this lock from its parent
    CallStackList stacks;
    // call stack when this lock was acquired last
    int stackDepth;
    uintptr_t stackTrace[STACK_TRACE_DEPTH];
} MutexInfo;
223
224static void growingListInit(GrowingList* list) {
225 list->alloc = 0;
226 list->count = 0;
227 list->data = NULL;
228}
229
230static void growingListAdd(GrowingList* pList, size_t objSize) {
231 if (pList->count == pList->alloc) {
232 size_t oldsize = pList->alloc * objSize;
233 pList->alloc += PAGESIZE / objSize;
234 size_t size = pList->alloc * objSize;
235 pList->data = debug_realloc(pList->data, size, oldsize);
236 }
237 pList->count++;
238}
239
240static void initMutexInfo(MutexInfo* object, pthread_mutex_t* mutex) {
241 object->owner = 0;
242 object->prev = 0;
243 object->next = 0;
244 object->lockCount = 0;
245 object->historyMark = 0;
246 object->mutex = mutex;
247 growingListInit(&object->children);
248 growingListInit(&object->parents);
249 growingListInit(&object->stacks);
250 object->stackDepth = 0;
251}
252
// Per-thread state: the head of this thread's list of currently-held mutexes.
typedef struct ThreadInfo {
    pid_t pid;       // kernel thread id (gettid())
    MutexInfo* mrl;  // most recently locked mutex, or NULL if none held
} ThreadInfo;
257
258static void initThreadInfo(ThreadInfo* object, pid_t pid) {
259 object->pid = pid;
260 object->mrl = NULL;
261}
262
263/****************************************************************************/
264
// Forward declarations.
static MutexInfo* get_mutex_info(pthread_mutex_t *mutex);
static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object);
static void mutex_unlock_checked(MutexInfo* object);

/****************************************************************************/

// The real (unchecked) lock/unlock implementations, provided by pthread.c.
extern "C" int pthread_mutex_lock_impl(pthread_mutex_t *mutex);
extern "C" int pthread_mutex_unlock_impl(pthread_mutex_t *mutex);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700273
// Acquire |mutex| via the real implementation, bypassing the prediction
// hooks. Used for the module's own sDbgLock to avoid recursing into checks.
static int pthread_mutex_lock_unchecked(pthread_mutex_t *mutex) {
    return pthread_mutex_lock_impl(mutex);
}
277
// Release |mutex| via the real implementation, bypassing the prediction
// hooks (counterpart of pthread_mutex_lock_unchecked).
static int pthread_mutex_unlock_unchecked(pthread_mutex_t *mutex) {
    return pthread_mutex_unlock_impl(mutex);
}
281
282/****************************************************************************/
283
Elliott Hughes239e7a02013-01-25 17:13:45 -0800284static void dup_backtrace(CallStack* stack, size_t count, uintptr_t const* addrs) {
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700285 stack->depth = count;
Elliott Hughes239e7a02013-01-25 17:13:45 -0800286 stack->addrs = DbgAllocLocked<uintptr_t>(count);
287 memcpy(stack->addrs, addrs, count * sizeof(uintptr_t));
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700288}
289
290/****************************************************************************/
291
292static int historyListHas(
293 const MutexInfoList* list, MutexInfo const * obj) {
294 int i;
295 for (i=0; i<list->count; i++) {
296 if (list->list[i] == obj) {
297 return i;
298 }
299 }
300 return -1;
301}
302
303static void historyListAdd(MutexInfoList* pList, MutexInfo* obj) {
304 growingListAdd(pList, sizeof(MutexInfoListEntry));
305 pList->list[pList->count - 1] = obj;
306}
307
308static int historyListRemove(MutexInfoList* pList, MutexInfo* obj) {
309 int i;
310 for (i = pList->count-1; i >= 0; i--) {
311 if (pList->list[i] == obj) {
312 break;
313 }
314 }
315 if (i < 0) {
316 // not found!
317 return 0;
318 }
319
320 if (i != pList->count-1) {
321 // copy the last entry to the new free slot
322 pList->list[i] = pList->list[pList->count-1];
323 }
324 pList->count--;
325 memset(&pList->list[pList->count], 0, sizeof(MutexInfoListEntry));
326 return 1;
327}
328
329static void linkParentToChild(MutexInfo* parent, MutexInfo* child) {
330 historyListAdd(&parent->children, child);
331 historyListAdd(&child->parents, parent);
332}
333
334static void unlinkParentFromChild(MutexInfo* parent, MutexInfo* child) {
335 historyListRemove(&parent->children, child);
336 historyListRemove(&child->parents, parent);
337}
338
339/****************************************************************************/
340
341static void callstackListAdd(CallStackList* pList,
Elliott Hughes239e7a02013-01-25 17:13:45 -0800342 int count, uintptr_t const* addrs) {
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700343 growingListAdd(pList, sizeof(CallStackListEntry));
344 dup_backtrace(&pList->stack[pList->count - 1], count, addrs);
345}
346
347/****************************************************************************/
348
349/*
350 * Recursively traverse the object hierarchy starting at "obj". We mark
351 * ourselves on entry and clear the mark on exit. If we ever encounter
352 * a marked object, we have a cycle.
353 *
354 * Returns "true" if all is well, "false" if we found a cycle.
355 */
356
/*
 * Depth-first search of the lock-order graph rooted at |obj|. Each node is
 * marked on entry and unmarked on exit; revisiting a marked node means the
 * edge just added (objParent -> obj) closed a cycle, so we log a full
 * report and return 0. Returns 1 if no cycle was found.
 * Caller must hold sDbgLock.
 */
static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
{
    /*
     * Have we been here before?
     */
    if (obj->historyMark) {
        int stackDepth;
        uintptr_t addrs[STACK_TRACE_DEPTH];

        /* Turn off prediction temporarily in this thread while logging */
        sPthreadDebugDisabledThread = gettid();

        backtrace_startup();

        LOGW("%s\n", kStartBanner);
        LOGW("pid: %d, tid: %d >>> %s <<<", getpid(), gettid(), __progname);
        LOGW("Illegal lock attempt:\n");
        LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
        stackDepth = get_backtrace(addrs, STACK_TRACE_DEPTH);
        log_backtrace(addrs, stackDepth);

        LOGW("+++ Currently held locks in this thread (in reverse order):");
        // Walk up through parents owned by this thread, logging each held
        // lock (and, at level >= 2, the stack captured when it was taken).
        MutexInfo* cur = obj;
        pid_t ourtid = gettid();
        int i;
        for (i=0 ; i<cur->parents.count ; i++) {
            MutexInfo* parent = cur->parents.list[i];
            if (parent->owner == ourtid) {
                LOGW("--- pthread_mutex_t at %p\n", parent->mutex);
                if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
                    log_backtrace(parent->stackTrace, parent->stackDepth);
                }
                cur = parent;
                break;
            }
        }

        LOGW("+++ Earlier, the following lock order (from last to first) was established\n");
        return 0;
    }

    obj->historyMark = 1;

    // Recurse into every child; a failure below means a cycle was found and
    // logged, so log this node's contribution to the chain on the way out.
    MutexInfoList* pList = &obj->children;
    int result = 1;
    int i;
    for (i = pList->count-1; i >= 0; i--) {
        MutexInfo* child = pList->list[i];
        if (!traverseTree(child, obj)) {
            LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
            if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
                // stacks[] is kept parallel to parents[]; fall back to the
                // most recent acquisition stack if the index is stale.
                int index = historyListHas(&obj->parents, objParent);
                if ((size_t)index < (size_t)obj->stacks.count) {
                    log_backtrace(obj->stacks.stack[index].addrs, obj->stacks.stack[index].depth);
                } else {
                    log_backtrace(obj->stackTrace, obj->stackDepth);
                }
            }
            result = 0;
            break;
        }
    }

    obj->historyMark = 0;
    return result;
}
423
424/****************************************************************************/
425
/*
 * Record that the current thread acquired |object| and check whether doing
 * so after |mrl| (this thread's most recently locked mutex, or NULL) creates
 * a lock order that could deadlock. Called *after* the real lock is taken,
 * so this thread may freely update the MutexInfo (see file comment).
 */
static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
{
    pid_t tid = gettid();
    if (object->owner == tid) {
        // Recursive re-lock of a mutex we already hold: just bump the depth.
        object->lockCount++;
        return;
    }

    object->owner = tid;
    object->lockCount = 0;

    if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
        // always record the call stack when acquiring a lock.
        // it's not efficient, but is useful during diagnostics
        object->stackDepth = get_backtrace(object->stackTrace, STACK_TRACE_DEPTH);
    }

    // no other locks held in this thread -- no deadlock possible!
    if (mrl == NULL)
        return;

    // check if the lock we're trying to acquire is a direct descendant of
    // the most recently locked mutex in this thread, in which case we're
    // in a good situation -- no deadlock possible
    if (historyListHas(&mrl->children, object) >= 0)
        return;

    pthread_mutex_lock_unchecked(&sDbgLock);

    // Tentatively add the edge mrl -> object, then search for a cycle.
    linkParentToChild(mrl, object);
    if (!traverseTree(object, mrl)) {
        // Cycle found (and logged by traverseTree); back the edge out.
        backtrace_shutdown();
        LOGW("%s\n", kEndBanner);
        unlinkParentFromChild(mrl, object);
        // reenable pthread debugging for this thread
        sPthreadDebugDisabledThread = -1;
    } else {
        // record the call stack for this link
        // NOTE: the call stack is added at the same index
        // as mrl in object->parents[]
        // ie: object->parents.count == object->stacks.count, which is
        // also the index.
        if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
            callstackListAdd(&object->stacks,
                    object->stackDepth, object->stackTrace);
        }
    }

    pthread_mutex_unlock_unchecked(&sDbgLock);
}
476
477static void mutex_unlock_checked(MutexInfo* object)
478{
479 pid_t tid = gettid();
480 if (object->owner == tid) {
481 if (object->lockCount == 0) {
482 object->owner = 0;
483 } else {
484 object->lockCount--;
485 }
486 }
487}
488
489
490// =============================================================================
491// Hash Table functions
492// =============================================================================
493
494/****************************************************************************/
495
// Bucket count for each table; must stay a power of two because get_index()
// masks the hash with (HASHTABLE_SIZE - 1).
#define HASHTABLE_SIZE 256

typedef struct HashEntry HashEntry;
// One chained hash-table entry; |data| points at a MutexInfo or ThreadInfo.
struct HashEntry {
    size_t slot;      // bucket index, kept so removal can fix the head pointer
    HashEntry* prev;
    HashEntry* next;
    void* data;
};

typedef struct HashTable HashTable;
struct HashTable {
    HashEntry* slots[HASHTABLE_SIZE];
};

// pthread_mutex_t* -> MutexInfo* and pid_t -> ThreadInfo* maps; both are
// accessed under sDbgLock by get_mutex_info()/get_thread_info().
static HashTable sMutexMap;
static HashTable sThreadMap;
513
514/****************************************************************************/
515
// Simple 31-multiplier hash over the |keySize| bytes at |key|, seeded with
// the key length.
static uint32_t get_hashcode(void const * key, size_t keySize)
{
    char const* bytes = (char const*) key;
    uint32_t hash = keySize;
    while (keySize-- != 0) {
        hash = hash * 31 + *bytes;
        bytes++;
    }
    return hash;
}
527
// Map a raw hash to a bucket index. Applies Doug Lea's supplemental bit-mix
// to defend against poor hashes, then masks to table size.
static size_t get_index(uint32_t h)
{
    h += ~(h << 9);
    h ^= h >> 14;
    h += h << 4;
    h ^= h >> 10;
    return (size_t)(h & (HASHTABLE_SIZE - 1));
}
538
539/****************************************************************************/
540
541static void hashmap_init(HashTable* table) {
542 memset(table, 0, sizeof(HashTable));
543}
544
545static void hashmap_removeEntry(HashTable* table, HashEntry* entry)
546{
547 HashEntry* prev = entry->prev;
548 HashEntry* next = entry->next;
549 if (prev != NULL) entry->prev->next = next;
550 if (next != NULL) entry->next->prev = prev;
551 if (prev == NULL) {
552 // we are the head of the list. set the head to be next
553 table->slots[entry->slot] = entry->next;
554 }
555}
556
/*
 * Find the entry matching |key| in |table|, creating it if absent. |equals|
 * compares an existing entry's |data| against the key. A newly created
 * entry has data == NULL, which callers use to detect "first sighting".
 * Caller must hold sDbgLock.
 */
static HashEntry* hashmap_lookup(HashTable* table,
        void const* key, size_t ksize,
        int (*equals)(void const* data, void const* key))
{
    const uint32_t hash = get_hashcode(key, ksize);
    const size_t slot = get_index(hash);

    HashEntry* entry = table->slots[slot];
    while (entry) {
        if (equals(entry->data, key)) {
            break;
        }
        entry = entry->next;
    }

    if (entry == NULL) {
        // create a new entry and push it at the head of the bucket's chain
        // NOTE(review): DbgAllocLocked can return NULL on mmap failure, which
        // would crash on the next line -- confirm whether that's acceptable
        // for this debug-only path.
        entry = DbgAllocLocked<HashEntry>();
        entry->data = NULL;
        entry->slot = slot;
        entry->prev = NULL;
        entry->next = table->slots[slot];
        if (entry->next != NULL) {
            entry->next->prev = entry;
        }
        table->slots[slot] = entry;
    }
    return entry;
}
586
587/****************************************************************************/
588
589static int MutexInfo_equals(void const* data, void const* key) {
590 return ((MutexInfo const *)data)->mutex == *(pthread_mutex_t **)key;
591}
592
593static MutexInfo* get_mutex_info(pthread_mutex_t *mutex)
594{
595 pthread_mutex_lock_unchecked(&sDbgLock);
596
597 HashEntry* entry = hashmap_lookup(&sMutexMap,
598 &mutex, sizeof(mutex),
599 &MutexInfo_equals);
600 if (entry->data == NULL) {
Elliott Hughes1e980b62013-01-17 18:36:06 -0800601 MutexInfo* mutex_info = DbgAllocLocked<MutexInfo>();
602 entry->data = mutex_info;
603 initMutexInfo(mutex_info, mutex);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700604 }
605
606 pthread_mutex_unlock_unchecked(&sDbgLock);
607
608 return (MutexInfo *)entry->data;
609}
610
611/****************************************************************************/
612
613static int ThreadInfo_equals(void const* data, void const* key) {
614 return ((ThreadInfo const *)data)->pid == *(pid_t *)key;
615}
616
617static ThreadInfo* get_thread_info(pid_t pid)
618{
619 pthread_mutex_lock_unchecked(&sDbgLock);
620
621 HashEntry* entry = hashmap_lookup(&sThreadMap,
622 &pid, sizeof(pid),
623 &ThreadInfo_equals);
624 if (entry->data == NULL) {
Elliott Hughes1e980b62013-01-17 18:36:06 -0800625 ThreadInfo* thread_info = DbgAllocLocked<ThreadInfo>();
626 entry->data = thread_info;
627 initThreadInfo(thread_info, pid);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700628 }
629
630 pthread_mutex_unlock_unchecked(&sDbgLock);
631
632 return (ThreadInfo *)entry->data;
633}
634
635static void push_most_recently_locked(MutexInfo* mrl) {
636 ThreadInfo* tinfo = get_thread_info(gettid());
637 mrl->next = NULL;
638 mrl->prev = tinfo->mrl;
639 tinfo->mrl = mrl;
640}
641
642static void remove_most_recently_locked(MutexInfo* mrl) {
643 ThreadInfo* tinfo = get_thread_info(gettid());
644 if (mrl->next) {
645 (mrl->next)->prev = mrl->prev;
646 }
647 if (mrl->prev) {
648 (mrl->prev)->next = mrl->next;
649 }
650 if (tinfo->mrl == mrl) {
651 tinfo->mrl = mrl->next;
652 }
653}
654
655static MutexInfo* get_most_recently_locked() {
656 ThreadInfo* tinfo = get_thread_info(gettid());
657 return tinfo->mrl;
658}
659
660/****************************************************************************/
661
662/* pthread_debug_init() is called from libc_init_dynamic() just
663 * after system properties have been initialized
664 */
665
Elliott Hughes1e980b62013-01-17 18:36:06 -0800666extern "C" __LIBC_HIDDEN__ void pthread_debug_init() {
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700667 char env[PROP_VALUE_MAX];
668 if (__system_property_get("debug.libc.pthread", env)) {
669 int level = atoi(env);
670 if (level) {
671 LOGI("pthread deadlock detection level %d enabled for pid %d (%s)",
672 level, getpid(), __progname);
673 hashmap_init(&sMutexMap);
674 sPthreadDebugLevel = level;
675 }
676 }
677}
678
679/*
680 * See if we were allowed to grab the lock at this time. We do it
681 * *after* acquiring the lock, rather than before, so that we can
682 * freely update the MutexInfo struct. This seems counter-intuitive,
683 * but our goal is deadlock *prediction* not deadlock *prevention*.
684 * (If we actually deadlock, the situation is easy to diagnose from
685 * a thread dump, so there's no point making a special effort to do
686 * the checks before the lock is held.)
687 */
688
Elliott Hughes1e980b62013-01-17 18:36:06 -0800689extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex)
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700690{
691 if (sPthreadDebugLevel == 0) return;
692 // prediction disabled for this thread
693 if (sPthreadDebugDisabledThread == gettid())
694 return;
695 MutexInfo* object = get_mutex_info(mutex);
696 MutexInfo* mrl = get_most_recently_locked();
697 mutex_lock_checked(mrl, object);
698 push_most_recently_locked(object);
699}
700
701/*
702 * pthread_debug_mutex_unlock_check() must be called with the mutex
703 * still held (ie: before calling the real unlock)
704 */
705
Elliott Hughes1e980b62013-01-17 18:36:06 -0800706extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex)
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700707{
708 if (sPthreadDebugLevel == 0) return;
709 // prediction disabled for this thread
710 if (sPthreadDebugDisabledThread == gettid())
711 return;
712 MutexInfo* object = get_mutex_info(mutex);
713 remove_most_recently_locked(object);
714 mutex_unlock_checked(object);
715}