/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <stdarg.h>
#include <fcntl.h>
#include <unwind.h>

#include <sys/un.h>
#include <sys/select.h>
#include <sys/types.h>
#include <sys/system_properties.h>

#include "dlmalloc.h"
#include "logd.h"

// =============================================================================
// Utilities directly used by Dalvik
// =============================================================================

#define HASHTABLE_SIZE          1543
#define BACKTRACE_SIZE          32
/* flag definitions, currently sharing storage with "size" */
#define SIZE_FLAG_ZYGOTE_CHILD  (1<<31)
#define SIZE_FLAG_MASK          (SIZE_FLAG_ZYGOTE_CHILD)

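/*
 * Because the flag shares storage with "size", a tracked allocation has to
 * fit in the remaining 31 bits; record_backtrace() below aborts if a
 * requested size already has the flag bit set.
 */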
/*
 * In a VM process, this is set to 1 after fork()ing out of zygote.
 */
int gMallocLeakZygoteChild = 0;

// =============================================================================
// Structures
// =============================================================================

typedef struct HashEntry HashEntry;
struct HashEntry {
    size_t slot;
    HashEntry* prev;
    HashEntry* next;
    size_t numEntries;
    // fields above "size" are NOT sent to the host
    size_t size;
    size_t allocations;
    intptr_t backtrace[0];
};

typedef struct HashTable HashTable;
struct HashTable {
    size_t count;
    HashEntry* slots[HASHTABLE_SIZE];
};

static pthread_mutex_t gAllocationsMutex = PTHREAD_MUTEX_INITIALIZER;
static HashTable gHashTable;

// =============================================================================
// output functions
// =============================================================================

static int hash_entry_compare(const void* arg1, const void* arg2)
{
    HashEntry* e1 = *(HashEntry**)arg1;
    HashEntry* e2 = *(HashEntry**)arg2;

    size_t nbAlloc1 = e1->allocations;
    size_t nbAlloc2 = e2->allocations;
    size_t size1 = e1->size & ~SIZE_FLAG_MASK;
    size_t size2 = e2->size & ~SIZE_FLAG_MASK;
    size_t alloc1 = nbAlloc1 * size1;
    size_t alloc2 = nbAlloc2 * size2;

    // sort in descending order by:
    // 1) total size
    // 2) number of allocations
    //
    // This is used for sorting, not determination of equality, so we don't
    // need to compare the bit flags.
    int result;
    if (alloc1 > alloc2) {
        result = -1;
    } else if (alloc1 < alloc2) {
        result = 1;
    } else {
        if (nbAlloc1 > nbAlloc2) {
            result = -1;
        } else if (nbAlloc1 < nbAlloc2) {
            result = 1;
        } else {
            result = 0;
        }
    }
    return result;
}

/*
 * Retrieve native heap information.
 *
 * "*info" is set to a buffer we allocate
 * "*overallSize" is set to the size of the "info" buffer
 * "*infoSize" is set to the size of a single entry
 * "*totalMemory" is set to the sum of all allocations we're tracking; does
 *   not include heap overhead
 * "*backtraceSize" is set to the maximum number of entries in the back trace
 */
void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
        size_t* infoSize, size_t* totalMemory, size_t* backtraceSize)
{
    // don't do anything if we have invalid arguments
    if (info == NULL || overallSize == NULL || infoSize == NULL ||
            totalMemory == NULL || backtraceSize == NULL) {
        return;
    }

    pthread_mutex_lock(&gAllocationsMutex);

    if (gHashTable.count == 0) {
        *info = NULL;
        *overallSize = 0;
        *infoSize = 0;
        *totalMemory = 0;
        *backtraceSize = 0;
        goto done;
    }

    void** list = (void**)dlmalloc(sizeof(void*) * gHashTable.count);
    if (list == NULL) {
        *info = NULL;
        *overallSize = 0;
        *infoSize = 0;
        *totalMemory = 0;
        *backtraceSize = 0;
        goto done;
    }

    // debug_log("*****\ngHashTable.count = %d\n", gHashTable.count);
    // debug_log("list = %p\n", list);

    // get the entries into an array to be sorted; start the running total
    // from zero rather than from whatever the caller left in *totalMemory
    *totalMemory = 0;
    int index = 0;
    int i;
    for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
        HashEntry* entry = gHashTable.slots[i];
        while (entry != NULL) {
            list[index] = entry;
            *totalMemory = *totalMemory +
                ((entry->size & ~SIZE_FLAG_MASK) * entry->allocations);
            index++;
            entry = entry->next;
        }
    }

    // debug_log("sorted list!\n");
    // XXX: the protocol doesn't allow variable size for the stack trace (yet)
    *infoSize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * BACKTRACE_SIZE);
    *overallSize = *infoSize * gHashTable.count;
    *backtraceSize = BACKTRACE_SIZE;

    // debug_log("infoSize = 0x%x overall = 0x%x\n", *infoSize, *overallSize);
    // now get a byte array big enough for this
    *info = (uint8_t*)dlmalloc(*overallSize);

    // debug_log("info = %p\n", info);
    if (*info == NULL) {
        *overallSize = 0;
        dlfree(list);
        goto done;
    }

    // debug_log("sorting list...\n");
    qsort((void*)list, gHashTable.count, sizeof(void*), hash_entry_compare);

    uint8_t* head = *info;
    const int count = gHashTable.count;
    for (i = 0 ; i < count ; i++) {
        HashEntry* entry = list[i];
        size_t entrySize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * entry->numEntries);
        if (entrySize < *infoSize) {
            /* we're writing less than a full entry, clear out the rest */
            /* TODO: only clear out the part we're not overwriting? */
            memset(head, 0, *infoSize);
        } else {
            /* make sure the amount we're copying doesn't exceed the limit */
            entrySize = *infoSize;
        }
        memcpy(head, &(entry->size), entrySize);
        head += *infoSize;
    }

    dlfree(list);

done:
    // debug_log("+++++ done!\n");
    pthread_mutex_unlock(&gAllocationsMutex);
}
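
/*
 * Example (a sketch, not part of the API contract): a caller that just
 * received the buffer above could walk it like this. Each record is
 * *infoSize bytes: "size" and "allocations" as two size_t values, then
 * BACKTRACE_SIZE intptr_t frames, zero-padded when the trace was shorter.
 *
 *   uint8_t* info;
 *   size_t overall, recordSize, total, frames;
 *   get_malloc_leak_info(&info, &overall, &recordSize, &total, &frames);
 *   if (info != NULL) {
 *       uint8_t* p;
 *       for (p = info; p < info + overall; p += recordSize) {
 *           size_t size   = ((size_t*)p)[0] & ~SIZE_FLAG_MASK;
 *           size_t allocs = ((size_t*)p)[1];
 *           intptr_t* bt  = (intptr_t*)(p + 2 * sizeof(size_t));
 *           // report "allocs allocations of size bytes" plus bt[0..frames)
 *       }
 *       free_malloc_leak_info(info);
 *   }
 */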

void free_malloc_leak_info(uint8_t* info)
{
    dlfree(info);
}

struct mallinfo mallinfo()
{
    return dlmallinfo();
}

void* valloc(size_t bytes) {
    /* valloc() returns memory aligned on a page boundary */
    return memalign(getpagesize(), bytes);
}


/*
 * Code guarded by MALLOC_LEAK_CHECK is only needed when malloc checking is
 * enabled. Currently it is excluded from libc.so and included only in
 * libc_debug.so.
 */
#ifdef MALLOC_LEAK_CHECK
#define MALLOC_ALIGNMENT    8
#define GUARD               0x48151642

#define DEBUG               0

// =============================================================================
// Structures
// =============================================================================
typedef struct AllocationEntry AllocationEntry;
struct AllocationEntry {
    HashEntry* entry;
    uint32_t guard;
};
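
/*
 * Note: on the 32-bit targets this code was written for, AllocationEntry is
 * exactly 8 bytes (one pointer plus one uint32_t), which is what the
 * "header is 8 bytes" assumption in leak_malloc() below relies on; user
 * blocks therefore stay MALLOC_ALIGNMENT-aligned.
 */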

// =============================================================================
// log functions
// =============================================================================

#define debug_log(format, ...)  \
    __libc_android_log_print(ANDROID_LOG_DEBUG, "malloc_leak", (format), ##__VA_ARGS__ )

// =============================================================================
// Hash Table functions
// =============================================================================
static uint32_t get_hash(intptr_t* backtrace, size_t numEntries)
{
    if (backtrace == NULL) return 0;

    uint32_t hash = 0;
    size_t i;
    for (i = 0 ; i < numEntries ; i++) {
        hash = (hash * 33) + (backtrace[i] >> 2);
    }

    return hash;
}
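
/*
 * Note: this is a djb2-style multiplicative hash ("hash * 33 + c"). The low
 * two bits of a return address carry little information (code is word-aligned
 * on most targets), so they are shifted out before mixing.
 */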

static HashEntry* find_entry(HashTable* table, int slot,
        intptr_t* backtrace, size_t numEntries, size_t size)
{
    HashEntry* entry = table->slots[slot];
    while (entry != NULL) {
        //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
        //        backtrace, entry, (entry != NULL) ? entry->backtrace : NULL);
        /*
         * See if the entry matches exactly. We compare the "size" field,
         * including the flag bits.
         */
        if (entry->size == size && entry->numEntries == numEntries &&
                !memcmp(backtrace, entry->backtrace, numEntries * sizeof(intptr_t))) {
            return entry;
        }

        entry = entry->next;
    }

    return NULL;
}

static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_t size)
{
    size_t hash = get_hash(backtrace, numEntries);
    size_t slot = hash % HASHTABLE_SIZE;

    if (size & SIZE_FLAG_MASK) {
        debug_log("malloc_debug: allocation %zx exceeds bit width\n", size);
        abort();
    }

    if (gMallocLeakZygoteChild)
        size |= SIZE_FLAG_ZYGOTE_CHILD;

    HashEntry* entry = find_entry(&gHashTable, slot, backtrace, numEntries, size);

    if (entry != NULL) {
        entry->allocations++;
    } else {
        // create a new entry
        entry = (HashEntry*)dlmalloc(sizeof(HashEntry) + numEntries*sizeof(intptr_t));
        entry->allocations = 1;
        entry->slot = slot;
        entry->prev = NULL;
        entry->next = gHashTable.slots[slot];
        entry->numEntries = numEntries;
        entry->size = size;

        memcpy(entry->backtrace, backtrace, numEntries * sizeof(intptr_t));

        gHashTable.slots[slot] = entry;

        if (entry->next != NULL) {
            entry->next->prev = entry;
        }

        // we just added an entry, increase the count of the hashtable
        gHashTable.count++;
    }

    return entry;
}

static int is_valid_entry(HashEntry* entry)
{
    if (entry != NULL) {
        int i;
        for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
            HashEntry* e1 = gHashTable.slots[i];

            while (e1 != NULL) {
                if (e1 == entry) {
                    return 1;
                }

                e1 = e1->next;
            }
        }
    }

    return 0;
}

static void remove_entry(HashEntry* entry)
{
    HashEntry* prev = entry->prev;
    HashEntry* next = entry->next;

    if (prev != NULL) entry->prev->next = next;
    if (next != NULL) entry->next->prev = prev;

    if (prev == NULL) {
        // we are the head of the list. set the head to be next
        gHashTable.slots[entry->slot] = entry->next;
    }

    // we just removed an entry, decrease the count of the hashtable
    gHashTable.count--;
}


// =============================================================================
// stack trace functions
// =============================================================================

typedef struct
{
    size_t count;
    intptr_t* addrs;
} stack_crawl_state_t;

static _Unwind_Reason_Code trace_function(_Unwind_Context *context, void *arg)
{
    stack_crawl_state_t* state = (stack_crawl_state_t*)arg;
    if (state->count) {
        intptr_t ip = (intptr_t)_Unwind_GetIP(context);
        if (ip) {
            state->addrs[0] = ip;
            state->addrs++;
            state->count--;
            return _URC_NO_REASON;
        }
    }
    /*
     * If we have run out of space to record addresses, or a 0 address has
     * been seen, stop unwinding the stack.
     */
    return _URC_END_OF_STACK;
}

static inline
int get_backtrace(intptr_t* addrs, size_t max_entries)
{
    stack_crawl_state_t state;
    state.count = max_entries;
    state.addrs = (intptr_t*)addrs;
    _Unwind_Backtrace(trace_function, (void*)&state);
    return max_entries - state.count;
}
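
/*
 * Usage sketch (this is how leak_malloc() below uses it):
 *
 *   intptr_t frames[BACKTRACE_SIZE];
 *   int depth = get_backtrace(frames, BACKTRACE_SIZE);
 *
 * _Unwind_Backtrace() calls trace_function() once per stack frame until it
 * returns something other than _URC_NO_REASON, so "depth" is the number of
 * addresses actually recorded.
 */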

// =============================================================================
// malloc leak function dispatcher
// =============================================================================

static void* leak_malloc(size_t bytes);
static void  leak_free(void* mem);
static void* leak_calloc(size_t n_elements, size_t elem_size);
static void* leak_realloc(void* oldMem, size_t bytes);
static void* leak_memalign(size_t alignment, size_t bytes);

static void* fill_malloc(size_t bytes);
static void  fill_free(void* mem);
static void* fill_realloc(void* oldMem, size_t bytes);
static void* fill_memalign(size_t alignment, size_t bytes);

static void* chk_malloc(size_t bytes);
static void  chk_free(void* mem);
static void* chk_calloc(size_t n_elements, size_t elem_size);
static void* chk_realloc(void* oldMem, size_t bytes);
static void* chk_memalign(size_t alignment, size_t bytes);

typedef struct {
    void* (*malloc)(size_t bytes);
    void  (*free)(void* mem);
    void* (*calloc)(size_t n_elements, size_t elem_size);
    void* (*realloc)(void* oldMem, size_t bytes);
    void* (*memalign)(size_t alignment, size_t bytes);
} MallocDebug;

static const MallocDebug gMallocEngineTable[] __attribute__((aligned(32))) =
{
    { dlmalloc,    dlfree,    dlcalloc,    dlrealloc,    dlmemalign },
    { leak_malloc, leak_free, leak_calloc, leak_realloc, leak_memalign },
    { fill_malloc, fill_free, dlcalloc,    fill_realloc, fill_memalign },
    { chk_malloc,  chk_free,  chk_calloc,  chk_realloc,  chk_memalign }
};

enum {
    INDEX_NORMAL = 0,
    INDEX_LEAK_CHECK,
    INDEX_MALLOC_FILL,
    INDEX_MALLOC_CHECK,
};

static MallocDebug const * gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
static int gMallocDebugLevel;
static int gTrapOnError = 1;

void* malloc(size_t bytes) {
    return gMallocDispatch->malloc(bytes);
}
void free(void* mem) {
    gMallocDispatch->free(mem);
}
void* calloc(size_t n_elements, size_t elem_size) {
    return gMallocDispatch->calloc(n_elements, elem_size);
}
void* realloc(void* oldMem, size_t bytes) {
    return gMallocDispatch->realloc(oldMem, bytes);
}
void* memalign(size_t alignment, size_t bytes) {
    return gMallocDispatch->memalign(alignment, bytes);
}
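
/*
 * The public allocator entry points above are thin trampolines: every call
 * goes through gMallocDispatch, which malloc_debug_init() (at the bottom of
 * this file) points at one of the four rows of gMallocEngineTable based on
 * the "libc.debug.malloc" system property.
 */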

// =============================================================================
// malloc check functions
// =============================================================================

#define CHK_FILL_FREE           0xef
#define CHK_SENTINEL_VALUE      0xeb
#define CHK_SENTINEL_HEAD_SIZE  16
#define CHK_SENTINEL_TAIL_SIZE  16
#define CHK_OVERHEAD_SIZE       (   CHK_SENTINEL_HEAD_SIZE +    \
                                    CHK_SENTINEL_TAIL_SIZE +    \
                                    sizeof(size_t) )
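
/*
 * Layout of a chk_malloc() block (see chk_malloc() below). The requested
 * size is stashed in the last size_t of the usable chunk, which may sit
 * past the tail sentinel when dlmalloc rounds the chunk up:
 *
 *   | head sentinel (16) | user data (bytes) | tail sentinel (16) | ... | size |
 *   ^                    ^
 *   raw dlmalloc ptr     pointer returned to the caller
 */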

static void dump_stack_trace()
{
    intptr_t addrs[20];
    int c = get_backtrace(addrs, 20);
    char buf[16];
    char tmp[16*20];
    int i;

    tmp[0] = 0; // Need to initialize tmp[0] for the first strcat
    for (i=0 ; i<c; i++) {
        sprintf(buf, "%2d: %08x\n", i, addrs[i]);
        strcat(tmp, buf);
    }
    __libc_android_log_print(ANDROID_LOG_ERROR, "libc", "call stack:\n%s", tmp);
}

static int is_valid_malloc_pointer(void* addr)
{
    return 1;
}

static void assert_valid_malloc_pointer(void* mem)
{
    if (mem && !is_valid_malloc_pointer(mem)) {
        pthread_mutex_lock(&gAllocationsMutex);
        gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
        __libc_android_log_print(ANDROID_LOG_ERROR, "libc",
                "*** MALLOC CHECK: buffer %p, is not a valid "
                "malloc pointer (are you mixing up new/delete "
                "and malloc/free?)", mem);
        dump_stack_trace();
        if (gTrapOnError) {
            __builtin_trap();
        }
        gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
        pthread_mutex_unlock(&gAllocationsMutex);
    }
}

static void chk_out_of_bounds_check__locked(void* buffer, size_t size)
{
    int i;
    char* buf = (char*)buffer - CHK_SENTINEL_HEAD_SIZE;
    for (i=0 ; i<CHK_SENTINEL_HEAD_SIZE ; i++) {
        if (buf[i] != CHK_SENTINEL_VALUE) {
            gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
            __libc_android_log_print(ANDROID_LOG_ERROR, "libc",
                    "*** MALLOC CHECK: buffer %p, size=%lu, "
                    "corrupted %d bytes before allocation",
                    buffer, size, CHK_SENTINEL_HEAD_SIZE-i);
            dump_stack_trace();
            if (gTrapOnError) {
                __builtin_trap();
            }
            gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
        }
    }
    buf = (char*)buffer + size;
    for (i=CHK_SENTINEL_TAIL_SIZE-1 ; i>=0 ; i--) {
        if (buf[i] != CHK_SENTINEL_VALUE) {
            gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
            __libc_android_log_print(ANDROID_LOG_ERROR, "libc",
                    "*** MALLOC CHECK: buffer %p, size=%lu, "
                    "corrupted %d bytes after allocation",
                    buffer, size, i+1);
            dump_stack_trace();
            if (gTrapOnError) {
                __builtin_trap();
            }
            gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
        }
    }
}

void* chk_malloc(size_t bytes)
{
    char* buffer = (char*)dlmalloc(bytes + CHK_OVERHEAD_SIZE);
    if (buffer) {
        pthread_mutex_lock(&gAllocationsMutex);
        memset(buffer, CHK_SENTINEL_VALUE, bytes + CHK_OVERHEAD_SIZE);
        size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
        *(size_t *)(buffer + offset) = bytes;
        buffer += CHK_SENTINEL_HEAD_SIZE;
        pthread_mutex_unlock(&gAllocationsMutex);
    }
    return buffer;
}

void chk_free(void* mem)
{
    assert_valid_malloc_pointer(mem);
    if (mem) {
        pthread_mutex_lock(&gAllocationsMutex);
        char* buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
        size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
        size_t bytes = *(size_t *)(buffer + offset);
        chk_out_of_bounds_check__locked(mem, bytes);
        pthread_mutex_unlock(&gAllocationsMutex);
        memset(buffer, CHK_FILL_FREE, bytes);
        dlfree(buffer);
    }
}

void* chk_calloc(size_t n_elements, size_t elem_size)
{
    size_t size = n_elements * elem_size;
    /* guard against overflow of the multiplication */
    if (n_elements && size / n_elements != elem_size) {
        return NULL;
    }
    void* ptr = chk_malloc(size);
    if (ptr != NULL) {
        memset(ptr, 0, size);
    }
    return ptr;
}

void* chk_realloc(void* mem, size_t bytes)
{
    assert_valid_malloc_pointer(mem);
    char* new_buffer = chk_malloc(bytes);
    if (mem == NULL) {
        return new_buffer;
    }

    pthread_mutex_lock(&gAllocationsMutex);
    char* buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
    size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
    size_t old_bytes = *(size_t *)(buffer + offset);
    chk_out_of_bounds_check__locked(mem, old_bytes);
    pthread_mutex_unlock(&gAllocationsMutex);

    if (new_buffer) {
        size_t size = (bytes < old_bytes) ? bytes : old_bytes;
        memcpy(new_buffer, mem, size);
        chk_free(mem);
    }

    return new_buffer;
}

void* chk_memalign(size_t alignment, size_t bytes)
{
    // XXX: alignment is ignored; returning ordinary chk_malloc() memory
    // is better than returning unchecked memory
    return chk_malloc(bytes);
}

// =============================================================================
// malloc fill functions
// =============================================================================

void* fill_malloc(size_t bytes)
{
    void* buffer = dlmalloc(bytes);
    if (buffer) {
        memset(buffer, CHK_SENTINEL_VALUE, bytes);
    }
    return buffer;
}

void fill_free(void* mem)
{
    size_t bytes = dlmalloc_usable_size(mem);
    memset(mem, CHK_FILL_FREE, bytes);
    dlfree(mem);
}

void* fill_realloc(void* mem, size_t bytes)
{
    void* buffer = fill_malloc(bytes);
    if (mem == NULL) {
        return buffer;
    }
    if (buffer) {
        size_t old_size = dlmalloc_usable_size(mem);
        size_t size = (bytes < old_size) ? bytes : old_size;
        memcpy(buffer, mem, size);
        fill_free(mem);
    }
    return buffer;
}

void* fill_memalign(size_t alignment, size_t bytes)
{
    void* buffer = dlmemalign(alignment, bytes);
    if (buffer) {
        memset(buffer, CHK_SENTINEL_VALUE, bytes);
    }
    return buffer;
}

// =============================================================================
// malloc leak functions
// =============================================================================

#define MEMALIGN_GUARD  ((void*)0xA1A41520)

void* leak_malloc(size_t bytes)
{
    // allocate enough space in front of the allocation to store a pointer to
    // the alloc structure. This makes freeing the structure really fast!

    // 1. allocate enough memory and include our header
    // 2. set the base pointer to be right after our header

    void* base = dlmalloc(bytes + sizeof(AllocationEntry));
    if (base != NULL) {
        pthread_mutex_lock(&gAllocationsMutex);

        intptr_t backtrace[BACKTRACE_SIZE];
        size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);

        AllocationEntry* header = (AllocationEntry*)base;
        header->entry = record_backtrace(backtrace, numEntries, bytes);
        header->guard = GUARD;

        // now increment base to point to after our header.
        // this should just work since our header is 8 bytes.
        base = (AllocationEntry*)base + 1;

        pthread_mutex_unlock(&gAllocationsMutex);
    }

    return base;
}

void leak_free(void* mem)
{
    if (mem != NULL) {
        pthread_mutex_lock(&gAllocationsMutex);

        // check the guard to make sure it is valid
        AllocationEntry* header = (AllocationEntry*)mem - 1;

        if (header->guard != GUARD) {
            // could be a memaligned block
            if (((void**)mem)[-1] == MEMALIGN_GUARD) {
                mem = ((void**)mem)[-2];
                header = (AllocationEntry*)mem - 1;
            }
        }

        if (header->guard == GUARD || is_valid_entry(header->entry)) {
            // decrement the allocations
            HashEntry* entry = header->entry;
            entry->allocations--;
            if (entry->allocations <= 0) {
                remove_entry(entry);
                dlfree(entry);
            }

            // now free the memory!
            dlfree(header);
        } else {
            debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
                    header->guard, header->entry);
        }

        pthread_mutex_unlock(&gAllocationsMutex);
    }
}

void* leak_calloc(size_t n_elements, size_t elem_size)
{
    size_t size = n_elements * elem_size;
    /* guard against overflow of the multiplication */
    if (n_elements && size / n_elements != elem_size) {
        return NULL;
    }
    void* ptr = leak_malloc(size);
    if (ptr != NULL) {
        memset(ptr, 0, size);
    }
    return ptr;
}

void* leak_realloc(void* oldMem, size_t bytes)
{
    if (oldMem == NULL) {
        return leak_malloc(bytes);
    }
    void* newMem = NULL;
    AllocationEntry* header = (AllocationEntry*)oldMem - 1;
    if (header && header->guard == GUARD) {
        size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK;
        newMem = leak_malloc(bytes);
        if (newMem != NULL) {
            size_t copySize = (oldSize <= bytes) ? oldSize : bytes;
            memcpy(newMem, oldMem, copySize);
            leak_free(oldMem);
        }
    } else {
        newMem = dlrealloc(oldMem, bytes);
    }
    return newMem;
}

void* leak_memalign(size_t alignment, size_t bytes)
{
    // we can just use malloc
    if (alignment <= MALLOC_ALIGNMENT)
        return leak_malloc(bytes);

    // need to make sure it's a power of two
    if (alignment & (alignment-1))
        alignment = 1L << (31 - __builtin_clz(alignment));

    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
    // we will align by at least MALLOC_ALIGNMENT bytes
    // and at most alignment-MALLOC_ALIGNMENT bytes
    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
    void* base = leak_malloc(size);
    if (base != NULL) {
        intptr_t ptr = (intptr_t)base;
        if ((ptr % alignment) == 0)
            return base;

        // align the pointer
        ptr += ((-ptr) % alignment);

        // there is always enough space for the base pointer and the guard
        ((void**)ptr)[-1] = MEMALIGN_GUARD;
        ((void**)ptr)[-2] = base;

        return (void*)ptr;
    }
    return base;
}
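
/*
 * Layout of an over-aligned leak_memalign() block, as leak_free() expects to
 * find it:
 *
 *   | AllocationEntry | padding ... | base | MEMALIGN_GUARD | aligned user data |
 *                                                           ^
 *                                                           returned pointer
 *
 * leak_free() sees that the guard word just below the returned pointer is
 * MEMALIGN_GUARD, reads "base" from the slot below it, and frees through the
 * real header.
 */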
#endif /* MALLOC_LEAK_CHECK */

// called from libc_init()
extern char* __progname;

void malloc_debug_init()
{
    unsigned int level = 0;
#ifdef MALLOC_LEAK_CHECK
    // if MALLOC_LEAK_CHECK is enabled, use level=1 by default
    level = 1;
#endif
    char env[PROP_VALUE_MAX];
    int len = __system_property_get("libc.debug.malloc", env);

    if (len) {
        level = atoi(env);
#ifndef MALLOC_LEAK_CHECK
        /* Alert the user that libc_debug.so needs to be installed as libc.so
         * when performing malloc checks.
         */
        if (level != 0) {
            __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                    "Malloc checks need libc_debug.so pushed to the device!\n");
        }
#endif
    }

#ifdef MALLOC_LEAK_CHECK
    gMallocDebugLevel = level;
    switch (level) {
    default:
    case 0:
        gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
        break;
    case 1:
        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                "%s using MALLOC_DEBUG = %d (leak checker)\n",
                __progname, level);
        gMallocDispatch = &gMallocEngineTable[INDEX_LEAK_CHECK];
        break;
    case 5:
        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                "%s using MALLOC_DEBUG = %d (fill)\n",
                __progname, level);
        gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_FILL];
        break;
    case 10:
        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                "%s using MALLOC_DEBUG = %d (sentinels, fill)\n",
                __progname, level);
        gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
        break;
    }
#endif
}
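
/*
 * Example (assuming a device running the libc_debug.so build): the dispatch
 * level is selected with the "libc.debug.malloc" system property read above,
 * e.g.
 *
 *   adb shell setprop libc.debug.malloc 1    # leak checker
 *   adb shell setprop libc.debug.malloc 5    # fill allocations
 *   adb shell setprop libc.debug.malloc 10   # sentinels + fill
 *
 * followed by restarting the process of interest, since the property is only
 * consulted once, from malloc_debug_init() at libc startup.
 */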