/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <stdarg.h>
#include <fcntl.h>
#include <unwind.h>

#include <sys/un.h>
#include <sys/select.h>
#include <sys/types.h>
#include <sys/system_properties.h>

#include "dlmalloc.h"
#include "logd.h"

// =============================================================================
// Utilities directly used by Dalvik
// =============================================================================

#define HASHTABLE_SIZE      1543
#define BACKTRACE_SIZE      32
/* flag definitions, currently sharing storage with "size" */
#define SIZE_FLAG_ZYGOTE_CHILD  (1<<31)
#define SIZE_FLAG_MASK          (SIZE_FLAG_ZYGOTE_CHILD)

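/*
 * Note: because the zygote flag shares storage with "size", a tracked
 * allocation is limited to less than 2^31 bytes; record_backtrace() below
 * aborts if a requested size already has the flag bit set.
 */
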
/*
 * In a VM process, this is set to 1 after fork()ing out of zygote.
 */
int gMallocLeakZygoteChild = 0;

// =============================================================================
// Structures
// =============================================================================

typedef struct HashEntry HashEntry;
struct HashEntry {
    size_t slot;
    HashEntry* prev;
    HashEntry* next;
    size_t numEntries;
    // fields above "size" are NOT sent to the host
    size_t size;
    size_t allocations;
    intptr_t backtrace[0];
};

typedef struct HashTable HashTable;
struct HashTable {
    size_t count;
    HashEntry* slots[HASHTABLE_SIZE];
};

static pthread_mutex_t gAllocationsMutex = PTHREAD_MUTEX_INITIALIZER;
static HashTable gHashTable;

// =============================================================================
// output functions
// =============================================================================

static int hash_entry_compare(const void* arg1, const void* arg2)
{
    HashEntry* e1 = *(HashEntry**)arg1;
    HashEntry* e2 = *(HashEntry**)arg2;

    size_t nbAlloc1 = e1->allocations;
    size_t nbAlloc2 = e2->allocations;
    size_t size1 = e1->size & ~SIZE_FLAG_MASK;
    size_t size2 = e2->size & ~SIZE_FLAG_MASK;
    size_t alloc1 = nbAlloc1 * size1;
    size_t alloc2 = nbAlloc2 * size2;

    // sort in descending order by:
    // 1) total size
    // 2) number of allocations
    //
    // This is used for sorting, not determination of equality, so we don't
    // need to compare the bit flags.
    int result;
    if (alloc1 > alloc2) {
        result = -1;
    } else if (alloc1 < alloc2) {
        result = 1;
    } else {
        if (nbAlloc1 > nbAlloc2) {
            result = -1;
        } else if (nbAlloc1 < nbAlloc2) {
            result = 1;
        } else {
            result = 0;
        }
    }
    return result;
}

/*
 * Retrieve native heap information.
 *
 * "*info" is set to a buffer we allocate
 * "*overallSize" is set to the size of the "info" buffer
 * "*infoSize" is set to the size of a single entry
 * "*totalMemory" is set to the sum of all allocations we're tracking; does
 *   not include heap overhead
 * "*backtraceSize" is set to the maximum number of entries in the back trace
 */
void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
        size_t* infoSize, size_t* totalMemory, size_t* backtraceSize)
{
    // don't do anything if we have invalid arguments
    if (info == NULL || overallSize == NULL || infoSize == NULL ||
            totalMemory == NULL || backtraceSize == NULL) {
        return;
    }

    pthread_mutex_lock(&gAllocationsMutex);

    if (gHashTable.count == 0) {
        *info = NULL;
        *overallSize = 0;
        *infoSize = 0;
        *totalMemory = 0;
        *backtraceSize = 0;
        goto done;
    }

    void** list = (void**)dlmalloc(sizeof(void*) * gHashTable.count);

    // debug_log("*****\ngHashTable.count = %d\n", gHashTable.count);
    // debug_log("list = %p\n", list);

    // get the entries into an array to be sorted
    *totalMemory = 0;   // start the sum from zero; the caller's value may be garbage
    int index = 0;
    int i;
    for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
        HashEntry* entry = gHashTable.slots[i];
        while (entry != NULL) {
            list[index] = entry;
            *totalMemory = *totalMemory +
                    ((entry->size & ~SIZE_FLAG_MASK) * entry->allocations);
            index++;
            entry = entry->next;
        }
    }

    // debug_log("sorted list!\n");
    // XXX: the protocol doesn't allow variable size for the stack trace (yet)
    *infoSize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * BACKTRACE_SIZE);
    *overallSize = *infoSize * gHashTable.count;
    *backtraceSize = BACKTRACE_SIZE;

    // debug_log("infoSize = 0x%x overall = 0x%x\n", *infoSize, *overallSize);
    // now get a byte array big enough for this
    *info = (uint8_t*)dlmalloc(*overallSize);

    // debug_log("info = %p\n", info);
    if (*info == NULL) {
        *overallSize = 0;
        goto done;
    }

    // debug_log("sorting list...\n");
    qsort((void*)list, gHashTable.count, sizeof(void*), hash_entry_compare);

    uint8_t* head = *info;
    const int count = gHashTable.count;
    for (i = 0 ; i < count ; i++) {
        HashEntry* entry = list[i];
        size_t entrySize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * entry->numEntries);
        if (entrySize < *infoSize) {
            /* we're writing less than a full entry, clear out the rest */
            /* TODO: only clear out the part we're not overwriting? */
            memset(head, 0, *infoSize);
        } else {
            /* make sure the amount we're copying doesn't exceed the limit */
            entrySize = *infoSize;
        }
        memcpy(head, &(entry->size), entrySize);
        head += *infoSize;
    }

    dlfree(list);

done:
    // debug_log("+++++ done!\n");
    pthread_mutex_unlock(&gAllocationsMutex);
}
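
/*
 * A minimal sketch of how a caller would decode the buffer above (hypothetical
 * code, not part of this file): each record occupies *infoSize bytes and, per
 * the memcpy from &entry->size, holds a size_t size (with flag bits), a size_t
 * allocation count, then up to *backtraceSize return addresses, zero-padded.
 *
 *     uint8_t* info; size_t overall, entrySize, total, btSize;
 *     get_malloc_leak_info(&info, &overall, &entrySize, &total, &btSize);
 *     uint8_t* p;
 *     for (p = info; p != NULL && p < info + overall; p += entrySize) {
 *         size_t size = *(size_t*)p;            // SIZE_FLAG_ZYGOTE_CHILD may be set
 *         size_t allocations = *(size_t*)(p + sizeof(size_t));
 *         intptr_t* frames = (intptr_t*)(p + 2 * sizeof(size_t));
 *     }
 *     free_malloc_leak_info(info);
 */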

void free_malloc_leak_info(uint8_t* info)
{
    dlfree(info);
}

struct mallinfo mallinfo()
{
    return dlmallinfo();
}

void* valloc(size_t bytes) {
    /* align to the actual page size rather than assuming 4096 bytes */
    return memalign( getpagesize(), bytes );
}


/*
 * Code guarded by MALLOC_LEAK_CHECK is only needed when malloc checking is
 * enabled. Currently it is excluded from libc.so and only included in
 * libc_debug.so.
 */
#ifdef MALLOC_LEAK_CHECK
#define MALLOC_ALIGNMENT    8
#define GUARD               0x48151642

#define DEBUG               0

// =============================================================================
// Structures
// =============================================================================
typedef struct AllocationEntry AllocationEntry;
struct AllocationEntry {
    HashEntry* entry;
    uint32_t guard;
};

// =============================================================================
// log functions
// =============================================================================

#define debug_log(format, ...)  \
    __libc_android_log_print(ANDROID_LOG_DEBUG, "malloc_leak", (format), ##__VA_ARGS__ )

// =============================================================================
// Hash Table functions
// =============================================================================
static uint32_t get_hash(intptr_t* backtrace, size_t numEntries)
{
    if (backtrace == NULL) return 0;

    int hash = 0;
    size_t i;
    for (i = 0 ; i < numEntries ; i++) {
        hash = (hash * 33) + (backtrace[i] >> 2);
    }

    return hash;
}
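
/*
 * (The multiply-by-33 scheme above is the classic djb2-style string hash;
 * shifting each return address right by two first discards the low bits,
 * which carry almost no entropy for aligned code.)
 */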

static HashEntry* find_entry(HashTable* table, int slot,
        intptr_t* backtrace, size_t numEntries, size_t size)
{
    HashEntry* entry = table->slots[slot];
    while (entry != NULL) {
        //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
        //        backtrace, entry, (entry != NULL) ? entry->backtrace : NULL);
        /*
         * See if the entry matches exactly. We compare the "size" field,
         * including the flag bits.
         */
        if (entry->size == size && entry->numEntries == numEntries &&
                !memcmp(backtrace, entry->backtrace, numEntries * sizeof(intptr_t))) {
            return entry;
        }

        entry = entry->next;
    }

    return NULL;
}

static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_t size)
{
    size_t hash = get_hash(backtrace, numEntries);
    size_t slot = hash % HASHTABLE_SIZE;

    if (size & SIZE_FLAG_MASK) {
        debug_log("malloc_debug: allocation %zx exceeds bit width\n", size);
        abort();
    }

    if (gMallocLeakZygoteChild)
        size |= SIZE_FLAG_ZYGOTE_CHILD;

    HashEntry* entry = find_entry(&gHashTable, slot, backtrace, numEntries, size);

    if (entry != NULL) {
        entry->allocations++;
    } else {
        // create a new entry
        entry = (HashEntry*)dlmalloc(sizeof(HashEntry) + numEntries*sizeof(intptr_t));
        entry->allocations = 1;
        entry->slot = slot;
        entry->prev = NULL;
        entry->next = gHashTable.slots[slot];
        entry->numEntries = numEntries;
        entry->size = size;

        memcpy(entry->backtrace, backtrace, numEntries * sizeof(intptr_t));

        gHashTable.slots[slot] = entry;

        if (entry->next != NULL) {
            entry->next->prev = entry;
        }

        // we just added an entry, increase the size of the hashtable
        gHashTable.count++;
    }

    return entry;
}

static int is_valid_entry(HashEntry* entry)
{
    if (entry != NULL) {
        int i;
        for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
            HashEntry* e1 = gHashTable.slots[i];

            while (e1 != NULL) {
                if (e1 == entry) {
                    return 1;
                }

                e1 = e1->next;
            }
        }
    }

    return 0;
}

static void remove_entry(HashEntry* entry)
{
    HashEntry* prev = entry->prev;
    HashEntry* next = entry->next;

    if (prev != NULL) entry->prev->next = next;
    if (next != NULL) entry->next->prev = prev;

    if (prev == NULL) {
        // we are the head of the list. set the head to be next
        gHashTable.slots[entry->slot] = entry->next;
    }

    // we just removed an entry, decrease the size of the hashtable
    gHashTable.count--;
}


// =============================================================================
// stack trace functions
// =============================================================================

typedef struct
{
    size_t count;
    intptr_t* addrs;
} stack_crawl_state_t;


/* depends on how the system includes define this */
#ifdef HAVE_UNWIND_CONTEXT_STRUCT
typedef struct _Unwind_Context __unwind_context;
#else
typedef _Unwind_Context __unwind_context;
#endif

static _Unwind_Reason_Code trace_function(__unwind_context *context, void *arg)
{
    stack_crawl_state_t* state = (stack_crawl_state_t*)arg;
    if (state->count) {
        intptr_t ip = (intptr_t)_Unwind_GetIP(context);
        if (ip) {
            state->addrs[0] = ip;
            state->addrs++;
            state->count--;
            return _URC_NO_REASON;
        }
    }
    /*
     * If we've run out of space to record addresses, or the unwinder
     * returned an IP of 0, stop unwinding the stack.
     */
    return _URC_END_OF_STACK;
}

static inline
int get_backtrace(intptr_t* addrs, size_t max_entries)
{
    stack_crawl_state_t state;
    state.count = max_entries;
    state.addrs = (intptr_t*)addrs;
    _Unwind_Backtrace(trace_function, (void*)&state);
    return max_entries - state.count;
}

// =============================================================================
// malloc leak function dispatcher
// =============================================================================

static void* leak_malloc(size_t bytes);
static void  leak_free(void* mem);
static void* leak_calloc(size_t n_elements, size_t elem_size);
static void* leak_realloc(void* oldMem, size_t bytes);
static void* leak_memalign(size_t alignment, size_t bytes);

static void* fill_malloc(size_t bytes);
static void  fill_free(void* mem);
static void* fill_realloc(void* oldMem, size_t bytes);
static void* fill_memalign(size_t alignment, size_t bytes);

static void* chk_malloc(size_t bytes);
static void  chk_free(void* mem);
static void* chk_calloc(size_t n_elements, size_t elem_size);
static void* chk_realloc(void* oldMem, size_t bytes);
static void* chk_memalign(size_t alignment, size_t bytes);

typedef struct {
    void* (*malloc)(size_t bytes);
    void  (*free)(void* mem);
    void* (*calloc)(size_t n_elements, size_t elem_size);
    void* (*realloc)(void* oldMem, size_t bytes);
    void* (*memalign)(size_t alignment, size_t bytes);
} MallocDebug;

static const MallocDebug gMallocEngineTable[] __attribute__((aligned(32))) =
{
    { dlmalloc,     dlfree,     dlcalloc,    dlrealloc,    dlmemalign },
    { leak_malloc,  leak_free,  leak_calloc, leak_realloc, leak_memalign },
    { fill_malloc,  fill_free,  dlcalloc,    fill_realloc, fill_memalign },
    { chk_malloc,   chk_free,   chk_calloc,  chk_realloc,  chk_memalign }
};

enum {
    INDEX_NORMAL = 0,
    INDEX_LEAK_CHECK,
    INDEX_MALLOC_FILL,
    INDEX_MALLOC_CHECK,
};

static MallocDebug const * gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
static int gMallocDebugLevel;
static int gTrapOnError = 1;
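
/*
 * gMallocDispatch is selected once, in malloc_debug_init() at the bottom of
 * this file, from the "libc.debug.malloc" system property: 0 = plain
 * dlmalloc, 1 = leak checker, 5 = fill on alloc/free, 10 = sentinels + fill.
 */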

void* malloc(size_t bytes) {
    return gMallocDispatch->malloc(bytes);
}
void free(void* mem) {
    gMallocDispatch->free(mem);
}
void* calloc(size_t n_elements, size_t elem_size) {
    return gMallocDispatch->calloc(n_elements, elem_size);
}
void* realloc(void* oldMem, size_t bytes) {
    return gMallocDispatch->realloc(oldMem, bytes);
}
void* memalign(size_t alignment, size_t bytes) {
    return gMallocDispatch->memalign(alignment, bytes);
}

// =============================================================================
// malloc check functions
// =============================================================================

#define CHK_FILL_FREE           0xef
#define CHK_SENTINEL_VALUE      0xeb
#define CHK_SENTINEL_HEAD_SIZE  16
#define CHK_SENTINEL_TAIL_SIZE  16
#define CHK_OVERHEAD_SIZE       (   CHK_SENTINEL_HEAD_SIZE +    \
                                    CHK_SENTINEL_TAIL_SIZE +    \
                                    sizeof(size_t) )
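
/*
 * Layout of a chk_* allocation, as implied by chk_malloc()/chk_free() below
 * (the requested size is stored in the last size_t of dlmalloc's usable
 * region, which may lie beyond the tail sentinel when dlmalloc rounds the
 * request up):
 *
 *   | head sentinel (16) | user bytes | tail sentinel (16) | ... | size_t |
 *   ^-- from dlmalloc      ^-- pointer returned to the caller
 */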

static void dump_stack_trace()
{
    intptr_t addrs[20];
    int c = get_backtrace(addrs, 20);
    char buf[16];
    char tmp[16*20];
    int i;

    tmp[0] = 0; // Need to initialize tmp[0] for the first strcat
    for (i=0 ; i<c; i++) {
        sprintf(buf, "%2d: %08x\n", i, addrs[i]);
        strcat(tmp, buf);
    }
    __libc_android_log_print(ANDROID_LOG_ERROR, "libc", "call stack:\n%s", tmp);
}

/* placeholder: every pointer is currently accepted as valid */
static int is_valid_malloc_pointer(void* addr)
{
    return 1;
}

static void assert_valid_malloc_pointer(void* mem)
{
    if (mem && !is_valid_malloc_pointer(mem)) {
        pthread_mutex_lock(&gAllocationsMutex);
        gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
        __libc_android_log_print(ANDROID_LOG_ERROR, "libc",
                "*** MALLOC CHECK: buffer %p, is not a valid "
                "malloc pointer (are you mixing up new/delete "
                "and malloc/free?)", mem);
        dump_stack_trace();
        if (gTrapOnError) {
            __builtin_trap();
        }
        gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
        pthread_mutex_unlock(&gAllocationsMutex);
    }
}

static void chk_out_of_bounds_check__locked(void* buffer, size_t size)
{
    int i;
    char* buf = (char*)buffer - CHK_SENTINEL_HEAD_SIZE;
    for (i=0 ; i<CHK_SENTINEL_HEAD_SIZE ; i++) {
        if (buf[i] != CHK_SENTINEL_VALUE) {
            gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
            __libc_android_log_print(ANDROID_LOG_ERROR, "libc",
                    "*** MALLOC CHECK: buffer %p, size=%lu, "
                    "corrupted %d bytes before allocation",
                    buffer, size, CHK_SENTINEL_HEAD_SIZE-i);
            dump_stack_trace();
            if (gTrapOnError) {
                __builtin_trap();
            }
            gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
        }
    }
    buf = (char*)buffer + size;
    for (i=CHK_SENTINEL_TAIL_SIZE-1 ; i>=0 ; i--) {
        if (buf[i] != CHK_SENTINEL_VALUE) {
            gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
            __libc_android_log_print(ANDROID_LOG_ERROR, "libc",
                    "*** MALLOC CHECK: buffer %p, size=%lu, "
                    "corrupted %d bytes after allocation",
                    buffer, size, i+1);
            dump_stack_trace();
            if (gTrapOnError) {
                __builtin_trap();
            }
            gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
        }
    }
}

void* chk_malloc(size_t bytes)
{
    char* buffer = (char*)dlmalloc(bytes + CHK_OVERHEAD_SIZE);
    if (buffer) {
        pthread_mutex_lock(&gAllocationsMutex);
        memset(buffer, CHK_SENTINEL_VALUE, bytes + CHK_OVERHEAD_SIZE);
        size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
        *(size_t *)(buffer + offset) = bytes;
        buffer += CHK_SENTINEL_HEAD_SIZE;
        pthread_mutex_unlock(&gAllocationsMutex);
    }
    return buffer;
}

void chk_free(void* mem)
{
    assert_valid_malloc_pointer(mem);
    if (mem) {
        pthread_mutex_lock(&gAllocationsMutex);
        char* buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
        size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
        size_t bytes = *(size_t *)(buffer + offset);
        chk_out_of_bounds_check__locked(mem, bytes);
        pthread_mutex_unlock(&gAllocationsMutex);
        memset(buffer, CHK_FILL_FREE, bytes);
        dlfree(buffer);
    }
}

void* chk_calloc(size_t n_elements, size_t elem_size)
{
    size_t size = n_elements * elem_size;
    void* ptr = chk_malloc(size);
    if (ptr != NULL) {
        memset(ptr, 0, size);
    }
    return ptr;
}

void* chk_realloc(void* mem, size_t bytes)
{
    assert_valid_malloc_pointer(mem);
    char* new_buffer = chk_malloc(bytes);
    if (mem == NULL) {
        return new_buffer;
    }

    pthread_mutex_lock(&gAllocationsMutex);
    char* buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
    size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
    size_t old_bytes = *(size_t *)(buffer + offset);
    chk_out_of_bounds_check__locked(mem, old_bytes);
    pthread_mutex_unlock(&gAllocationsMutex);

    if (new_buffer) {
        size_t size = (bytes < old_bytes)?(bytes):(old_bytes);
        memcpy(new_buffer, mem, size);
        chk_free(mem);
    }

    return new_buffer;
}

void* chk_memalign(size_t alignment, size_t bytes)
{
    // XXX: the sentinel layout can't honor the alignment yet, so fall back
    // to plain chk_malloc rather than returning a mis-tracked block
    return chk_malloc(bytes);
}

// =============================================================================
// malloc fill functions
// =============================================================================

void* fill_malloc(size_t bytes)
{
    void* buffer = dlmalloc(bytes);
    if (buffer) {
        memset(buffer, CHK_SENTINEL_VALUE, bytes);
    }
    return buffer;
}

void fill_free(void* mem)
{
    size_t bytes = dlmalloc_usable_size(mem);
    memset(mem, CHK_FILL_FREE, bytes);
    dlfree(mem);
}

void* fill_realloc(void* mem, size_t bytes)
{
    void* buffer = fill_malloc(bytes);
    if (mem == NULL) {
        return buffer;
    }
    if (buffer) {
        size_t old_size = dlmalloc_usable_size(mem);
        size_t size = (bytes < old_size)?(bytes):(old_size);
        memcpy(buffer, mem, size);
        fill_free(mem);
    }
    return buffer;
}

void* fill_memalign(size_t alignment, size_t bytes)
{
    void* buffer = dlmemalign(alignment, bytes);
    if (buffer) {
        memset(buffer, CHK_SENTINEL_VALUE, bytes);
    }
    return buffer;
}

// =============================================================================
// malloc leak functions
// =============================================================================

#define MEMALIGN_GUARD      ((void*)0xA1A41520)

void* leak_malloc(size_t bytes)
{
    // allocate enough space in front of the allocation to store a pointer to
    // the hash table entry. This makes freeing the structure really fast!

    // 1. allocate enough memory and include our header
    // 2. set the base pointer to be right after our header

    void* base = dlmalloc(bytes + sizeof(AllocationEntry));
    if (base != NULL) {
        pthread_mutex_lock(&gAllocationsMutex);

        intptr_t backtrace[BACKTRACE_SIZE];
        size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);

        AllocationEntry* header = (AllocationEntry*)base;
        header->entry = record_backtrace(backtrace, numEntries, bytes);
        header->guard = GUARD;

        // now increment base to point to after our header.
        // this should just work since our header is 8 bytes.
        base = (AllocationEntry*)base + 1;

        pthread_mutex_unlock(&gAllocationsMutex);
    }

    return base;
}
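
/*
 * Resulting layout (the 8-byte AllocationEntry sits directly in front of the
 * pointer handed back to the caller):
 *
 *   | HashEntry* entry | uint32_t guard | user bytes ...
 *   ^-- base from dlmalloc              ^-- returned pointer
 */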

void leak_free(void* mem)
{
    if (mem != NULL) {
        pthread_mutex_lock(&gAllocationsMutex);

        // check the guard to make sure it is valid
        AllocationEntry* header = (AllocationEntry*)mem - 1;

        if (header->guard != GUARD) {
            // could be a memaligned block
            if (((void**)mem)[-1] == MEMALIGN_GUARD) {
                mem = ((void**)mem)[-2];
                header = (AllocationEntry*)mem - 1;
            }
        }

        if (header->guard == GUARD || is_valid_entry(header->entry)) {
            // decrement the allocations
            HashEntry* entry = header->entry;
            entry->allocations--;
            if (entry->allocations <= 0) {
                remove_entry(entry);
                dlfree(entry);
            }

            // now free the memory!
            dlfree(header);
        } else {
            debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
                    header->guard, header->entry);
        }

        pthread_mutex_unlock(&gAllocationsMutex);
    }
}

void* leak_calloc(size_t n_elements, size_t elem_size)
{
    size_t size = n_elements * elem_size;
    void* ptr = leak_malloc(size);
    if (ptr != NULL) {
        memset(ptr, 0, size);
    }
    return ptr;
}

void* leak_realloc(void* oldMem, size_t bytes)
{
    if (oldMem == NULL) {
        return leak_malloc(bytes);
    }
    void* newMem = NULL;
    AllocationEntry* header = (AllocationEntry*)oldMem - 1;
    if (header && header->guard == GUARD) {
        size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK;
        newMem = leak_malloc(bytes);
        if (newMem != NULL) {
            size_t copySize = (oldSize <= bytes) ? oldSize : bytes;
            memcpy(newMem, oldMem, copySize);
            leak_free(oldMem);
        }
    } else {
        newMem = dlrealloc(oldMem, bytes);
    }
    return newMem;
}

void* leak_memalign(size_t alignment, size_t bytes)
{
    // we can just use malloc
    if (alignment <= MALLOC_ALIGNMENT)
        return leak_malloc(bytes);

    // need to make sure it's a power of two
    if (alignment & (alignment-1))
        alignment = 1L << (31 - __builtin_clz(alignment));

    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
    // we will align by at least MALLOC_ALIGNMENT bytes
    // and at most alignment-MALLOC_ALIGNMENT bytes
    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
    void* base = leak_malloc(size);
    if (base != NULL) {
        intptr_t ptr = (intptr_t)base;
        if ((ptr % alignment) == 0)
            return base;

        // align the pointer
        ptr += ((-ptr) % alignment);

        // there is always enough space for the base pointer and the guard
        ((void**)ptr)[-1] = MEMALIGN_GUARD;
        ((void**)ptr)[-2] = base;

        return (void*)ptr;
    }
    return base;
}
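
/*
 * Layout in the padded case, matching the recovery path in leak_free():
 * the two words just below the aligned pointer hold the original base
 * pointer and the MEMALIGN_GUARD marker.
 *
 *   | AllocationEntry | padding... | base | MEMALIGN_GUARD | user bytes ...
 *                                                          ^-- returned pointer
 */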
#endif /* MALLOC_LEAK_CHECK */

// called from libc_init()
extern char* __progname;

void malloc_debug_init()
{
    unsigned int level = 0;
#ifdef MALLOC_LEAK_CHECK
    // if MALLOC_LEAK_CHECK is enabled, use level=1 by default
    level = 1;
#endif
    char env[PROP_VALUE_MAX];
    int len = __system_property_get("libc.debug.malloc", env);

    if (len) {
        level = atoi(env);
#ifndef MALLOC_LEAK_CHECK
        /* Alert the user that libc_debug.so needs to be installed as libc.so
         * when performing malloc checks.
         */
        if (level != 0) {
            __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                    "Malloc checks need libc_debug.so pushed to the device!\n");
        }
#endif
    }

#ifdef MALLOC_LEAK_CHECK
    gMallocDebugLevel = level;
    switch (level) {
    default:
    case 0:
        gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
        break;
    case 1:
        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                "%s using MALLOC_DEBUG = %d (leak checker)\n",
                __progname, level);
        gMallocDispatch = &gMallocEngineTable[INDEX_LEAK_CHECK];
        break;
    case 5:
        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                "%s using MALLOC_DEBUG = %d (fill)\n",
                __progname, level);
        gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_FILL];
        break;
    case 10:
        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                "%s using MALLOC_DEBUG = %d (sentinels, fill)\n",
                __progname, level);
        gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
        break;
    }
#endif
}