blob: df09424b3f92d924c8f00843b6e9a368204e9d1c [file] [log] [blame]
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001/*
2 * Copyright (C) 2008 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28#include <errno.h>
29#include <pthread.h>
30#include <stdio.h>
31#include <arpa/inet.h>
32#include <sys/socket.h>
33#include <stdlib.h>
34#include <string.h>
35#include <unistd.h>
36#include <errno.h>
37#include <stddef.h>
38#include <stdarg.h>
39#include <fcntl.h>
40#include <unwind.h>
41
42#include <sys/socket.h>
43#include <sys/un.h>
44#include <sys/select.h>
45#include <sys/types.h>
46#include <sys/system_properties.h>
47
48#include "dlmalloc.h"
49#include "logd.h"
50
51// =============================================================================
52// Utilities directly used by Dalvik
53// =============================================================================
54
// Number of buckets in the allocation-site hash table.
#define HASHTABLE_SIZE      1543
// Maximum number of stack frames recorded per allocation site.
#define BACKTRACE_SIZE      32
/* flag definitions, currently sharing storage with "size" */
#define SIZE_FLAG_ZYGOTE_CHILD  (1<<31)
#define SIZE_FLAG_MASK          (SIZE_FLAG_ZYGOTE_CHILD)

#define MAX_SIZE_T           (~(size_t)0)

/*
 * In a VM process, this is set to 1 after fork()ing out of zygote.
 */
int gMallocLeakZygoteChild = 0;
67
68// =============================================================================
69// Structures
70// =============================================================================
71
typedef struct HashEntry HashEntry;
/* One allocation site: a unique (backtrace, size) pair plus the number of
 * live allocations made from it. Entries form doubly-linked chains hanging
 * off gHashTable's buckets. */
struct HashEntry {
    size_t slot;            // bucket index, cached so unlinking is O(1)
    HashEntry* prev;
    HashEntry* next;
    size_t numEntries;      // number of valid words in backtrace[]
    // fields above "size" are NOT sent to the host
    size_t size;            // request size; top bit doubles as the zygote flag
    size_t allocations;     // count of live allocations from this site
    intptr_t backtrace[0];  // trailing variable-length array of return addresses
};

typedef struct HashTable HashTable;
/* Chained hash table of all tracked allocation sites. */
struct HashTable {
    size_t count;                       // total entries across all buckets
    HashEntry* slots[HASHTABLE_SIZE];
};
89
// Serializes all bookkeeping done by the debug allocators, including
// every access to gHashTable.
static pthread_mutex_t gAllocationsMutex = PTHREAD_MUTEX_INITIALIZER;
static HashTable gHashTable;
92
93// =============================================================================
94// output fucntions
95// =============================================================================
96
97static int hash_entry_compare(const void* arg1, const void* arg2)
98{
99 HashEntry* e1 = *(HashEntry**)arg1;
100 HashEntry* e2 = *(HashEntry**)arg2;
101
102 size_t nbAlloc1 = e1->allocations;
103 size_t nbAlloc2 = e2->allocations;
104 size_t size1 = e1->size & ~SIZE_FLAG_MASK;
105 size_t size2 = e2->size & ~SIZE_FLAG_MASK;
106 size_t alloc1 = nbAlloc1 * size1;
107 size_t alloc2 = nbAlloc2 * size2;
108
109 // sort in descending order by:
110 // 1) total size
111 // 2) number of allocations
112 //
113 // This is used for sorting, not determination of equality, so we don't
114 // need to compare the bit flags.
115 int result;
116 if (alloc1 > alloc2) {
117 result = -1;
118 } else if (alloc1 < alloc2) {
119 result = 1;
120 } else {
121 if (nbAlloc1 > nbAlloc2) {
122 result = -1;
123 } else if (nbAlloc1 < nbAlloc2) {
124 result = 1;
125 } else {
126 result = 0;
127 }
128 }
129 return result;
130}
131
132/*
133 * Retrieve native heap information.
134 *
135 * "*info" is set to a buffer we allocate
136 * "*overallSize" is set to the size of the "info" buffer
137 * "*infoSize" is set to the size of a single entry
138 * "*totalMemory" is set to the sum of all allocations we're tracking; does
139 * not include heap overhead
140 * "*backtraceSize" is set to the maximum number of entries in the back trace
141 */
142void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
143 size_t* infoSize, size_t* totalMemory, size_t* backtraceSize)
144{
145 // don't do anything if we have invalid arguments
146 if (info == NULL || overallSize == NULL || infoSize == NULL ||
147 totalMemory == NULL || backtraceSize == NULL) {
148 return;
149 }
150
151 pthread_mutex_lock(&gAllocationsMutex);
152
153 if (gHashTable.count == 0) {
154 *info = NULL;
155 *overallSize = 0;
156 *infoSize = 0;
157 *totalMemory = 0;
158 *backtraceSize = 0;
159 goto done;
160 }
161
162 void** list = (void**)dlmalloc(sizeof(void*) * gHashTable.count);
163
164 // debug_log("*****\ngHashTable.count = %d\n", gHashTable.count);
165 // debug_log("list = %p\n", list);
166
167 // get the entries into an array to be sorted
168 int index = 0;
169 int i;
170 for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
171 HashEntry* entry = gHashTable.slots[i];
172 while (entry != NULL) {
173 list[index] = entry;
174 *totalMemory = *totalMemory +
175 ((entry->size & ~SIZE_FLAG_MASK) * entry->allocations);
176 index++;
177 entry = entry->next;
178 }
179 }
180
181 // debug_log("sorted list!\n");
182 // XXX: the protocol doesn't allow variable size for the stack trace (yet)
183 *infoSize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * BACKTRACE_SIZE);
184 *overallSize = *infoSize * gHashTable.count;
185 *backtraceSize = BACKTRACE_SIZE;
186
187 // debug_log("infoSize = 0x%x overall = 0x%x\n", *infoSize, *overallSize);
188 // now get A byte array big enough for this
189 *info = (uint8_t*)dlmalloc(*overallSize);
190
191 // debug_log("info = %p\n", info);
192 if (*info == NULL) {
193 *overallSize = 0;
194 goto done;
195 }
196
197 // debug_log("sorting list...\n");
198 qsort((void*)list, gHashTable.count, sizeof(void*), hash_entry_compare);
199
200 uint8_t* head = *info;
201 const int count = gHashTable.count;
202 for (i = 0 ; i < count ; i++) {
203 HashEntry* entry = list[i];
204 size_t entrySize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * entry->numEntries);
205 if (entrySize < *infoSize) {
206 /* we're writing less than a full entry, clear out the rest */
207 /* TODO: only clear out the part we're not overwriting? */
208 memset(head, 0, *infoSize);
209 } else {
210 /* make sure the amount we're copying doesn't exceed the limit */
211 entrySize = *infoSize;
212 }
213 memcpy(head, &(entry->size), entrySize);
214 head += *infoSize;
215 }
216
217 dlfree(list);
218
219done:
220 // debug_log("+++++ done!\n");
221 pthread_mutex_unlock(&gAllocationsMutex);
222}
223
/* Release a buffer previously returned by get_malloc_leak_info(). */
void free_malloc_leak_info(uint8_t* info)
{
    dlfree(info);
}
228
/* Standard mallinfo() entry point: forwarded to dlmalloc's implementation. */
struct mallinfo mallinfo()
{
    return dlmallinfo();
}
233
/* Allocate "bytes" bytes aligned on a page boundary.
 * The alignment comes from getpagesize() rather than a hard-coded 4096. */
void* valloc(size_t bytes) {
    return memalign( getpagesize(), bytes );
}
238
239
240/*
241 * Code guarded by MALLOC_LEAK_CHECK is only needed when malloc check is
242 * enabled. Currently we exclude them in libc.so, and only include them in
243 * libc_debug.so.
244 */
#ifdef MALLOC_LEAK_CHECK
// dlmalloc's minimum alignment of returned blocks, in bytes
#define MALLOC_ALIGNMENT 8
// magic value placed in each AllocationEntry to detect clobbered headers
#define GUARD 0x48151642

#define DEBUG 0

// =============================================================================
// Structures
// =============================================================================

/* Header that leak_malloc() places immediately before the user block:
 * a pointer to the call-site hash entry plus a guard word. sizeof is 8,
 * which preserves dlmalloc's 8-byte alignment for the user pointer. */
typedef struct AllocationEntry AllocationEntry;
struct AllocationEntry {
    HashEntry* entry;
    uint32_t guard;
};
259
260// =============================================================================
261// log funtions
262// =============================================================================
263
// Log a debug-level message to the Android log under the "malloc_leak" tag.
#define debug_log(format, ...)  \
    __libc_android_log_print(ANDROID_LOG_DEBUG, "malloc_leak", (format), ##__VA_ARGS__ )
266
267// =============================================================================
268// Hash Table functions
269// =============================================================================
/*
 * Hash a backtrace: multiply-by-33 rolling hash over the recorded
 * return addresses. The low two bits of each address are shifted out
 * since they carry little entropy.
 *
 * The accumulator is uint32_t: the original used a signed int, and
 * signed overflow during the multiply is undefined behavior in C.
 * Unsigned arithmetic wraps mod 2^32, producing the same values the
 * old code produced in practice.
 */
static uint32_t get_hash(intptr_t* backtrace, size_t numEntries)
{
    if (backtrace == NULL) return 0;

    uint32_t hash = 0;
    size_t i;
    for (i = 0 ; i < numEntries ; i++) {
        hash = (hash * 33) + (uint32_t)(backtrace[i] >> 2);
    }

    return hash;
}
282
283static HashEntry* find_entry(HashTable* table, int slot,
284 intptr_t* backtrace, size_t numEntries, size_t size)
285{
286 HashEntry* entry = table->slots[slot];
287 while (entry != NULL) {
288 //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
289 // backtrace, entry, (entry != NULL) ? entry->backtrace : NULL);
290 /*
291 * See if the entry matches exactly. We compare the "size" field,
292 * including the flag bits.
293 */
294 if (entry->size == size && entry->numEntries == numEntries &&
295 !memcmp(backtrace, entry->backtrace, numEntries * sizeof(intptr_t))) {
296 return entry;
297 }
298
299 entry = entry->next;
300 }
301
302 return NULL;
303}
304
/*
 * Record one allocation of "size" bytes from the call site identified by
 * backtrace/numEntries: bump the existing entry's count, or create and
 * link a new entry. Caller must hold gAllocationsMutex.
 */
static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_t size)
{
    size_t hash = get_hash(backtrace, numEntries);
    size_t slot = hash % HASHTABLE_SIZE;

    // "size" shares storage with the flag bits, so a request that large
    // cannot be represented
    if (size & SIZE_FLAG_MASK) {
        debug_log("malloc_debug: allocation %zx exceeds bit width\n", size);
        abort();
    }

    if (gMallocLeakZygoteChild)
        size |= SIZE_FLAG_ZYGOTE_CHILD;

    HashEntry* entry = find_entry(&gHashTable, slot, backtrace, numEntries, size);

    if (entry != NULL) {
        entry->allocations++;
    } else {
        // create a new entry
        // NOTE(review): dlmalloc's result is not checked; on OOM the
        // stores below dereference NULL — confirm intended behavior
        entry = (HashEntry*)dlmalloc(sizeof(HashEntry) + numEntries*sizeof(intptr_t));
        entry->allocations = 1;
        entry->slot = slot;
        entry->prev = NULL;
        entry->next = gHashTable.slots[slot];
        entry->numEntries = numEntries;
        entry->size = size;

        memcpy(entry->backtrace, backtrace, numEntries * sizeof(intptr_t));

        // link at the head of the bucket's chain
        gHashTable.slots[slot] = entry;

        if (entry->next != NULL) {
            entry->next->prev = entry;
        }

        // we just added an entry, increase the size of the hashtable
        gHashTable.count++;
    }

    return entry;
}
346
347static int is_valid_entry(HashEntry* entry)
348{
349 if (entry != NULL) {
350 int i;
351 for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
352 HashEntry* e1 = gHashTable.slots[i];
353
354 while (e1 != NULL) {
355 if (e1 == entry) {
356 return 1;
357 }
358
359 e1 = e1->next;
360 }
361 }
362 }
363
364 return 0;
365}
366
367static void remove_entry(HashEntry* entry)
368{
369 HashEntry* prev = entry->prev;
370 HashEntry* next = entry->next;
371
372 if (prev != NULL) entry->prev->next = next;
373 if (next != NULL) entry->next->prev = prev;
374
375 if (prev == NULL) {
376 // we are the head of the list. set the head to be next
377 gHashTable.slots[entry->slot] = entry->next;
378 }
379
380 // we just removed and entry, decrease the size of the hashtable
381 gHashTable.count--;
382}
383
384
385// =============================================================================
386// stack trace functions
387// =============================================================================
388
/* State threaded through _Unwind_Backtrace(): remaining output capacity
 * and the next slot to write a return address into. */
typedef struct
{
    size_t count;
    intptr_t* addrs;
} stack_crawl_state_t;
394
395
396/* depends how the system includes define this */
397#ifdef HAVE_UNWIND_CONTEXT_STRUCT
398typedef struct _Unwind_Context __unwind_context;
399#else
400typedef _Unwind_Context __unwind_context;
401#endif
402
403static _Unwind_Reason_Code trace_function(__unwind_context *context, void *arg)
404{
405 stack_crawl_state_t* state = (stack_crawl_state_t*)arg;
406 if (state->count) {
407 intptr_t ip = (intptr_t)_Unwind_GetIP(context);
408 if (ip) {
409 state->addrs[0] = ip;
410 state->addrs++;
411 state->count--;
412 return _URC_NO_REASON;
413 }
414 }
415 /*
416 * If we run out of space to record the address or 0 has been seen, stop
417 * unwinding the stack.
418 */
419 return _URC_END_OF_STACK;
420}
421
422static inline
423int get_backtrace(intptr_t* addrs, size_t max_entries)
424{
425 stack_crawl_state_t state;
426 state.count = max_entries;
427 state.addrs = (intptr_t*)addrs;
428 _Unwind_Backtrace(trace_function, (void*)&state);
429 return max_entries - state.count;
430}
431
432// =============================================================================
433// malloc leak function dispatcher
434// =============================================================================
435
/* Forward declarations for the three debug allocator families:
 * "leak_" tracks allocation sites, "fill_" paints memory with known
 * patterns, "chk_" adds corruption-detecting sentinels. */
static void* leak_malloc(size_t bytes);
static void leak_free(void* mem);
static void* leak_calloc(size_t n_elements, size_t elem_size);
static void* leak_realloc(void* oldMem, size_t bytes);
static void* leak_memalign(size_t alignment, size_t bytes);

static void* fill_malloc(size_t bytes);
static void fill_free(void* mem);
static void* fill_realloc(void* oldMem, size_t bytes);
static void* fill_memalign(size_t alignment, size_t bytes);

static void* chk_malloc(size_t bytes);
static void chk_free(void* mem);
static void* chk_calloc(size_t n_elements, size_t elem_size);
static void* chk_realloc(void* oldMem, size_t bytes);
static void* chk_memalign(size_t alignment, size_t bytes);

/* Vtable of allocator entry points; one instance per debug engine. */
typedef struct {
    void* (*malloc)(size_t bytes);
    void (*free)(void* mem);
    void* (*calloc)(size_t n_elements, size_t elem_size);
    void* (*realloc)(void* oldMem, size_t bytes);
    void* (*memalign)(size_t alignment, size_t bytes);
} MallocDebug;
460
// Engine table, indexed by the INDEX_* constants below.
// Note the "fill" engine reuses dlcalloc: calloc already zero-fills,
// so there is nothing extra to paint.
static const MallocDebug gMallocEngineTable[] __attribute__((aligned(32))) =
{
    { dlmalloc, dlfree, dlcalloc, dlrealloc, dlmemalign },
    { leak_malloc, leak_free, leak_calloc, leak_realloc, leak_memalign },
    { fill_malloc, fill_free, dlcalloc, fill_realloc, fill_memalign },
    { chk_malloc, chk_free, chk_calloc, chk_realloc, chk_memalign }
};

enum {
    INDEX_NORMAL = 0,     // plain dlmalloc pass-through
    INDEX_LEAK_CHECK,     // allocation-site tracking
    INDEX_MALLOC_FILL,    // pattern-fill on alloc/free
    INDEX_MALLOC_CHECK,   // sentinel-based corruption checks
};

// Currently active engine; selected by malloc_debug_init() and swapped
// temporarily by assert_log_message() while logging.
static MallocDebug const * gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
static int gMallocDebugLevel;
static int gTrapOnError = 1;
479
/* Public allocator entry points: each simply forwards to the currently
 * selected dispatch table. */
void* malloc(size_t bytes) {
    return gMallocDispatch->malloc(bytes);
}
void free(void* mem) {
    gMallocDispatch->free(mem);
}
void* calloc(size_t n_elements, size_t elem_size) {
    return gMallocDispatch->calloc(n_elements, elem_size);
}
void* realloc(void* oldMem, size_t bytes) {
    return gMallocDispatch->realloc(oldMem, bytes);
}
void* memalign(size_t alignment, size_t bytes) {
    return gMallocDispatch->memalign(alignment, bytes);
}
495
496// =============================================================================
497// malloc check functions
498// =============================================================================
499
#define CHK_FILL_FREE           0xef   // pattern written over freed memory
#define CHK_SENTINEL_VALUE      0xeb   // pattern for head/tail guard bytes
#define CHK_SENTINEL_HEAD_SIZE  16     // guard bytes before the user block
#define CHK_SENTINEL_TAIL_SIZE  16     // guard bytes after the user block
// total extra bytes per block: both sentinels plus the stashed size
#define CHK_OVERHEAD_SIZE       ( CHK_SENTINEL_HEAD_SIZE +  \
                                  CHK_SENTINEL_TAIL_SIZE +  \
                                  sizeof(size_t) )
507
508static void dump_stack_trace()
509{
510 intptr_t addrs[20];
511 int c = get_backtrace(addrs, 20);
512 char buf[16];
513 char tmp[16*20];
514 int i;
515
516 tmp[0] = 0; // Need to initialize tmp[0] for the first strcat
517 for (i=0 ; i<c; i++) {
David 'Digit' Turnerc4eee372009-07-08 14:22:41 +0200518 snprintf(buf, sizeof buf, "%2d: %08x\n", i, addrs[i]);
519 strlcat(tmp, buf, sizeof tmp);
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800520 }
521 __libc_android_log_print(ANDROID_LOG_ERROR, "libc", "call stack:\n%s", tmp);
522}
523
/*
 * Placeholder pointer-validity check: always reports success.
 * NOTE(review): no actual validation is performed here; the chk_*
 * functions rely on the sentinel checks in chk_mem_check() instead.
 */
static int is_valid_malloc_pointer(void* addr)
{
    return 1;
}
528
/*
 * Log a formatted error plus the current call stack, then trap (when
 * gTrapOnError is set) so a debugger stops at the corruption site.
 *
 * While logging, the dispatch table is temporarily pointed at the plain
 * dlmalloc engine so any allocation done by the logging code does not
 * recurse into the checked allocator. It is restored to the MALLOC_CHECK
 * engine afterwards (this is only called from the chk_* path).
 */
static void assert_log_message(const char* format, ...)
{
    va_list args;

    pthread_mutex_lock(&gAllocationsMutex);
    gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
    va_start(args, format);
    __libc_android_log_vprint(ANDROID_LOG_ERROR, "libc",
                              format, args);
    va_end(args);
    dump_stack_trace();
    if (gTrapOnError) {
        __builtin_trap();
    }
    gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
    pthread_mutex_unlock(&gAllocationsMutex);
}
546
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800547static void assert_valid_malloc_pointer(void* mem)
548{
549 if (mem && !is_valid_malloc_pointer(mem)) {
David 'Digit' Turnerc4eee372009-07-08 14:22:41 +0200550 assert_log_message(
551 "*** MALLOC CHECK: buffer %p, is not a valid "
552 "malloc pointer (are you mixing up new/delete "
553 "and malloc/free?)", mem);
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800554 }
555}
556
/* Check that a given address corresponds to a guarded block,
 * and returns its original allocation size in '*allocated'.
 * 'func' is the capitalized name of the caller function.
 * Returns 0 on success, or -1 on failure.
 * NOTE: Does not return if gTrapOnError is set.
 */
static int chk_mem_check(void* mem,
                         size_t* allocated,
                         const char* func)
{
    char* buffer;
    size_t offset, bytes;
    int i;
    char* buf;

    /* first check the bytes in the sentinel header */
    buf = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
    for (i=0 ; i<CHK_SENTINEL_HEAD_SIZE ; i++) {
        if (buf[i] != CHK_SENTINEL_VALUE) {
            assert_log_message(
                "*** %s CHECK: buffer %p "
                "corrupted %d bytes before allocation",
                func, mem, CHK_SENTINEL_HEAD_SIZE-i);
            return -1;
        }
    }

    /* then the ones in the sentinel trailer */
    // chk_malloc() stashed the requested size in the last sizeof(size_t)
    // bytes of the usable block; recover it to locate the tail sentinel
    buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
    offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
    bytes = *(size_t *)(buffer + offset);

    // scan the tail backwards so the report shows the farthest overrun
    buf = (char*)mem + bytes;
    for (i=CHK_SENTINEL_TAIL_SIZE-1 ; i>=0 ; i--) {
        if (buf[i] != CHK_SENTINEL_VALUE) {
            // NOTE(review): "%lu" with a size_t argument is only correct
            // where size_t == unsigned long — confirm for this target
            assert_log_message(
                "*** %s CHECK: buffer %p, size=%lu, "
                "corrupted %d bytes after allocation",
                func, buffer, bytes, i+1);
            return -1;
        }
    }

    *allocated = bytes;
    return 0;
}
603
David 'Digit' Turnerc4eee372009-07-08 14:22:41 +0200604
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800605void* chk_malloc(size_t bytes)
606{
607 char* buffer = (char*)dlmalloc(bytes + CHK_OVERHEAD_SIZE);
608 if (buffer) {
David 'Digit' Turnerc4eee372009-07-08 14:22:41 +0200609 memset(buffer, CHK_SENTINEL_VALUE, bytes + CHK_OVERHEAD_SIZE);
610 size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
611 *(size_t *)(buffer + offset) = bytes;
612 buffer += CHK_SENTINEL_HEAD_SIZE;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800613 }
614 return buffer;
615}
616
/*
 * free() for the checked allocator: verify both sentinels, paint the
 * whole block (guards included) with CHK_FILL_FREE to expose
 * use-after-free, then release the real allocation base. A block that
 * fails the check is reported and deliberately leaked, not freed.
 */
void chk_free(void* mem)
{
    assert_valid_malloc_pointer(mem);
    if (mem) {
        size_t size;
        char* buffer;

        if (chk_mem_check(mem, &size, "FREE") == 0) {
            // step back over the head sentinel to the dlmalloc base
            buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
            memset(buffer, CHK_FILL_FREE, size + CHK_OVERHEAD_SIZE);
            dlfree(buffer);
        }
    }
}
631
632void* chk_calloc(size_t n_elements, size_t elem_size)
633{
634 size_t size;
635 void* ptr;
636
637 /* Fail on overflow - just to be safe even though this code runs only
638 * within the debugging C library, not the production one */
639 if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
640 return NULL;
641 }
642 size = n_elements * elem_size;
643 ptr = chk_malloc(size);
644 if (ptr != NULL) {
645 memset(ptr, 0, size);
646 }
647 return ptr;
648}
649
/*
 * realloc() with sentinel guards. Verifies the old block first
 * (returning NULL if it is corrupt), allocates a fresh guarded block,
 * copies the smaller of the old and new sizes, and frees the old block.
 * A NULL "mem" degenerates to chk_malloc().
 */
void* chk_realloc(void* mem, size_t bytes)
{
    size_t old_bytes = 0;

    assert_valid_malloc_pointer(mem);

    if (mem != NULL && chk_mem_check(mem, &old_bytes, "REALLOC") < 0) {
        return NULL;
    }

    char* fresh = chk_malloc(bytes);
    if (mem == NULL) {
        return fresh;
    }

    if (fresh != NULL) {
        size_t to_copy = (bytes < old_bytes) ? bytes : old_bytes;
        memcpy(fresh, mem, to_copy);
        chk_free(mem);
    }

    return fresh;
}
674
/*
 * memalign() for the checked allocator.
 * NOTE(review): the alignment request is ignored; callers only get
 * dlmalloc's natural alignment, but sentinel checking still works.
 */
void* chk_memalign(size_t alignment, size_t bytes)
{
    // XXX: it's better to use malloc, than being wrong
    return chk_malloc(bytes);
}
680
681// =============================================================================
682// malloc fill functions
683// =============================================================================
684
685void* fill_malloc(size_t bytes)
686{
687 void* buffer = dlmalloc(bytes);
688 if (buffer) {
689 memset(buffer, CHK_SENTINEL_VALUE, bytes);
690 }
691 return buffer;
692}
693
694void fill_free(void* mem)
695{
696 size_t bytes = dlmalloc_usable_size(mem);
697 memset(mem, CHK_FILL_FREE, bytes);
698 dlfree(mem);
699}
700
/*
 * realloc() variant built on fill_malloc()/fill_free(): new bytes come
 * back painted with the sentinel pattern and the old block is painted
 * on release. A NULL "mem" degenerates to fill_malloc().
 */
void* fill_realloc(void* mem, size_t bytes)
{
    void* fresh = fill_malloc(bytes);
    if (mem == NULL) {
        return fresh;
    }
    if (fresh != NULL) {
        size_t old_size = dlmalloc_usable_size(mem);
        size_t to_copy = (bytes < old_size) ? bytes : old_size;
        memcpy(fresh, mem, to_copy);
        fill_free(mem);
    }
    return fresh;
}
715
716void* fill_memalign(size_t alignment, size_t bytes)
717{
718 void* buffer = dlmemalign(alignment, bytes);
719 if (buffer) {
720 memset(buffer, CHK_SENTINEL_VALUE, bytes);
721 }
722 return buffer;
723}
724
725// =============================================================================
726// malloc leak functions
727// =============================================================================
728
// Magic value stored just below a leak_memalign() pointer so leak_free()
// can recognize the adjustment and recover the original base pointer.
#define MEMALIGN_GUARD ((void*)0xA1A41520)
730
731void* leak_malloc(size_t bytes)
732{
733 // allocate enough space infront of the allocation to store the pointer for
734 // the alloc structure. This will making free'ing the structer really fast!
735
736 // 1. allocate enough memory and include our header
737 // 2. set the base pointer to be right after our header
738
739 void* base = dlmalloc(bytes + sizeof(AllocationEntry));
740 if (base != NULL) {
741 pthread_mutex_lock(&gAllocationsMutex);
742
743 intptr_t backtrace[BACKTRACE_SIZE];
744 size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
745
746 AllocationEntry* header = (AllocationEntry*)base;
747 header->entry = record_backtrace(backtrace, numEntries, bytes);
748 header->guard = GUARD;
749
750 // now increment base to point to after our header.
751 // this should just work since our header is 8 bytes.
752 base = (AllocationEntry*)base + 1;
753
754 pthread_mutex_unlock(&gAllocationsMutex);
755 }
756
757 return base;
758}
759
/*
 * free() for the leak checker.
 *
 * Recovers the AllocationEntry header placed by leak_malloc() in front
 * of the user block, decrements the allocation count of the matching
 * call site (removing the site when it reaches zero), then frees the
 * whole block. Pointers produced by leak_memalign() are recognized by
 * the MEMALIGN_GUARD word and translated back to the base allocation.
 */
void leak_free(void* mem)
{
    if (mem != NULL) {
        pthread_mutex_lock(&gAllocationsMutex);

        // check the guard to make sure it is valid
        AllocationEntry* header = (AllocationEntry*)mem - 1;

        if (header->guard != GUARD) {
            // could be a memaligned block: the word just below the user
            // pointer is the magic value, and the word below that is the
            // original base pointer
            if (((void**)mem)[-1] == MEMALIGN_GUARD) {
                mem = ((void**)mem)[-2];
                header = (AllocationEntry*)mem - 1;
            }
        }

        // fall back to the (slow) table scan when the guard is damaged
        if (header->guard == GUARD || is_valid_entry(header->entry)) {
            // decrement the allocations
            HashEntry* entry = header->entry;
            entry->allocations--;
            // "allocations" is a size_t, so this test is effectively == 0
            if (entry->allocations <= 0) {
                remove_entry(entry);
                dlfree(entry);
            }

            // now free the memory!
            dlfree(header);
        } else {
            debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
                    header->guard, header->entry);
        }

        pthread_mutex_unlock(&gAllocationsMutex);
    }
}
795
796void* leak_calloc(size_t n_elements, size_t elem_size)
797{
798 size_t size;
799 void* ptr;
800
801 /* Fail on overflow - just to be safe even though this code runs only
802 * within the debugging C library, not the production one */
803 if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
804 return NULL;
805 }
806 size = n_elements * elem_size;
807 ptr = leak_malloc(size);
808 if (ptr != NULL) {
809 memset(ptr, 0, size);
810 }
811 return ptr;
812}
813
/*
 * realloc() for the leak checker.
 *
 * If the old pointer carries our header, allocate a fresh tracked block,
 * copy the smaller of the two sizes, and free the old one. Otherwise the
 * pointer is assumed to come straight from dlmalloc and is passed through.
 *
 * NOTE(review): pointers returned by leak_memalign() do not have GUARD at
 * header position and so fall into the dlrealloc() branch with a non-base
 * (adjusted) pointer — confirm whether realloc of memaligned blocks can
 * occur in practice.
 */
void* leak_realloc(void* oldMem, size_t bytes)
{
    if (oldMem == NULL) {
        return leak_malloc(bytes);
    }
    void* newMem = NULL;
    AllocationEntry* header = (AllocationEntry*)oldMem - 1;
    if (header && header->guard == GUARD) {
        // tracked block: the original request size lives in the hash entry
        size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK;
        newMem = leak_malloc(bytes);
        if (newMem != NULL) {
            size_t copySize = (oldSize <= bytes) ? oldSize : bytes;
            memcpy(newMem, oldMem, copySize);
            leak_free(oldMem);
        }
    } else {
        newMem = dlrealloc(oldMem, bytes);
    }
    return newMem;
}
834
835void* leak_memalign(size_t alignment, size_t bytes)
836{
837 // we can just use malloc
838 if (alignment <= MALLOC_ALIGNMENT)
839 return leak_malloc(bytes);
840
841 // need to make sure it's a power of two
842 if (alignment & (alignment-1))
843 alignment = 1L << (31 - __builtin_clz(alignment));
844
845 // here, aligment is at least MALLOC_ALIGNMENT<<1 bytes
846 // we will align by at least MALLOC_ALIGNMENT bytes
847 // and at most alignment-MALLOC_ALIGNMENT bytes
848 size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
849 void* base = leak_malloc(size);
850 if (base != NULL) {
851 intptr_t ptr = (intptr_t)base;
852 if ((ptr % alignment) == 0)
853 return base;
854
855 // align the pointer
856 ptr += ((-ptr) % alignment);
857
858 // there is always enough space for the base pointer and the guard
859 ((void**)ptr)[-1] = MEMALIGN_GUARD;
860 ((void**)ptr)[-2] = base;
861
862 return (void*)ptr;
863 }
864 return base;
865}
866#endif /* MALLOC_LEAK_CHECK */
867
868// called from libc_init()
869extern char* __progname;
870
/*
 * Called from libc_init(): select the malloc dispatch table from the
 * "libc.debug.malloc" system property.
 *
 * Levels: 0 = plain dlmalloc, 1 = leak checker, 5 = fill on alloc/free,
 * 10 = sentinel checks + fill; anything else falls back to 0. In the
 * non-debug build (MALLOC_LEAK_CHECK undefined) a non-zero level only
 * logs a reminder that libc_debug.so must be installed.
 */
void malloc_debug_init()
{
    unsigned int level = 0;
#ifdef MALLOC_LEAK_CHECK
    // if MALLOC_LEAK_CHECK is enabled, use level=1 by default
    level = 1;
#endif
    char env[PROP_VALUE_MAX];
    int len = __system_property_get("libc.debug.malloc", env);

    if (len) {
        level = atoi(env);
#ifndef MALLOC_LEAK_CHECK
        /* Alert the user that libc_debug.so needs to be installed as libc.so
         * when performing malloc checks.
         */
        if (level != 0) {
            __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                 "Malloc checks need libc_debug.so pushed to the device!\n");

        }
#endif
    }

#ifdef MALLOC_LEAK_CHECK
    gMallocDebugLevel = level;
    switch (level) {
    default:
    case 0:
        gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
        break;
    case 1:
        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                "%s using MALLOC_DEBUG = %d (leak checker)\n",
                __progname, level);
        gMallocDispatch = &gMallocEngineTable[INDEX_LEAK_CHECK];
        break;
    case 5:
        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                "%s using MALLOC_DEBUG = %d (fill)\n",
                __progname, level);
        gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_FILL];
        break;
    case 10:
        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                "%s using MALLOC_DEBUG = %d (sentinels, fill)\n",
                __progname, level);
        gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
        break;
    }
#endif
}