#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
const char *opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
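/*
 * Note: opt_junk is the user-visible string form ("true", "false", "alloc",
 * or "free"), while opt_junk_alloc and opt_junk_free are the derived flags
 * that the allocation and deallocation paths actually test.
 * malloc_conf_init() keeps the three in sync when it parses the "junk"
 * option.
 */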

size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
unsigned opt_narenas = 0;

/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;

unsigned ncpus;

/* Protects arenas initialization. */
static malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the
 * application takes some action to create them and allocate from them.
 */
arena_t **arenas;
static unsigned narenas_total; /* Use narenas_total_*(). */
static arena_t *a0; /* arenas[0]; read-only after initialization. */
static unsigned narenas_auto; /* Read-only after initialization. */

typedef enum {
    malloc_init_uninitialized = 3,
    malloc_init_a0_initialized = 2,
    malloc_init_recursible = 1,
    malloc_init_initialized = 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t malloc_init_state = malloc_init_uninitialized;
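/*
 * The enum values are ordered so that malloc_init_initialized is 0: the
 * common-case test in malloc_initialized() then compiles to a compare
 * against zero (hence the "jnz" note above).
 */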

/*
 * malloc_slow is zero (false) in the common case.  It starts out true so
 * that the first allocation takes the slow path and triggers initialization.
 */
static bool malloc_slow = true;

/* When malloc_slow is true, these bits record which options forced it. */
enum {
    flag_opt_junk_alloc = (1U),
    flag_opt_junk_free = (1U << 1),
    flag_opt_quarantine = (1U << 2),
    flag_opt_zero = (1U << 3),
    flag_opt_utrace = (1U << 4),
    flag_in_valgrind = (1U << 5),
    flag_opt_xmalloc = (1U << 6)
};
static uint8_t malloc_slow_flags;

/* Last entry for overflow detection only. */
JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES+1] = {
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
    ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
    SIZE_CLASSES
#undef SC
    ZU(0)
};
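/*
 * Illustrative arithmetic (actual values come from the SIZE_CLASSES
 * encoding in size_classes.h): a class with lg_grp = 12, lg_delta = 10,
 * ndelta = 3 contributes (1 << 12) + (3 << 10) = 4096 + 3072 = 7168 bytes.
 */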

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#define S2B_0(i) i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define S2B_1(i) i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define S2B_2(i) i,
#elif LG_TINY_MIN == 3
#define S2B_3(i) i,
#elif LG_TINY_MIN == 4
#define S2B_4(i) i,
#elif LG_TINY_MIN == 5
#define S2B_5(i) i,
#elif LG_TINY_MIN == 6
#define S2B_6(i) i,
#elif LG_TINY_MIN == 7
#define S2B_7(i) i,
#elif LG_TINY_MIN == 8
#define S2B_8(i) i,
#elif LG_TINY_MIN == 9
#define S2B_9(i) i,
#elif LG_TINY_MIN == 10
#define S2B_10(i) i,
#elif LG_TINY_MIN == 11
#define S2B_11(i) i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define S2B_1(i) S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define S2B_2(i) S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define S2B_3(i) S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define S2B_4(i) S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define S2B_5(i) S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define S2B_6(i) S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define S2B_7(i) S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define S2B_8(i) S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define S2B_9(i) S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define S2B_10(i) S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
    S2B_##lg_delta_lookup(index)
    SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_10
#undef S2B_11
#undef S2B_no
#undef SC
};
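/*
 * Each S2B_n(i) above expands to 2^(n - LG_TINY_MIN) copies of i, so every
 * table slot covers 2^LG_TINY_MIN bytes and a size-to-index lookup is just
 * a shift and a one-byte load.
 */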

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{
    /*
     * If another constructor in the same binary is using mallctl to e.g.
     * set up chunk hooks, it may end up running before this one, and
     * malloc_init_hard will crash trying to lock the uninitialized lock.
     * So we force an initialization of the lock in malloc_init_hard as
     * well.  We don't try to care about atomicity of the accesses to the
     * init_lock_initialized boolean, since it really only matters early
     * in the process creation, before any separate thread normally starts
     * doing anything.
     */
    if (!init_lock_initialized)
        malloc_mutex_init(&init_lock);
    init_lock_initialized = true;
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
    void *p;  /* Input pointer (as in realloc(p, s)). */
    size_t s; /* Request size. */
    void *r;  /* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
    if (unlikely(opt_utrace)) { \
        int utrace_serrno = errno; \
        malloc_utrace_t ut; \
        ut.p = (a); \
        ut.s = (b); \
        ut.r = (c); \
        utrace(&ut, sizeof(ut)); \
        errno = utrace_serrno; \
    } \
} while (0)
#else
# define UTRACE(a, b, c)
#endif
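/*
 * Illustrative usage (the style used by jemalloc's entry points):
 * malloc-like calls record UTRACE(0, size, ret) and free-like calls record
 * UTRACE(ptr, 0, 0), so each trace record captures a realloc-shaped
 * (input, size, result) triple for utrace(2).
 */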

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool malloc_init_hard_a0(void);
static bool malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
malloc_initialized(void)
{

    return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

    /*
     * TSD initialization can't be safely done as a side effect of
     * deallocation, because it is possible for a thread to do nothing but
     * deallocate its TLS data via free(), in which case writing to TLS
     * would cause write-after-free memory corruption.  The quarantine
     * facility *only* gets used as a side effect of deallocation, so make
     * a best effort attempt at initializing its TSD by hooking all
     * allocation events.
     */
    if (config_fill && unlikely(opt_quarantine))
        quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init_a0(void)
{

    if (unlikely(malloc_init_state == malloc_init_uninitialized))
        return (malloc_init_hard_a0());
    return (false);
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

    if (unlikely(!malloc_initialized()) && malloc_init_hard())
        return (true);
    malloc_thread_init();

    return (false);
}

/*
 * The a0*() functions are used instead of i[mcd]alloc() in situations that
 * cannot tolerate TLS variable access.
 */

static void *
a0ialloc(size_t size, bool zero, bool is_metadata)
{

    if (unlikely(malloc_init_a0()))
        return (NULL);

    return (iallocztm(NULL, size, size2index(size), zero, false,
        is_metadata, arena_get(0, false), true));
}

static void
a0idalloc(void *ptr, bool is_metadata)
{

    idalloctm(NULL, ptr, false, is_metadata, true);
}

void *
a0malloc(size_t size)
{

    return (a0ialloc(size, false, true));
}

void
a0dalloc(void *ptr)
{

    a0idalloc(ptr, true);
}
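/*
 * For example, arena_tdata_get_hard() below uses a0malloc()/a0dalloc() for
 * a thread's arenas_tdata array, since it can run while TSD is still being
 * bootstrapped or torn down.
 */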

/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size)
{

    if (unlikely(size == 0))
        size = 1;

    return (a0ialloc(size, false, false));
}

void *
bootstrap_calloc(size_t num, size_t size)
{
    size_t num_size;

    num_size = num * size;
    if (unlikely(num_size == 0)) {
        assert(num == 0 || size == 0);
        num_size = 1;
    }

    return (a0ialloc(num_size, true, false));
}

void
bootstrap_free(void *ptr)
{

    if (unlikely(ptr == NULL))
        return;

    a0idalloc(ptr, false);
}

static void
arena_set(unsigned ind, arena_t *arena)
{

    atomic_write_p((void **)&arenas[ind], arena);
}

static void
narenas_total_set(unsigned narenas)
{

    atomic_write_u(&narenas_total, narenas);
}

static void
narenas_total_inc(void)
{

    atomic_add_u(&narenas_total, 1);
}

unsigned
narenas_total_get(void)
{

    return (atomic_read_u(&narenas_total));
}
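/*
 * arenas[] and narenas_total are written under arenas_lock (see arena_init()
 * below) but read only via the atomic helpers above, so fast-path lookups
 * never take the lock.
 */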

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(unsigned ind)
{
    arena_t *arena;

    assert(ind <= narenas_total_get());
    if (ind > MALLOCX_ARENA_MAX)
        return (NULL);
    if (ind == narenas_total_get())
        narenas_total_inc();

    /*
     * Another thread may have already initialized arenas[ind] if it's an
     * auto arena.
     */
    arena = arena_get(ind, false);
    if (arena != NULL) {
        assert(ind < narenas_auto);
        return (arena);
    }

    /* Actually initialize the arena. */
    arena = arena_new(ind);
    arena_set(ind, arena);
    return (arena);
}

arena_t *
arena_init(unsigned ind)
{
    arena_t *arena;

    malloc_mutex_lock(&arenas_lock);
    arena = arena_init_locked(ind);
    malloc_mutex_unlock(&arenas_lock);
    return (arena);
}

static void
arena_bind(tsd_t *tsd, unsigned ind)
{
    arena_t *arena;

    arena = arena_get(ind, false);
    arena_nthreads_inc(arena);

    if (tsd_nominal(tsd))
        tsd_arena_set(tsd, arena);
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
    arena_t *oldarena, *newarena;

    oldarena = arena_get(oldind, false);
    newarena = arena_get(newind, false);
    arena_nthreads_dec(oldarena);
    arena_nthreads_inc(newarena);
    tsd_arena_set(tsd, newarena);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind)
{
    arena_t *arena;

    arena = arena_get(ind, false);
    arena_nthreads_dec(arena);
    tsd_arena_set(tsd, NULL);
}

arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
{
    arena_tdata_t *tdata, *arenas_tdata_old;
    arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
    unsigned narenas_tdata_old, i;
    unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
    unsigned narenas_actual = narenas_total_get();

    /*
     * Dissociate old tdata array (and set up for deallocation upon return)
     * if it's too small.
     */
    if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
        arenas_tdata_old = arenas_tdata;
        narenas_tdata_old = narenas_tdata;
        arenas_tdata = NULL;
        narenas_tdata = 0;
        tsd_arenas_tdata_set(tsd, arenas_tdata);
        tsd_narenas_tdata_set(tsd, narenas_tdata);
    } else {
        arenas_tdata_old = NULL;
        narenas_tdata_old = 0;
    }

    /* Allocate tdata array if it's missing. */
    if (arenas_tdata == NULL) {
        bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
        narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

        if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
            *arenas_tdata_bypassp = true;
            arenas_tdata = (arena_tdata_t *)a0malloc(
                sizeof(arena_tdata_t) * narenas_tdata);
            *arenas_tdata_bypassp = false;
        }
        if (arenas_tdata == NULL) {
            tdata = NULL;
            goto label_return;
        }
        assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
        tsd_arenas_tdata_set(tsd, arenas_tdata);
        tsd_narenas_tdata_set(tsd, narenas_tdata);
    }

    /*
     * Copy to tdata array.  It's possible that the actual number of arenas
     * has increased since narenas_total_get() was called above, but that
     * causes no correctness issues unless two threads concurrently execute
     * the arenas.extend mallctl, which we trust mallctl synchronization to
     * prevent.
     */

    /* Copy/initialize tickers. */
    for (i = 0; i < narenas_actual; i++) {
        if (i < narenas_tdata_old) {
            ticker_copy(&arenas_tdata[i].decay_ticker,
                &arenas_tdata_old[i].decay_ticker);
        } else {
            ticker_init(&arenas_tdata[i].decay_ticker,
                DECAY_NTICKS_PER_UPDATE);
        }
    }
    if (narenas_tdata > narenas_actual) {
        memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
            * (narenas_tdata - narenas_actual));
    }

    /* Read the refreshed tdata array. */
    tdata = &arenas_tdata[ind];
label_return:
    if (arenas_tdata_old != NULL)
        a0dalloc(arenas_tdata_old);
    return (tdata);
}
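/*
 * Worked example: a thread whose cached tdata covers 4 arenas calls in after
 * arenas.extend has raised the actual count to 6.  The 4-entry array is
 * dissociated, a 6-entry array is allocated via a0malloc(), decay tickers
 * 0..3 are copied over, 4..5 are freshly initialized, and the old array is
 * freed on return.
 */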

/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd)
{
    arena_t *ret;

    if (narenas_auto > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas_auto;
        malloc_mutex_lock(&arenas_lock);
        assert(arena_get(0, false) != NULL);
        for (i = 1; i < narenas_auto; i++) {
            if (arena_get(i, false) != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arena_nthreads_get(arena_get(i, false)) <
                    arena_nthreads_get(arena_get(choose,
                    false)))
                    choose = i;
            } else if (first_null == narenas_auto) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arena_nthreads_get(arena_get(choose, false)) == 0
            || first_null == narenas_auto) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arena_get(choose, false);
        } else {
            /* Initialize a new arena. */
            choose = first_null;
            ret = arena_init_locked(choose);
            if (ret == NULL) {
                malloc_mutex_unlock(&arenas_lock);
                return (NULL);
            }
        }
        arena_bind(tsd, choose);
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = arena_get(0, false);
        arena_bind(tsd, 0);
    }

    return (ret);
}

void
thread_allocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
thread_deallocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arena_cleanup(tsd_t *tsd)
{
    arena_t *arena;

    arena = tsd_arena_get(tsd);
    if (arena != NULL)
        arena_unbind(tsd, arena->ind);
}

void
arenas_tdata_cleanup(tsd_t *tsd)
{
    arena_tdata_t *arenas_tdata;

    /* Prevent tsd->arenas_tdata from being (re)created. */
    *tsd_arenas_tdata_bypassp_get(tsd) = true;

    arenas_tdata = tsd_arenas_tdata_get(tsd);
    if (arenas_tdata != NULL) {
        tsd_arenas_tdata_set(tsd, NULL);
        a0dalloc(arenas_tdata);
    }
}

void
narenas_tdata_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arenas_tdata_bypass_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

static void
stats_print_atexit(void)
{

    if (config_tcache && config_stats) {
        unsigned narenas, i;

        /*
         * Merge stats from extant threads.  This is racy, since
         * individual threads do not lock when recording tcache stats
         * events.  As a consequence, the final stats may be slightly
         * out of date by the time they are reported, if other threads
         * continue to allocate.
         */
        for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
            arena_t *arena = arena_get(i, false);
            if (arena != NULL) {
                tcache_t *tcache;

                /*
                 * tcache_stats_merge() locks bins, so if any
                 * code is introduced that acquires both arena
                 * and bin locks in the opposite order,
                 * deadlocks may result.
                 */
                malloc_mutex_lock(&arena->lock);
                ql_foreach(tcache, &arena->tcache_ql, link) {
                    tcache_stats_merge(tcache, arena);
                }
                malloc_mutex_unlock(&arena->lock);
            }
        }
    }
    je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

#ifndef JEMALLOC_HAVE_SECURE_GETENV
static char *
secure_getenv(const char *name)
{

# ifdef JEMALLOC_HAVE_ISSETUGID
    if (issetugid() != 0)
        return (NULL);
# endif
    return (getenv(name));
}
#endif
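/*
 * The issetugid() check above makes the fallback refuse environment-derived
 * configuration in set-user-ID/set-group-ID processes, mirroring the
 * behavior of glibc's secure_getenv(3).
 */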

static unsigned
malloc_ncpus(void)
{
    long result;

#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    result = si.dwNumberOfProcessors;
#else
    result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
    return ((result == -1) ? 1 : (unsigned)result);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; !accept;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }

    for (accept = false; !accept;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}
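/*
 * For example, with *opts_p == "abort:true,narenas:8", the first call
 * yields k = "abort" (klen 5) and v = "true" (vlen 4) and advances *opts_p
 * past the comma; the second call yields k = "narenas", v = "8" and leaves
 * the string exhausted.
 */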

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}

static void
malloc_slow_flag_init(void)
{
    /*
     * Combine the runtime options into malloc_slow for fast path.  Called
     * after processing all the options.
     */
    malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
        | (opt_junk_free ? flag_opt_junk_free : 0)
        | (opt_quarantine ? flag_opt_quarantine : 0)
        | (opt_zero ? flag_opt_zero : 0)
        | (opt_utrace ? flag_opt_utrace : 0)
        | (opt_xmalloc ? flag_opt_xmalloc : 0);

    if (config_valgrind)
        malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);

    malloc_slow = (malloc_slow_flags != 0);
}

static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    /*
     * Automatically configure valgrind before processing options.  The
     * valgrind option remains in jemalloc 3.x for compatibility reasons.
     */
    if (config_valgrind) {
        in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
        if (config_fill && unlikely(in_valgrind)) {
            opt_junk = "false";
            opt_junk_alloc = false;
            opt_junk_free = false;
            assert(!opt_zero);
            opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
            opt_redzone = true;
        }
        if (config_tcache && unlikely(in_valgrind))
            opt_tcache = false;
    }

#if defined(__ANDROID__)
    for (i = 0; i < 2; i++) {
#else
    for (i = 0; i < 4; i++) {
#endif
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            opts = config_malloc_conf;
            break;
        case 1:
            if (je_malloc_conf != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = je_malloc_conf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 2: {
            ssize_t linklen = 0;
#ifndef _WIN32
            int saved_errno = errno;
            const char *linkname =
# ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
                "/etc/malloc.conf"
# endif
                ;

            /*
             * Try to use the contents of the "/etc/malloc.conf"
             * symbolic link's name.
             */
            linklen = readlink(linkname, buf, sizeof(buf) - 1);
            if (linklen == -1) {
                /* No configuration specified. */
                linklen = 0;
                /* Restore errno. */
                set_errno(saved_errno);
            }
#endif
            buf[linklen] = '\0';
            opts = buf;
            break;
        } case 3: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = secure_getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } default:
            not_reached();
            buf[0] = '\0';
            opts = buf;
        }

        while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
            &vlen)) {
#define CONF_MATCH(n) \
    (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n) \
    (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define CONF_HANDLE_BOOL(o, n, cont) \
    if (CONF_MATCH(n)) { \
        if (CONF_MATCH_VALUE("true")) \
            o = true; \
        else if (CONF_MATCH_VALUE("false")) \
            o = false; \
        else { \
            malloc_conf_error( \
                "Invalid conf value", \
                k, klen, v, vlen); \
        } \
        if (cont) \
            continue; \
    }
#define CONF_HANDLE_T_U(t, o, n, min, max, clip) \
    if (CONF_MATCH(n)) { \
        uintmax_t um; \
        char *end; \
\
        set_errno(0); \
        um = malloc_strtoumax(v, &end, 0); \
        if (get_errno() != 0 || (uintptr_t)end -\
            (uintptr_t)v != vlen) { \
            malloc_conf_error( \
                "Invalid conf value", \
                k, klen, v, vlen); \
        } else if (clip) { \
            if ((min) != 0 && um < (min)) \
                o = (t)(min); \
            else if (um > (max)) \
                o = (t)(max); \
            else \
                o = (t)um; \
        } else { \
            if (((min) != 0 && um < (min)) \
                || um > (max)) { \
                malloc_conf_error( \
                    "Out-of-range " \
                    "conf value", \
                    k, klen, v, vlen); \
            } else \
                o = (t)um; \
        } \
        continue; \
    }
#define CONF_HANDLE_UNSIGNED(o, n, min, max, clip) \
    CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
    CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
    if (CONF_MATCH(n)) { \
        long l; \
        char *end; \
\
        set_errno(0); \
        l = strtol(v, &end, 0); \
        if (get_errno() != 0 || (uintptr_t)end -\
            (uintptr_t)v != vlen) { \
            malloc_conf_error( \
                "Invalid conf value", \
                k, klen, v, vlen); \
        } else if (l < (ssize_t)(min) || l > \
            (ssize_t)(max)) { \
            malloc_conf_error( \
                "Out-of-range conf value", \
                k, klen, v, vlen); \
        } else \
            o = l; \
        continue; \
    }
#define CONF_HANDLE_CHAR_P(o, n, d) \
    if (CONF_MATCH(n)) { \
        size_t cpylen = (vlen <= \
            sizeof(o)-1) ? vlen : \
            sizeof(o)-1; \
        strncpy(o, v, cpylen); \
        o[cpylen] = '\0'; \
        continue; \
    }

            CONF_HANDLE_BOOL(opt_abort, "abort", true)
            /*
             * Chunks always require at least one header page,
             * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
             * possibly an additional page in the presence of
             * redzones.  In order to simplify options processing,
             * use a conservative bound that accommodates all these
             * constraints.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
                LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
                (sizeof(size_t) << 3) - 1, true)
            if (strncmp("dss", k, klen) == 0) {
                int i;
                bool match = false;
                for (i = 0; i < dss_prec_limit; i++) {
                    if (strncmp(dss_prec_names[i], v, vlen)
                        == 0) {
                        if (chunk_dss_prec_set(i)) {
                            malloc_conf_error(
                                "Error setting dss",
                                k, klen, v, vlen);
                        } else {
                            opt_dss =
                                dss_prec_names[i];
                            match = true;
                            break;
                        }
                    }
                }
                if (!match) {
                    malloc_conf_error("Invalid conf value",
                        k, klen, v, vlen);
                }
                continue;
            }
            CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
                UINT_MAX, false)
            if (strncmp("purge", k, klen) == 0) {
                int i;
                bool match = false;
                for (i = 0; i < purge_mode_limit; i++) {
                    if (strncmp(purge_mode_names[i], v,
                        vlen) == 0) {
                        opt_purge = (purge_mode_t)i;
                        match = true;
                        break;
                    }
                }
                if (!match) {
                    malloc_conf_error("Invalid conf value",
                        k, klen, v, vlen);
                }
                continue;
            }
            CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
                -1, (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
                NSTIME_SEC_MAX);
            CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
            if (config_fill) {
                if (CONF_MATCH("junk")) {
                    if (CONF_MATCH_VALUE("true")) {
                        opt_junk = "true";
                        opt_junk_alloc = opt_junk_free =
                            true;
                    } else if (CONF_MATCH_VALUE("false")) {
                        opt_junk = "false";
                        opt_junk_alloc = opt_junk_free =
                            false;
                    } else if (CONF_MATCH_VALUE("alloc")) {
                        opt_junk = "alloc";
                        opt_junk_alloc = true;
                        opt_junk_free = false;
                    } else if (CONF_MATCH_VALUE("free")) {
                        opt_junk = "free";
                        opt_junk_alloc = false;
                        opt_junk_free = true;
                    } else {
                        malloc_conf_error(
                            "Invalid conf value", k,
                            klen, v, vlen);
                    }
                    continue;
                }
                CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
                    0, SIZE_T_MAX, false)
                CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
                CONF_HANDLE_BOOL(opt_zero, "zero", true)
            }
            if (config_utrace) {
                CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
            }
            if (config_xmalloc) {
                CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
            }
            if (config_tcache) {
                CONF_HANDLE_BOOL(opt_tcache, "tcache",
                    !config_valgrind || !in_valgrind)
                if (CONF_MATCH("tcache")) {
                    assert(config_valgrind && in_valgrind);
                    if (opt_tcache) {
                        opt_tcache = false;
                        malloc_conf_error(
                            "tcache cannot be enabled "
                            "while running inside Valgrind",
                            k, klen, v, vlen);
                    }
                    continue;
                }
                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                    "lg_tcache_max", -1,
                    (sizeof(size_t) << 3) - 1)
            }
            if (config_prof) {
                CONF_HANDLE_BOOL(opt_prof, "prof", true)
                CONF_HANDLE_CHAR_P(opt_prof_prefix,
                    "prof_prefix", "jeprof")
                CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
                    true)
                CONF_HANDLE_BOOL(opt_prof_thread_active_init,
                    "prof_thread_active_init", true)
                CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
                    "lg_prof_sample", 0,
                    (sizeof(uint64_t) << 3) - 1, true)
                CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
                    true)
                CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
                    "lg_prof_interval", -1,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
                    true)
                CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
                    true)
                CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
                    true)
            }
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
    }
}
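/*
 * The loop above processes configuration sources in increasing priority
 * order: the compiled-in config_malloc_conf, the je_malloc_conf symbol, the
 * name of the /etc/malloc.conf symbolic link, and finally the MALLOC_CONF
 * environment variable (Android builds stop after the first two).  For
 * example, MALLOC_CONF="narenas:8,junk:alloc" is split into two key/value
 * pairs by malloc_conf_next() above.
 */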

/* init_lock must be held. */
static bool
malloc_init_hard_needed(void)
{

    if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
        malloc_init_recursible)) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        return (false);
    }
#ifdef JEMALLOC_THREADED_INIT
    if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (!malloc_initialized());
        return (false);
    }
#endif
    return (true);
}

/* init_lock must be held. */
static bool
malloc_init_hard_a0_locked(void)
{

    malloc_initializer = INITIALIZER;

    if (config_prof)
        prof_boot0();
    malloc_conf_init();
    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }
    if (base_boot())
        return (true);
    if (chunk_boot())
        return (true);
    if (ctl_boot())
        return (true);
    if (config_prof)
        prof_boot1();
    if (arena_boot())
        return (true);
    if (config_tcache && tcache_boot())
        return (true);
    if (malloc_mutex_init(&arenas_lock))
        return (true);
    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas_auto = 1;
    narenas_total_set(narenas_auto);
    arenas = &a0;
    memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
    /*
     * Initialize one arena here.  The rest are lazily created in
     * arena_choose_hard().
     */
    if (arena_init(0) == NULL)
        return (true);
    malloc_init_state = malloc_init_a0_initialized;
    return (false);
}

static bool
malloc_init_hard_a0(void)
{
    bool ret;

    malloc_mutex_lock(&init_lock);
    ret = malloc_init_hard_a0_locked();
    malloc_mutex_unlock(&init_lock);
    return (ret);
}

/*
 * Initialize data structures which may trigger recursive allocation.
 *
 * init_lock must be held.
 */
static bool
malloc_init_hard_recursible(void)
{
    bool ret = false;

    malloc_init_state = malloc_init_recursible;
    malloc_mutex_unlock(&init_lock);

    /* LinuxThreads' pthread_setspecific() allocates. */
    if (malloc_tsd_boot0()) {
        ret = true;
        goto label_return;
    }

    ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32) && !defined(__native_client__))
    /* LinuxThreads' pthread_atfork() allocates. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child) != 0) {
        ret = true;
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }
#endif

label_return:
    malloc_mutex_lock(&init_lock);
    return (ret);
}

/* init_lock must be held. */
static bool
malloc_init_hard_finish(void)
{

    if (mutex_boot())
        return (true);

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
#if defined(ANDROID_MAX_ARENAS)
    /*
     * Never create more than ANDROID_MAX_ARENAS arenas regardless of
     * ncpus.  Extra arenas use more PSS and are not very useful unless
     * lots of threads are allocating/freeing at the same time.
     */
    if (opt_narenas > ANDROID_MAX_ARENAS)
        opt_narenas = ANDROID_MAX_ARENAS;
#endif
Jason Evans609ae592012-10-11 13:53:15 -07001346 narenas_auto = opt_narenas;
Jason Evanse7339702010-10-23 18:37:06 -07001347 /*
Christopher Ferrise4294032016-03-02 14:33:02 -08001348 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
Jason Evanse7339702010-10-23 18:37:06 -07001349 */
Christopher Ferrise4294032016-03-02 14:33:02 -08001350 if (narenas_auto > MALLOCX_ARENA_MAX) {
1351 narenas_auto = MALLOCX_ARENA_MAX;
Jason Evansd81e4bd2012-03-06 14:57:45 -08001352		malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
Jason Evans609ae592012-10-11 13:53:15 -07001353 narenas_auto);
Jason Evans289053c2009-06-22 12:08:42 -07001354 }
Christopher Ferrise4294032016-03-02 14:33:02 -08001355 narenas_total_set(narenas_auto);
Jason Evans289053c2009-06-22 12:08:42 -07001356
Jason Evans289053c2009-06-22 12:08:42 -07001357 /* Allocate and initialize arenas. */
Christopher Ferrise4294032016-03-02 14:33:02 -08001358 arenas = (arena_t **)base_alloc(sizeof(arena_t *) *
1359 (MALLOCX_ARENA_MAX+1));
Jason Evans10aff3f2015-01-20 15:37:51 -08001360 if (arenas == NULL)
Jason Evans289053c2009-06-22 12:08:42 -07001361 return (true);
Jason Evansb7924f52009-06-23 19:01:18 -07001362 /* Copy the pointer to the one arena that was already initialized. */
Christopher Ferrise4294032016-03-02 14:33:02 -08001363 arena_set(0, a0);
Jason Evans289053c2009-06-22 12:08:42 -07001364
Jason Evans10aff3f2015-01-20 15:37:51 -08001365 malloc_init_state = malloc_init_initialized;
Qi Wangf4a0f322015-10-27 15:12:10 -07001366 malloc_slow_flag_init();
1367
Jason Evans10aff3f2015-01-20 15:37:51 -08001368 return (false);
1369}
1370
1371static bool
1372malloc_init_hard(void)
1373{
1374
Mike Hommey0a116fa2015-09-03 15:48:48 +09001375#if defined(_WIN32) && _WIN32_WINNT < 0x0600
1376 _init_init_lock();
1377#endif
Jason Evans10aff3f2015-01-20 15:37:51 -08001378 malloc_mutex_lock(&init_lock);
1379 if (!malloc_init_hard_needed()) {
1380 malloc_mutex_unlock(&init_lock);
1381 return (false);
1382 }
1383
1384 if (malloc_init_state != malloc_init_a0_initialized &&
1385 malloc_init_hard_a0_locked()) {
1386 malloc_mutex_unlock(&init_lock);
1387 return (true);
1388 }
Cosmin Paraschiv9cb481a2016-01-11 11:05:00 -08001389
1390 if (malloc_init_hard_recursible()) {
Jason Evans10aff3f2015-01-20 15:37:51 -08001391 malloc_mutex_unlock(&init_lock);
1392 return (true);
1393 }
1394
Cosmin Paraschiv9cb481a2016-01-11 11:05:00 -08001395 if (config_prof && prof_boot2()) {
1396 malloc_mutex_unlock(&init_lock);
1397 return (true);
1398 }
Jason Evans10aff3f2015-01-20 15:37:51 -08001399
1400 if (malloc_init_hard_finish()) {
1401 malloc_mutex_unlock(&init_lock);
1402 return (true);
1403 }
1404
Jason Evans289053c2009-06-22 12:08:42 -07001405 malloc_mutex_unlock(&init_lock);
Jason Evans8bb31982014-10-07 23:14:57 -07001406 malloc_tsd_boot1();
Jason Evans289053c2009-06-22 12:08:42 -07001407 return (false);
1408}
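
/*
 * Summary of the state progression that malloc_init_hard() drives
 * (init_lock is held across each transition except the recursible
 * window, where it is temporarily dropped):
 *
 *	malloc_init_uninitialized
 *	  -> malloc_init_a0_initialized	(malloc_init_hard_a0_locked())
 *	  -> malloc_init_recursible	(malloc_init_hard_recursible())
 *	  -> malloc_init_initialized	(malloc_init_hard_finish())
 */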
1409
1410/*
Jason Evanse476f8a2010-01-16 09:53:50 -08001411 * End initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -07001412 */
1413/******************************************************************************/
1414/*
1415 * Begin malloc(3)-compatible functions.
1416 */
1417
Jason Evansb2c31662014-01-12 15:05:44 -08001418static void *
Qi Wangf4a0f322015-10-27 15:12:10 -07001419imalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind,
1420 prof_tctx_t *tctx, bool slow_path)
Jason Evansb2c31662014-01-12 15:05:44 -08001421{
1422 void *p;
1423
Jason Evans602c8e02014-08-18 16:22:13 -07001424 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08001425 return (NULL);
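	/*
	 * A sampled allocation that would otherwise land in a small size
	 * class is promoted to the smallest large class so that its
	 * prof_tctx can be tracked per allocation (small regions share
	 * run metadata); arena_prof_promoted() then records the original
	 * usize.
	 */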
Jason Evans9b0cbf02014-04-11 14:24:51 -07001426 if (usize <= SMALL_MAXCLASS) {
Qi Wangf4a0f322015-10-27 15:12:10 -07001427 szind_t ind_large = size2index(LARGE_MINCLASS);
1428 p = imalloc(tsd, LARGE_MINCLASS, ind_large, slow_path);
Jason Evansb2c31662014-01-12 15:05:44 -08001429 if (p == NULL)
1430 return (NULL);
1431 arena_prof_promoted(p, usize);
1432 } else
Qi Wangf4a0f322015-10-27 15:12:10 -07001433 p = imalloc(tsd, usize, ind, slow_path);
Jason Evansb2c31662014-01-12 15:05:44 -08001434
1435 return (p);
1436}
1437
1438JEMALLOC_ALWAYS_INLINE_C void *
Qi Wangf4a0f322015-10-27 15:12:10 -07001439imalloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool slow_path)
Jason Evansb2c31662014-01-12 15:05:44 -08001440{
1441 void *p;
Jason Evans602c8e02014-08-18 16:22:13 -07001442 prof_tctx_t *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08001443
Jason Evanscec0d632015-09-14 23:17:25 -07001444 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
Jason Evans9c640bf2014-09-11 16:20:44 -07001445 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
Qi Wangf4a0f322015-10-27 15:12:10 -07001446 p = imalloc_prof_sample(tsd, usize, ind, tctx, slow_path);
Jason Evansb2c31662014-01-12 15:05:44 -08001447 else
Qi Wangf4a0f322015-10-27 15:12:10 -07001448 p = imalloc(tsd, usize, ind, slow_path);
Jason Evanscfc57062014-10-30 23:18:45 -07001449 if (unlikely(p == NULL)) {
Jason Evans5460aa62014-09-22 21:09:23 -07001450 prof_alloc_rollback(tsd, tctx, true);
Jason Evansb2c31662014-01-12 15:05:44 -08001451 return (NULL);
Jason Evans6e73dc12014-09-09 19:37:26 -07001452 }
Jason Evans602c8e02014-08-18 16:22:13 -07001453 prof_malloc(p, usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001454
1455 return (p);
1456}
1457
Jason Evans6f001052014-04-22 18:41:15 -07001458JEMALLOC_ALWAYS_INLINE_C void *
Qi Wangf4a0f322015-10-27 15:12:10 -07001459imalloc_body(size_t size, tsd_t **tsd, size_t *usize, bool slow_path)
Jason Evans6f001052014-04-22 18:41:15 -07001460{
Qi Wangf4a0f322015-10-27 15:12:10 -07001461 szind_t ind;
Jason Evans6f001052014-04-22 18:41:15 -07001462
Qi Wangf4a0f322015-10-27 15:12:10 -07001463 if (slow_path && unlikely(malloc_init()))
Jason Evans6f001052014-04-22 18:41:15 -07001464 return (NULL);
Jason Evans029d44c2014-10-04 11:12:53 -07001465 *tsd = tsd_fetch();
Qi Wangf4a0f322015-10-27 15:12:10 -07001466 ind = size2index(size);
Christopher Ferrise4294032016-03-02 14:33:02 -08001467 if (unlikely(ind >= NSIZES))
1468 return (NULL);
Jason Evans6f001052014-04-22 18:41:15 -07001469
Christopher Ferrise4294032016-03-02 14:33:02 -08001470 if (config_stats || (config_prof && opt_prof) || (slow_path &&
1471 config_valgrind && unlikely(in_valgrind))) {
Qi Wangf4a0f322015-10-27 15:12:10 -07001472 *usize = index2size(ind);
Christopher Ferrise4294032016-03-02 14:33:02 -08001473 assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
Jason Evans6f001052014-04-22 18:41:15 -07001474 }
1475
Christopher Ferrise4294032016-03-02 14:33:02 -08001476 if (config_prof && opt_prof)
Qi Wangf4a0f322015-10-27 15:12:10 -07001477 return (imalloc_prof(*tsd, *usize, ind, slow_path));
Qi Wangf4a0f322015-10-27 15:12:10 -07001478
1479 return (imalloc(*tsd, size, ind, slow_path));
1480}
1481
1482JEMALLOC_ALWAYS_INLINE_C void
1483imalloc_post_check(void *ret, tsd_t *tsd, size_t usize, bool slow_path)
1484{
1485 if (unlikely(ret == NULL)) {
1486 if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
1487 malloc_write("<jemalloc>: Error in malloc(): "
1488 "out of memory\n");
1489 abort();
1490 }
1491 set_errno(ENOMEM);
1492 }
1493 if (config_stats && likely(ret != NULL)) {
1494 assert(usize == isalloc(ret, config_prof));
1495 *tsd_thread_allocatedp_get(tsd) += usize;
1496 }
Jason Evans6f001052014-04-22 18:41:15 -07001497}
Jason Evansb2c31662014-01-12 15:05:44 -08001498
Matthijsc1a6a512015-07-27 22:48:27 +02001499JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1500void JEMALLOC_NOTHROW *
Jason Evans00632602015-07-21 08:10:38 -07001501JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
Jason Evans0a5489e2012-03-01 17:19:20 -08001502je_malloc(size_t size)
Jason Evans289053c2009-06-22 12:08:42 -07001503{
1504 void *ret;
Jason Evans5460aa62014-09-22 21:09:23 -07001505 tsd_t *tsd;
Jason Evans8694e2e2012-04-23 13:05:32 -07001506 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans289053c2009-06-22 12:08:42 -07001507
Jason Evansc90ad712012-02-28 20:31:37 -08001508 if (size == 0)
1509 size = 1;
Jason Evans289053c2009-06-22 12:08:42 -07001510
Qi Wangf4a0f322015-10-27 15:12:10 -07001511 if (likely(!malloc_slow)) {
1512 /*
1513 * imalloc_body() is inlined so that fast and slow paths are
1514 * generated separately with statically known slow_path.
1515 */
1516 ret = imalloc_body(size, &tsd, &usize, false);
1517 imalloc_post_check(ret, tsd, usize, false);
1518 } else {
1519 ret = imalloc_body(size, &tsd, &usize, true);
1520 imalloc_post_check(ret, tsd, usize, true);
1521 UTRACE(0, size, ret);
1522 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
Jason Evans289053c2009-06-22 12:08:42 -07001523 }
Qi Wangf4a0f322015-10-27 15:12:10 -07001524
Jason Evans289053c2009-06-22 12:08:42 -07001525 return (ret);
1526}
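
/*
 * A minimal sketch of the fast/slow specialization used by je_malloc()
 * above; the toy_* names are hypothetical, not jemalloc API.  Because
 * the body is always-inline and slow_path is statically known at each
 * call site, the compiler emits two separate copies, and the fast copy
 * folds away every slow-path branch:
 *
 *	JEMALLOC_ALWAYS_INLINE_C void *
 *	toy_body(size_t size, bool slow_path)
 *	{
 *
 *		if (slow_path && unlikely(toy_init()))
 *			return (NULL);
 *		return (toy_alloc(size));
 *	}
 *
 *	void *
 *	toy_malloc(size_t size)
 *	{
 *
 *		if (likely(!toy_slow))
 *			return (toy_body(size, false));
 *		return (toy_body(size, true));
 *	}
 */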
1527
Jason Evansb2c31662014-01-12 15:05:44 -08001528static void *
Jason Evans5460aa62014-09-22 21:09:23 -07001529imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
1530 prof_tctx_t *tctx)
Jason Evansb2c31662014-01-12 15:05:44 -08001531{
1532 void *p;
1533
Jason Evans602c8e02014-08-18 16:22:13 -07001534 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08001535 return (NULL);
Jason Evans9b0cbf02014-04-11 14:24:51 -07001536 if (usize <= SMALL_MAXCLASS) {
Jason Evansb718cf72014-09-07 14:40:19 -07001537 assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
Jason Evans241abc62015-06-23 18:47:07 -07001538 p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
Jason Evansb2c31662014-01-12 15:05:44 -08001539 if (p == NULL)
1540 return (NULL);
1541 arena_prof_promoted(p, usize);
1542 } else
Jason Evans5460aa62014-09-22 21:09:23 -07001543 p = ipalloc(tsd, usize, alignment, false);
Jason Evansb2c31662014-01-12 15:05:44 -08001544
1545 return (p);
1546}
1547
1548JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans5460aa62014-09-22 21:09:23 -07001549imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
Jason Evansb2c31662014-01-12 15:05:44 -08001550{
1551 void *p;
Jason Evans6e73dc12014-09-09 19:37:26 -07001552 prof_tctx_t *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08001553
Jason Evanscec0d632015-09-14 23:17:25 -07001554 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
Jason Evans9c640bf2014-09-11 16:20:44 -07001555 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
Jason Evans5460aa62014-09-22 21:09:23 -07001556 p = imemalign_prof_sample(tsd, alignment, usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001557 else
Jason Evans5460aa62014-09-22 21:09:23 -07001558 p = ipalloc(tsd, usize, alignment, false);
Jason Evanscfc57062014-10-30 23:18:45 -07001559 if (unlikely(p == NULL)) {
Jason Evans5460aa62014-09-22 21:09:23 -07001560 prof_alloc_rollback(tsd, tctx, true);
Jason Evansb2c31662014-01-12 15:05:44 -08001561 return (NULL);
Jason Evans6e73dc12014-09-09 19:37:26 -07001562 }
Jason Evans602c8e02014-08-18 16:22:13 -07001563 prof_malloc(p, usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001564
1565 return (p);
1566}
1567
Jason Evans9ad48232010-01-03 11:59:20 -08001568JEMALLOC_ATTR(nonnull(1))
Jason Evansa5070042011-08-12 13:48:27 -07001569static int
Jason Evansb2c31662014-01-12 15:05:44 -08001570imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
Jason Evans289053c2009-06-22 12:08:42 -07001571{
1572 int ret;
Jason Evans5460aa62014-09-22 21:09:23 -07001573 tsd_t *tsd;
Jason Evans7372b152012-02-10 20:22:09 -08001574 size_t usize;
Jason Evans38d92102011-03-23 00:37:29 -07001575 void *result;
Jason Evans289053c2009-06-22 12:08:42 -07001576
Jason Evans0a0bbf62012-03-13 12:55:21 -07001577 assert(min_alignment != 0);
1578
Jason Evans029d44c2014-10-04 11:12:53 -07001579 if (unlikely(malloc_init())) {
Jason Evans289053c2009-06-22 12:08:42 -07001580 result = NULL;
Jason Evansb2c31662014-01-12 15:05:44 -08001581 goto label_oom;
Jason Evans289053c2009-06-22 12:08:42 -07001582 }
Jason Evansdc0610a2015-06-22 18:48:58 -07001583 tsd = tsd_fetch();
1584 if (size == 0)
1585 size = 1;
1586
1587 /* Make sure that alignment is a large enough power of 2. */
1588 if (unlikely(((alignment - 1) & alignment) != 0
1589 || (alignment < min_alignment))) {
1590 if (config_xmalloc && unlikely(opt_xmalloc)) {
1591 malloc_write("<jemalloc>: Error allocating "
1592 "aligned memory: invalid alignment\n");
1593 abort();
1594 }
1595 result = NULL;
1596 ret = EINVAL;
1597 goto label_return;
1598 }
1599
1600 usize = sa2u(size, alignment);
Christopher Ferrise4294032016-03-02 14:33:02 -08001601 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
Jason Evansdc0610a2015-06-22 18:48:58 -07001602 result = NULL;
1603 goto label_oom;
1604 }
1605
1606 if (config_prof && opt_prof)
1607 result = imemalign_prof(tsd, alignment, usize);
1608 else
1609 result = ipalloc(tsd, usize, alignment, false);
1610 if (unlikely(result == NULL))
1611 goto label_oom;
1612 assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
Jason Evans289053c2009-06-22 12:08:42 -07001613
1614 *memptr = result;
1615 ret = 0;
Jason Evansa1ee7832012-04-10 15:07:44 -07001616label_return:
Jason Evans9c640bf2014-09-11 16:20:44 -07001617 if (config_stats && likely(result != NULL)) {
Jason Evans122449b2012-04-06 00:35:09 -07001618 assert(usize == isalloc(result, config_prof));
Jason Evans5460aa62014-09-22 21:09:23 -07001619 *tsd_thread_allocatedp_get(tsd) += usize;
Jason Evans93443682010-10-20 17:39:18 -07001620 }
Jason Evansb1476112012-04-05 13:36:17 -07001621 UTRACE(0, size, result);
Jason Evans289053c2009-06-22 12:08:42 -07001622 return (ret);
Jason Evansb2c31662014-01-12 15:05:44 -08001623label_oom:
1624 assert(result == NULL);
Jason Evans9c640bf2014-09-11 16:20:44 -07001625 if (config_xmalloc && unlikely(opt_xmalloc)) {
Jason Evansb2c31662014-01-12 15:05:44 -08001626 malloc_write("<jemalloc>: Error allocating aligned memory: "
1627 "out of memory\n");
1628 abort();
1629 }
1630 ret = ENOMEM;
1631 goto label_return;
Jason Evans289053c2009-06-22 12:08:42 -07001632}
1633
Jason Evans00632602015-07-21 08:10:38 -07001634JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1635JEMALLOC_ATTR(nonnull(1))
Jason Evans0a5489e2012-03-01 17:19:20 -08001636je_posix_memalign(void **memptr, size_t alignment, size_t size)
Jason Evansa5070042011-08-12 13:48:27 -07001637{
Jason Evans122449b2012-04-06 00:35:09 -07001638 int ret = imemalign(memptr, alignment, size, sizeof(void *));
1639 JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
1640 config_prof), false);
1641 return (ret);
Jason Evans0a0bbf62012-03-13 12:55:21 -07001642}
1643
Matthijsc1a6a512015-07-27 22:48:27 +02001644JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1645void JEMALLOC_NOTHROW *
Jason Evans00632602015-07-21 08:10:38 -07001646JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
Jason Evans0a0bbf62012-03-13 12:55:21 -07001647je_aligned_alloc(size_t alignment, size_t size)
1648{
1649 void *ret;
1650 int err;
1651
Jason Evans9c640bf2014-09-11 16:20:44 -07001652 if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
Jason Evans0a0bbf62012-03-13 12:55:21 -07001653 ret = NULL;
Mike Hommeya14bce82012-04-30 12:38:26 +02001654 set_errno(err);
Jason Evans0a0bbf62012-03-13 12:55:21 -07001655 }
Jason Evans122449b2012-04-06 00:35:09 -07001656 JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
1657 false);
Jason Evans0a0bbf62012-03-13 12:55:21 -07001658 return (ret);
Jason Evansa5070042011-08-12 13:48:27 -07001659}
1660
Jason Evansb2c31662014-01-12 15:05:44 -08001661static void *
Qi Wangf4a0f322015-10-27 15:12:10 -07001662icalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, prof_tctx_t *tctx)
Jason Evansb2c31662014-01-12 15:05:44 -08001663{
1664 void *p;
1665
Jason Evans602c8e02014-08-18 16:22:13 -07001666 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08001667 return (NULL);
Jason Evans9b0cbf02014-04-11 14:24:51 -07001668 if (usize <= SMALL_MAXCLASS) {
Qi Wangf4a0f322015-10-27 15:12:10 -07001669 szind_t ind_large = size2index(LARGE_MINCLASS);
1670 p = icalloc(tsd, LARGE_MINCLASS, ind_large);
Jason Evansb2c31662014-01-12 15:05:44 -08001671 if (p == NULL)
1672 return (NULL);
1673 arena_prof_promoted(p, usize);
1674 } else
Qi Wangf4a0f322015-10-27 15:12:10 -07001675 p = icalloc(tsd, usize, ind);
Jason Evansb2c31662014-01-12 15:05:44 -08001676
1677 return (p);
1678}
1679
1680JEMALLOC_ALWAYS_INLINE_C void *
Qi Wangf4a0f322015-10-27 15:12:10 -07001681icalloc_prof(tsd_t *tsd, size_t usize, szind_t ind)
Jason Evansb2c31662014-01-12 15:05:44 -08001682{
1683 void *p;
Jason Evans6e73dc12014-09-09 19:37:26 -07001684 prof_tctx_t *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08001685
Jason Evanscec0d632015-09-14 23:17:25 -07001686 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
Jason Evans9c640bf2014-09-11 16:20:44 -07001687 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
Qi Wangf4a0f322015-10-27 15:12:10 -07001688 p = icalloc_prof_sample(tsd, usize, ind, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001689 else
Qi Wangf4a0f322015-10-27 15:12:10 -07001690 p = icalloc(tsd, usize, ind);
Jason Evanscfc57062014-10-30 23:18:45 -07001691 if (unlikely(p == NULL)) {
Jason Evans5460aa62014-09-22 21:09:23 -07001692 prof_alloc_rollback(tsd, tctx, true);
Jason Evansb2c31662014-01-12 15:05:44 -08001693 return (NULL);
Jason Evans6e73dc12014-09-09 19:37:26 -07001694 }
Jason Evans602c8e02014-08-18 16:22:13 -07001695 prof_malloc(p, usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001696
1697 return (p);
1698}
1699
Matthijsc1a6a512015-07-27 22:48:27 +02001700JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1701void JEMALLOC_NOTHROW *
Jason Evans00632602015-07-21 08:10:38 -07001702JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
Jason Evans0a5489e2012-03-01 17:19:20 -08001703je_calloc(size_t num, size_t size)
Jason Evans289053c2009-06-22 12:08:42 -07001704{
1705 void *ret;
Jason Evans5460aa62014-09-22 21:09:23 -07001706 tsd_t *tsd;
Jason Evans289053c2009-06-22 12:08:42 -07001707 size_t num_size;
Qi Wangf4a0f322015-10-27 15:12:10 -07001708 szind_t ind;
Jason Evans8694e2e2012-04-23 13:05:32 -07001709 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans289053c2009-06-22 12:08:42 -07001710
Jason Evans029d44c2014-10-04 11:12:53 -07001711 if (unlikely(malloc_init())) {
Jason Evans289053c2009-06-22 12:08:42 -07001712 num_size = 0;
1713 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -07001714 goto label_return;
Jason Evans289053c2009-06-22 12:08:42 -07001715 }
Jason Evans029d44c2014-10-04 11:12:53 -07001716 tsd = tsd_fetch();
Jason Evans289053c2009-06-22 12:08:42 -07001717
1718 num_size = num * size;
Jason Evans9c640bf2014-09-11 16:20:44 -07001719 if (unlikely(num_size == 0)) {
Jason Evansc90ad712012-02-28 20:31:37 -08001720 if (num == 0 || size == 0)
Jason Evans289053c2009-06-22 12:08:42 -07001721 num_size = 1;
1722 else {
1723 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -07001724 goto label_return;
Jason Evans289053c2009-06-22 12:08:42 -07001725 }
1726 /*
1727 * Try to avoid division here. We know that it isn't possible to
1728 * overflow during multiplication if neither operand uses any of the
1729 * most significant half of the bits in a size_t.
1730 */
Jason Evans9c640bf2014-09-11 16:20:44 -07001731 } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
1732 2))) && (num_size / size != num))) {
Jason Evans289053c2009-06-22 12:08:42 -07001733 /* size_t overflow. */
1734 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -07001735 goto label_return;
Jason Evans289053c2009-06-22 12:08:42 -07001736 }
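	/*
	 * Worked example for the check above, assuming 64-bit size_t:
	 * SIZE_T_MAX << (sizeof(size_t) << 2) masks the high 32 bits, so
	 * when (num | size) has no high bits set, both operands are below
	 * 2^32 and num * size < 2^64 cannot wrap.  Otherwise the division
	 * runs; e.g. num == size == 0x100000001 wraps num_size to
	 * 0x200000001, and num_size / size == 1 != num reports the
	 * overflow.
	 */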
1737
Qi Wangf4a0f322015-10-27 15:12:10 -07001738 ind = size2index(num_size);
Christopher Ferrise4294032016-03-02 14:33:02 -08001739 if (unlikely(ind >= NSIZES)) {
1740 ret = NULL;
1741 goto label_return;
1742 }
Jason Evans7372b152012-02-10 20:22:09 -08001743 if (config_prof && opt_prof) {
Qi Wangf4a0f322015-10-27 15:12:10 -07001744 usize = index2size(ind);
Qi Wangf4a0f322015-10-27 15:12:10 -07001745 ret = icalloc_prof(tsd, usize, ind);
Jason Evans7372b152012-02-10 20:22:09 -08001746 } else {
Jason Evans9c640bf2014-09-11 16:20:44 -07001747 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
Qi Wangf4a0f322015-10-27 15:12:10 -07001748 usize = index2size(ind);
1749 ret = icalloc(tsd, num_size, ind);
Jason Evans93443682010-10-20 17:39:18 -07001750 }
Jason Evans289053c2009-06-22 12:08:42 -07001751
Jason Evansa1ee7832012-04-10 15:07:44 -07001752label_return:
Jason Evans9c640bf2014-09-11 16:20:44 -07001753 if (unlikely(ret == NULL)) {
1754 if (config_xmalloc && unlikely(opt_xmalloc)) {
Jason Evans698805c2010-03-03 17:45:38 -08001755 malloc_write("<jemalloc>: Error in calloc(): out of "
1756 "memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001757 abort();
1758 }
Mike Hommeya14bce82012-04-30 12:38:26 +02001759 set_errno(ENOMEM);
Jason Evans289053c2009-06-22 12:08:42 -07001760 }
Jason Evans9c640bf2014-09-11 16:20:44 -07001761 if (config_stats && likely(ret != NULL)) {
Jason Evans122449b2012-04-06 00:35:09 -07001762 assert(usize == isalloc(ret, config_prof));
Jason Evans5460aa62014-09-22 21:09:23 -07001763 *tsd_thread_allocatedp_get(tsd) += usize;
Jason Evans93443682010-10-20 17:39:18 -07001764 }
Jason Evansb1476112012-04-05 13:36:17 -07001765 UTRACE(0, num_size, ret);
Jason Evans122449b2012-04-06 00:35:09 -07001766 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
Jason Evans289053c2009-06-22 12:08:42 -07001767 return (ret);
1768}
1769
Jason Evansb2c31662014-01-12 15:05:44 -08001770static void *
Jason Evansd9704042015-09-14 23:28:32 -07001771irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
Daniel Micayd33f8342014-10-24 13:18:57 -04001772 prof_tctx_t *tctx)
Jason Evansb2c31662014-01-12 15:05:44 -08001773{
1774 void *p;
1775
Jason Evans602c8e02014-08-18 16:22:13 -07001776 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08001777 return (NULL);
Jason Evans9b0cbf02014-04-11 14:24:51 -07001778 if (usize <= SMALL_MAXCLASS) {
Jason Evansd9704042015-09-14 23:28:32 -07001779 p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
Jason Evansb2c31662014-01-12 15:05:44 -08001780 if (p == NULL)
1781 return (NULL);
1782 arena_prof_promoted(p, usize);
1783 } else
Jason Evansd9704042015-09-14 23:28:32 -07001784 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
Jason Evansb2c31662014-01-12 15:05:44 -08001785
1786 return (p);
1787}
1788
1789JEMALLOC_ALWAYS_INLINE_C void *
Jason Evansd9704042015-09-14 23:28:32 -07001790irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
Jason Evansb2c31662014-01-12 15:05:44 -08001791{
1792 void *p;
Jason Evanscec0d632015-09-14 23:17:25 -07001793 bool prof_active;
Jason Evans6e73dc12014-09-09 19:37:26 -07001794 prof_tctx_t *old_tctx, *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08001795
Jason Evanscec0d632015-09-14 23:17:25 -07001796 prof_active = prof_active_get_unlocked();
Jason Evansd9704042015-09-14 23:28:32 -07001797 old_tctx = prof_tctx_get(old_ptr);
Jason Evanscec0d632015-09-14 23:17:25 -07001798 tctx = prof_alloc_prep(tsd, usize, prof_active, true);
Jason Evans9c640bf2014-09-11 16:20:44 -07001799 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
Jason Evansd9704042015-09-14 23:28:32 -07001800 p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001801 else
Jason Evansd9704042015-09-14 23:28:32 -07001802 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
Jason Evansef363de2015-09-14 22:45:31 -07001803 if (unlikely(p == NULL)) {
1804 prof_alloc_rollback(tsd, tctx, true);
Jason Evansb2c31662014-01-12 15:05:44 -08001805 return (NULL);
Jason Evansef363de2015-09-14 22:45:31 -07001806 }
Jason Evans708ed792015-09-14 23:48:11 -07001807 prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
Jason Evanscec0d632015-09-14 23:17:25 -07001808 old_tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001809
1810 return (p);
1811}
1812
1813JEMALLOC_INLINE_C void
Qi Wangf4a0f322015-10-27 15:12:10 -07001814ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
Jason Evansb2c31662014-01-12 15:05:44 -08001815{
1816 size_t usize;
1817 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1818
1819 assert(ptr != NULL);
Jason Evans10aff3f2015-01-20 15:37:51 -08001820 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansb2c31662014-01-12 15:05:44 -08001821
1822 if (config_prof && opt_prof) {
1823 usize = isalloc(ptr, config_prof);
Jason Evans5460aa62014-09-22 21:09:23 -07001824 prof_free(tsd, ptr, usize);
Jason Evansb2c31662014-01-12 15:05:44 -08001825 } else if (config_stats || config_valgrind)
1826 usize = isalloc(ptr, config_prof);
Jason Evans029d44c2014-10-04 11:12:53 -07001827 if (config_stats)
Jason Evans5460aa62014-09-22 21:09:23 -07001828 *tsd_thread_deallocatedp_get(tsd) += usize;
Qi Wangf4a0f322015-10-27 15:12:10 -07001829
1830 if (likely(!slow_path))
1831 iqalloc(tsd, ptr, tcache, false);
1832 else {
1833 if (config_valgrind && unlikely(in_valgrind))
1834 rzsize = p2rz(ptr);
1835 iqalloc(tsd, ptr, tcache, true);
1836 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1837 }
Jason Evansb2c31662014-01-12 15:05:44 -08001838}
1839
Daniel Micay4cfe5512014-08-28 15:41:48 -04001840JEMALLOC_INLINE_C void
Jason Evans1cb181e2015-01-29 15:30:47 -08001841isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
Daniel Micay4cfe5512014-08-28 15:41:48 -04001842{
1843 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1844
1845 assert(ptr != NULL);
Jason Evans10aff3f2015-01-20 15:37:51 -08001846 assert(malloc_initialized() || IS_INITIALIZER);
Daniel Micay4cfe5512014-08-28 15:41:48 -04001847
1848 if (config_prof && opt_prof)
Jason Evans5460aa62014-09-22 21:09:23 -07001849 prof_free(tsd, ptr, usize);
Jason Evans029d44c2014-10-04 11:12:53 -07001850 if (config_stats)
Jason Evans5460aa62014-09-22 21:09:23 -07001851 *tsd_thread_deallocatedp_get(tsd) += usize;
Jason Evans9c640bf2014-09-11 16:20:44 -07001852 if (config_valgrind && unlikely(in_valgrind))
Daniel Micay4cfe5512014-08-28 15:41:48 -04001853 rzsize = p2rz(ptr);
Jason Evans1cb181e2015-01-29 15:30:47 -08001854 isqalloc(tsd, ptr, usize, tcache);
Daniel Micay4cfe5512014-08-28 15:41:48 -04001855 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1856}
1857
Matthijsc1a6a512015-07-27 22:48:27 +02001858JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1859void JEMALLOC_NOTHROW *
Jason Evans00632602015-07-21 08:10:38 -07001860JEMALLOC_ALLOC_SIZE(2)
Jason Evans0a5489e2012-03-01 17:19:20 -08001861je_realloc(void *ptr, size_t size)
Jason Evans289053c2009-06-22 12:08:42 -07001862{
1863 void *ret;
Jason Evans0800afd2014-10-04 14:59:17 -07001864 tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans8694e2e2012-04-23 13:05:32 -07001865 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans66576932013-12-15 16:21:30 -08001866 size_t old_usize = 0;
Jason Evans73692322013-12-10 13:51:52 -08001867 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans6109fe02010-02-10 10:37:56 -08001868
Jason Evans9c640bf2014-09-11 16:20:44 -07001869 if (unlikely(size == 0)) {
Jason Evansf081b882012-02-28 20:24:05 -08001870 if (ptr != NULL) {
Jason Evansb2c31662014-01-12 15:05:44 -08001871 /* realloc(ptr, 0) is equivalent to free(ptr). */
1872 UTRACE(ptr, 0, 0);
Jason Evans029d44c2014-10-04 11:12:53 -07001873 tsd = tsd_fetch();
Qi Wangf4a0f322015-10-27 15:12:10 -07001874 ifree(tsd, ptr, tcache_get(tsd, false), true);
Jason Evansb2c31662014-01-12 15:05:44 -08001875 return (NULL);
1876 }
1877 size = 1;
Jason Evans289053c2009-06-22 12:08:42 -07001878 }
1879
Jason Evans9c640bf2014-09-11 16:20:44 -07001880 if (likely(ptr != NULL)) {
Jason Evans10aff3f2015-01-20 15:37:51 -08001881 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansbbe29d32013-01-30 15:03:11 -08001882 malloc_thread_init();
Jason Evans029d44c2014-10-04 11:12:53 -07001883 tsd = tsd_fetch();
Jason Evans289053c2009-06-22 12:08:42 -07001884
Daniel Micayd33f8342014-10-24 13:18:57 -04001885 old_usize = isalloc(ptr, config_prof);
Jason Evans029d44c2014-10-04 11:12:53 -07001886 if (config_valgrind && unlikely(in_valgrind))
1887 old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
Jason Evansb2c31662014-01-12 15:05:44 -08001888
Jason Evans029d44c2014-10-04 11:12:53 -07001889 if (config_prof && opt_prof) {
1890 usize = s2u(size);
Christopher Ferrise4294032016-03-02 14:33:02 -08001891 ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
1892 NULL : irealloc_prof(tsd, ptr, old_usize, usize);
Jason Evans029d44c2014-10-04 11:12:53 -07001893 } else {
1894 if (config_stats || (config_valgrind &&
1895 unlikely(in_valgrind)))
Jason Evans7372b152012-02-10 20:22:09 -08001896 usize = s2u(size);
Daniel Micayd33f8342014-10-24 13:18:57 -04001897 ret = iralloc(tsd, ptr, old_usize, size, 0, false);
Jason Evans029d44c2014-10-04 11:12:53 -07001898 }
Jason Evans289053c2009-06-22 12:08:42 -07001899 } else {
Jason Evansf081b882012-02-28 20:24:05 -08001900 /* realloc(NULL, size) is equivalent to malloc(size). */
Qi Wangf4a0f322015-10-27 15:12:10 -07001901 if (likely(!malloc_slow))
1902 ret = imalloc_body(size, &tsd, &usize, false);
1903 else
1904 ret = imalloc_body(size, &tsd, &usize, true);
Jason Evans289053c2009-06-22 12:08:42 -07001905 }
1906
Jason Evans9c640bf2014-09-11 16:20:44 -07001907 if (unlikely(ret == NULL)) {
1908 if (config_xmalloc && unlikely(opt_xmalloc)) {
Jason Evansb2c31662014-01-12 15:05:44 -08001909 malloc_write("<jemalloc>: Error in realloc(): "
1910 "out of memory\n");
1911 abort();
1912 }
1913 set_errno(ENOMEM);
1914 }
Jason Evans9c640bf2014-09-11 16:20:44 -07001915 if (config_stats && likely(ret != NULL)) {
Jason Evans122449b2012-04-06 00:35:09 -07001916 assert(usize == isalloc(ret, config_prof));
Jason Evans029d44c2014-10-04 11:12:53 -07001917 *tsd_thread_allocatedp_get(tsd) += usize;
1918 *tsd_thread_deallocatedp_get(tsd) += old_usize;
Jason Evans93443682010-10-20 17:39:18 -07001919 }
Jason Evansb1476112012-04-05 13:36:17 -07001920 UTRACE(ptr, size, ret);
Jason Evansbd87b012014-04-15 16:35:08 -07001921 JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
1922 old_rzsize, true, false);
Jason Evans289053c2009-06-22 12:08:42 -07001923 return (ret);
1924}
1925
Jason Evans00632602015-07-21 08:10:38 -07001926JEMALLOC_EXPORT void JEMALLOC_NOTHROW
Jason Evans0a5489e2012-03-01 17:19:20 -08001927je_free(void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001928{
1929
Jason Evansb1476112012-04-05 13:36:17 -07001930 UTRACE(ptr, 0, 0);
Jason Evans1cb181e2015-01-29 15:30:47 -08001931 if (likely(ptr != NULL)) {
1932 tsd_t *tsd = tsd_fetch();
Qi Wangf4a0f322015-10-27 15:12:10 -07001933 if (likely(!malloc_slow))
1934 ifree(tsd, ptr, tcache_get(tsd, false), false);
1935 else
1936 ifree(tsd, ptr, tcache_get(tsd, false), true);
Jason Evans1cb181e2015-01-29 15:30:47 -08001937 }
Jason Evans289053c2009-06-22 12:08:42 -07001938}
1939
1940/*
1941 * End malloc(3)-compatible functions.
1942 */
1943/******************************************************************************/
1944/*
Jason Evans6a0d2912010-09-20 16:44:23 -07001945 * Begin non-standard override functions.
Jason Evans6a0d2912010-09-20 16:44:23 -07001946 */
Jason Evans6a0d2912010-09-20 16:44:23 -07001947
1948#ifdef JEMALLOC_OVERRIDE_MEMALIGN
Matthijsc1a6a512015-07-27 22:48:27 +02001949JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1950void JEMALLOC_NOTHROW *
Jason Evansae93d6b2015-07-10 14:33:00 -07001951JEMALLOC_ATTR(malloc)
Jason Evans0a5489e2012-03-01 17:19:20 -08001952je_memalign(size_t alignment, size_t size)
Jason Evans6a0d2912010-09-20 16:44:23 -07001953{
Jason Evans9225a192012-03-23 15:39:07 -07001954 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans44b57b82015-01-16 18:04:17 -08001955 if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
1956 ret = NULL;
Jason Evans122449b2012-04-06 00:35:09 -07001957 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
Jason Evans6a0d2912010-09-20 16:44:23 -07001958 return (ret);
1959}
1960#endif
1961
1962#ifdef JEMALLOC_OVERRIDE_VALLOC
Matthijsc1a6a512015-07-27 22:48:27 +02001963JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1964void JEMALLOC_NOTHROW *
Jason Evansae93d6b2015-07-10 14:33:00 -07001965JEMALLOC_ATTR(malloc)
Jason Evans0a5489e2012-03-01 17:19:20 -08001966je_valloc(size_t size)
Jason Evans6a0d2912010-09-20 16:44:23 -07001967{
Jason Evans9225a192012-03-23 15:39:07 -07001968 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans44b57b82015-01-16 18:04:17 -08001969 if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
1970 ret = NULL;
Jason Evans122449b2012-04-06 00:35:09 -07001971 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
Jason Evans6a0d2912010-09-20 16:44:23 -07001972 return (ret);
1973}
1974#endif
1975
Mike Hommey5c89c502012-03-26 17:46:57 +02001976/*
1977 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1978 * #define je_malloc malloc
1979 */
1980#define malloc_is_malloc 1
1981#define is_malloc_(a) malloc_is_ ## a
1982#define is_malloc(a) is_malloc_(a)
1983
Sara Golemon3e24afa2014-08-18 13:06:39 -07001984#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
Jason Evans4bb09832012-02-29 10:37:27 -08001985/*
1986 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1987 * to inconsistently reference libc's malloc(3)-compatible functions
1988 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1989 *
Mike Hommey3c2ba0d2012-03-27 14:20:13 +02001990 * These definitions interpose hooks in glibc. The functions are actually
Jason Evans4bb09832012-02-29 10:37:27 -08001991 * passed an extra argument for the caller return address, which will be
1992 * ignored.
1993 */
Jason Evansa344dd02014-05-01 15:51:30 -07001994JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
1995JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
1996JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
Sara Golemon3e24afa2014-08-18 13:06:39 -07001997# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
Jason Evansa344dd02014-05-01 15:51:30 -07001998JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
Mike Hommeyda99e312012-04-30 12:38:29 +02001999 je_memalign;
Sara Golemon3e24afa2014-08-18 13:06:39 -07002000# endif
Jason Evans4bb09832012-02-29 10:37:27 -08002001#endif
2002
Jason Evans6a0d2912010-09-20 16:44:23 -07002003/*
2004 * End non-standard override functions.
2005 */
2006/******************************************************************************/
2007/*
Jason Evans289053c2009-06-22 12:08:42 -07002008 * Begin non-standard functions.
2009 */
2010
Jason Evans8bb31982014-10-07 23:14:57 -07002011JEMALLOC_ALWAYS_INLINE_C bool
2012imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
Jason Evans1cb181e2015-01-29 15:30:47 -08002013 size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
Jason Evansb718cf72014-09-07 14:40:19 -07002014{
2015
2016 if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
2017 *alignment = 0;
2018 *usize = s2u(size);
2019 } else {
2020 *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2021 *usize = sa2u(size, *alignment);
2022 }
Christopher Ferrise4294032016-03-02 14:33:02 -08002023 if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
2024 return (true);
Jason Evansb718cf72014-09-07 14:40:19 -07002025 *zero = MALLOCX_ZERO_GET(flags);
Jason Evans1cb181e2015-01-29 15:30:47 -08002026 if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2027 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2028 *tcache = NULL;
2029 else
2030 *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2031 } else
2032 *tcache = tcache_get(tsd, true);
Jason Evansb718cf72014-09-07 14:40:19 -07002033 if ((flags & MALLOCX_ARENA_MASK) != 0) {
2034 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
Christopher Ferrise4294032016-03-02 14:33:02 -08002035 *arena = arena_get(arena_ind, true);
Jason Evans8bb31982014-10-07 23:14:57 -07002036 if (unlikely(*arena == NULL))
2037 return (true);
Jason Evans1cb181e2015-01-29 15:30:47 -08002038 } else
Jason Evansb718cf72014-09-07 14:40:19 -07002039 *arena = NULL;
Jason Evans8bb31982014-10-07 23:14:57 -07002040 return (false);
Jason Evansb718cf72014-09-07 14:40:19 -07002041}
2042
Jason Evans8bb31982014-10-07 23:14:57 -07002043JEMALLOC_ALWAYS_INLINE_C bool
2044imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
Jason Evans1cb181e2015-01-29 15:30:47 -08002045 size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
Jason Evansb718cf72014-09-07 14:40:19 -07002046{
2047
Jason Evans9c640bf2014-09-11 16:20:44 -07002048 if (likely(flags == 0)) {
Jason Evansb718cf72014-09-07 14:40:19 -07002049 *usize = s2u(size);
Christopher Ferrise4294032016-03-02 14:33:02 -08002050 if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
2051 return (true);
Jason Evansb718cf72014-09-07 14:40:19 -07002052 *alignment = 0;
2053 *zero = false;
Jason Evans1cb181e2015-01-29 15:30:47 -08002054 *tcache = tcache_get(tsd, true);
Jason Evansb718cf72014-09-07 14:40:19 -07002055 *arena = NULL;
Jason Evans8bb31982014-10-07 23:14:57 -07002056 return (false);
Jason Evansb718cf72014-09-07 14:40:19 -07002057 } else {
Jason Evans8bb31982014-10-07 23:14:57 -07002058 return (imallocx_flags_decode_hard(tsd, size, flags, usize,
Jason Evans1cb181e2015-01-29 15:30:47 -08002059 alignment, zero, tcache, arena));
Jason Evansb718cf72014-09-07 14:40:19 -07002060 }
2061}
2062
Jason Evansd82a5e62013-12-12 22:35:52 -08002063JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans5460aa62014-09-22 21:09:23 -07002064imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
Jason Evans1cb181e2015-01-29 15:30:47 -08002065 tcache_t *tcache, arena_t *arena)
Jason Evans289053c2009-06-22 12:08:42 -07002066{
Qi Wangf4a0f322015-10-27 15:12:10 -07002067 szind_t ind;
Jason Evansd82a5e62013-12-12 22:35:52 -08002068
Jason Evans3263be62015-09-17 10:19:28 -07002069 if (unlikely(alignment != 0))
Jason Evans1cb181e2015-01-29 15:30:47 -08002070 return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
Christopher Ferrise4294032016-03-02 14:33:02 -08002071 ind = size2index(usize);
2072 assert(ind < NSIZES);
Jason Evans3263be62015-09-17 10:19:28 -07002073 if (unlikely(zero))
Qi Wangf4a0f322015-10-27 15:12:10 -07002074 return (icalloct(tsd, usize, ind, tcache, arena));
2075 return (imalloct(tsd, usize, ind, tcache, arena));
Jason Evansd82a5e62013-12-12 22:35:52 -08002076}
2077
Jason Evansb2c31662014-01-12 15:05:44 -08002078static void *
Jason Evans3263be62015-09-17 10:19:28 -07002079imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
2080 tcache_t *tcache, arena_t *arena)
Jason Evansb2c31662014-01-12 15:05:44 -08002081{
2082 void *p;
2083
Jason Evans9b0cbf02014-04-11 14:24:51 -07002084 if (usize <= SMALL_MAXCLASS) {
Jason Evansb718cf72014-09-07 14:40:19 -07002085 assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
2086 sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
Jason Evans3263be62015-09-17 10:19:28 -07002087 p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
2088 arena);
Jason Evansb2c31662014-01-12 15:05:44 -08002089 if (p == NULL)
2090 return (NULL);
2091 arena_prof_promoted(p, usize);
Jason Evans3263be62015-09-17 10:19:28 -07002092 } else
2093 p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
Jason Evansb2c31662014-01-12 15:05:44 -08002094
2095 return (p);
2096}
2097
2098JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans5460aa62014-09-22 21:09:23 -07002099imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
Jason Evansb2c31662014-01-12 15:05:44 -08002100{
2101 void *p;
Jason Evansb718cf72014-09-07 14:40:19 -07002102 size_t alignment;
2103 bool zero;
Jason Evans1cb181e2015-01-29 15:30:47 -08002104 tcache_t *tcache;
Jason Evansb718cf72014-09-07 14:40:19 -07002105 arena_t *arena;
2106 prof_tctx_t *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08002107
Jason Evans8bb31982014-10-07 23:14:57 -07002108 if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
Jason Evans1cb181e2015-01-29 15:30:47 -08002109 &zero, &tcache, &arena)))
Jason Evans8bb31982014-10-07 23:14:57 -07002110 return (NULL);
Jason Evanscec0d632015-09-14 23:17:25 -07002111 tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
Jason Evans3263be62015-09-17 10:19:28 -07002112 if (likely((uintptr_t)tctx == (uintptr_t)1U))
2113 p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2114 else if ((uintptr_t)tctx > (uintptr_t)1U) {
2115 p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
2116 arena);
Jason Evansb2c31662014-01-12 15:05:44 -08002117 } else
Jason Evansb718cf72014-09-07 14:40:19 -07002118 p = NULL;
Jason Evans9c640bf2014-09-11 16:20:44 -07002119 if (unlikely(p == NULL)) {
Jason Evans5460aa62014-09-22 21:09:23 -07002120 prof_alloc_rollback(tsd, tctx, true);
Jason Evansb2c31662014-01-12 15:05:44 -08002121 return (NULL);
Jason Evans6e73dc12014-09-09 19:37:26 -07002122 }
Jason Evansb718cf72014-09-07 14:40:19 -07002123 prof_malloc(p, *usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08002124
Jason Evansdc0610a2015-06-22 18:48:58 -07002125 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
Jason Evansb2c31662014-01-12 15:05:44 -08002126 return (p);
2127}
2128
Jason Evansb718cf72014-09-07 14:40:19 -07002129JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans5460aa62014-09-22 21:09:23 -07002130imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
Jason Evansb718cf72014-09-07 14:40:19 -07002131{
Jason Evansdc0610a2015-06-22 18:48:58 -07002132 void *p;
Jason Evansb718cf72014-09-07 14:40:19 -07002133 size_t alignment;
2134 bool zero;
Jason Evans1cb181e2015-01-29 15:30:47 -08002135 tcache_t *tcache;
Jason Evansb718cf72014-09-07 14:40:19 -07002136 arena_t *arena;
2137
Jason Evans9c640bf2014-09-11 16:20:44 -07002138 if (likely(flags == 0)) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002139 szind_t ind = size2index(size);
Christopher Ferrise4294032016-03-02 14:33:02 -08002140 if (unlikely(ind >= NSIZES))
2141 return (NULL);
2142 if (config_stats || (config_valgrind &&
2143 unlikely(in_valgrind))) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002144 *usize = index2size(ind);
Christopher Ferrise4294032016-03-02 14:33:02 -08002145 assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
2146 }
Qi Wangf4a0f322015-10-27 15:12:10 -07002147 return (imalloc(tsd, size, ind, true));
Jason Evansb718cf72014-09-07 14:40:19 -07002148 }
2149
Jason Evans8bb31982014-10-07 23:14:57 -07002150 if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
Jason Evans1cb181e2015-01-29 15:30:47 -08002151 &alignment, &zero, &tcache, &arena)))
Jason Evans8bb31982014-10-07 23:14:57 -07002152 return (NULL);
Jason Evansdc0610a2015-06-22 18:48:58 -07002153 p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2154 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2155 return (p);
Jason Evansb718cf72014-09-07 14:40:19 -07002156}
2157
Matthijsc1a6a512015-07-27 22:48:27 +02002158JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2159void JEMALLOC_NOTHROW *
Jason Evans00632602015-07-21 08:10:38 -07002160JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
Jason Evansd82a5e62013-12-12 22:35:52 -08002161je_mallocx(size_t size, int flags)
2162{
Jason Evans5460aa62014-09-22 21:09:23 -07002163 tsd_t *tsd;
Jason Evansd82a5e62013-12-12 22:35:52 -08002164 void *p;
2165 size_t usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002166
2167 assert(size != 0);
2168
Jason Evans029d44c2014-10-04 11:12:53 -07002169 if (unlikely(malloc_init()))
Jason Evansd82a5e62013-12-12 22:35:52 -08002170 goto label_oom;
Jason Evans029d44c2014-10-04 11:12:53 -07002171 tsd = tsd_fetch();
Jason Evansd82a5e62013-12-12 22:35:52 -08002172
Jason Evansb718cf72014-09-07 14:40:19 -07002173 if (config_prof && opt_prof)
Jason Evans5460aa62014-09-22 21:09:23 -07002174 p = imallocx_prof(tsd, size, flags, &usize);
Jason Evansb718cf72014-09-07 14:40:19 -07002175 else
Jason Evans5460aa62014-09-22 21:09:23 -07002176 p = imallocx_no_prof(tsd, size, flags, &usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002177 if (unlikely(p == NULL))
Jason Evansb2c31662014-01-12 15:05:44 -08002178 goto label_oom;
Jason Evansd82a5e62013-12-12 22:35:52 -08002179
2180 if (config_stats) {
2181 assert(usize == isalloc(p, config_prof));
Jason Evans029d44c2014-10-04 11:12:53 -07002182 *tsd_thread_allocatedp_get(tsd) += usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002183 }
2184 UTRACE(0, size, p);
Jason Evansb718cf72014-09-07 14:40:19 -07002185 JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
Jason Evansd82a5e62013-12-12 22:35:52 -08002186 return (p);
2187label_oom:
Jason Evans9c640bf2014-09-11 16:20:44 -07002188 if (config_xmalloc && unlikely(opt_xmalloc)) {
Jason Evansd82a5e62013-12-12 22:35:52 -08002189 malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
2190 abort();
2191 }
2192 UTRACE(0, size, 0);
2193 return (NULL);
2194}
2195
Jason Evansb2c31662014-01-12 15:05:44 -08002196static void *
Jason Evans4be9c792015-09-17 10:17:55 -07002197irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
2198 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
Jason Evans1cb181e2015-01-29 15:30:47 -08002199 prof_tctx_t *tctx)
Jason Evansb2c31662014-01-12 15:05:44 -08002200{
2201 void *p;
2202
Jason Evans602c8e02014-08-18 16:22:13 -07002203 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08002204 return (NULL);
Jason Evans9b0cbf02014-04-11 14:24:51 -07002205 if (usize <= SMALL_MAXCLASS) {
Jason Evansd9704042015-09-14 23:28:32 -07002206 p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
Jason Evans1cb181e2015-01-29 15:30:47 -08002207 zero, tcache, arena);
Jason Evansb2c31662014-01-12 15:05:44 -08002208 if (p == NULL)
2209 return (NULL);
2210 arena_prof_promoted(p, usize);
2211 } else {
Jason Evans4be9c792015-09-17 10:17:55 -07002212 p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
Jason Evans1cb181e2015-01-29 15:30:47 -08002213 tcache, arena);
Jason Evansb2c31662014-01-12 15:05:44 -08002214 }
2215
2216 return (p);
2217}
2218
2219JEMALLOC_ALWAYS_INLINE_C void *
Jason Evansd9704042015-09-14 23:28:32 -07002220irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
Jason Evans1cb181e2015-01-29 15:30:47 -08002221 size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2222 arena_t *arena)
Jason Evansb2c31662014-01-12 15:05:44 -08002223{
2224 void *p;
Jason Evanscec0d632015-09-14 23:17:25 -07002225 bool prof_active;
Jason Evans6e73dc12014-09-09 19:37:26 -07002226 prof_tctx_t *old_tctx, *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08002227
Jason Evanscec0d632015-09-14 23:17:25 -07002228 prof_active = prof_active_get_unlocked();
Jason Evansd9704042015-09-14 23:28:32 -07002229 old_tctx = prof_tctx_get(old_ptr);
Jason Evanscec0d632015-09-14 23:17:25 -07002230 tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
Jason Evans9c640bf2014-09-11 16:20:44 -07002231 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
Jason Evans4be9c792015-09-17 10:17:55 -07002232 p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
2233 alignment, zero, tcache, arena, tctx);
Jason Evans6e73dc12014-09-09 19:37:26 -07002234 } else {
Jason Evansd9704042015-09-14 23:28:32 -07002235 p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
Jason Evans1cb181e2015-01-29 15:30:47 -08002236 tcache, arena);
Jason Evansb2c31662014-01-12 15:05:44 -08002237 }
Jason Evans9c640bf2014-09-11 16:20:44 -07002238 if (unlikely(p == NULL)) {
Jason Evans46ff0492015-09-14 22:40:42 -07002239 prof_alloc_rollback(tsd, tctx, true);
Jason Evansb2c31662014-01-12 15:05:44 -08002240 return (NULL);
Jason Evans6e73dc12014-09-09 19:37:26 -07002241 }
Jason Evansb2c31662014-01-12 15:05:44 -08002242
Jason Evansd9704042015-09-14 23:28:32 -07002243 if (p == old_ptr && alignment != 0) {
Jason Evansb2c31662014-01-12 15:05:44 -08002244 /*
2245 * The allocation did not move, so it is possible that the size
2246 * class is smaller than would guarantee the requested
2247 * alignment, and that the alignment constraint was
2248 * serendipitously satisfied. Additionally, old_usize may not
2249 * be the same as the current usize because of in-place large
2250 * reallocation. Therefore, query the actual value of usize.
2251 */
2252 *usize = isalloc(p, config_prof);
2253 }
Jason Evans708ed792015-09-14 23:48:11 -07002254 prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
Jason Evanscec0d632015-09-14 23:17:25 -07002255 old_usize, old_tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08002256
2257 return (p);
2258}
2259
Matthijsc1a6a512015-07-27 22:48:27 +02002260JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2261void JEMALLOC_NOTHROW *
Jason Evans00632602015-07-21 08:10:38 -07002262JEMALLOC_ALLOC_SIZE(2)
Jason Evansd82a5e62013-12-12 22:35:52 -08002263je_rallocx(void *ptr, size_t size, int flags)
2264{
2265 void *p;
Jason Evans5460aa62014-09-22 21:09:23 -07002266 tsd_t *tsd;
Jason Evans9c640bf2014-09-11 16:20:44 -07002267 size_t usize;
Daniel Micayd33f8342014-10-24 13:18:57 -04002268 size_t old_usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002269 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evansb718cf72014-09-07 14:40:19 -07002270 size_t alignment = MALLOCX_ALIGN_GET(flags);
Jason Evansd82a5e62013-12-12 22:35:52 -08002271 bool zero = flags & MALLOCX_ZERO;
Jason Evansd82a5e62013-12-12 22:35:52 -08002272 arena_t *arena;
Jason Evans1cb181e2015-01-29 15:30:47 -08002273 tcache_t *tcache;
Jason Evansd82a5e62013-12-12 22:35:52 -08002274
2275 assert(ptr != NULL);
2276 assert(size != 0);
Jason Evans10aff3f2015-01-20 15:37:51 -08002277 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansd82a5e62013-12-12 22:35:52 -08002278 malloc_thread_init();
Jason Evans029d44c2014-10-04 11:12:53 -07002279 tsd = tsd_fetch();
Jason Evans5460aa62014-09-22 21:09:23 -07002280
Jason Evans9c640bf2014-09-11 16:20:44 -07002281 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
Jason Evansb718cf72014-09-07 14:40:19 -07002282 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
Christopher Ferrise4294032016-03-02 14:33:02 -08002283 arena = arena_get(arena_ind, true);
Jason Evans8bb31982014-10-07 23:14:57 -07002284 if (unlikely(arena == NULL))
2285 goto label_oom;
Jason Evans1cb181e2015-01-29 15:30:47 -08002286 } else
Jason Evansd82a5e62013-12-12 22:35:52 -08002287 arena = NULL;
Jason Evans1cb181e2015-01-29 15:30:47 -08002288
2289 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2290 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2291 tcache = NULL;
2292 else
2293 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2294 } else
2295 tcache = tcache_get(tsd, true);
Jason Evansd82a5e62013-12-12 22:35:52 -08002296
Daniel Micayd33f8342014-10-24 13:18:57 -04002297 old_usize = isalloc(ptr, config_prof);
Jason Evans9c640bf2014-09-11 16:20:44 -07002298 if (config_valgrind && unlikely(in_valgrind))
Jason Evansb2c31662014-01-12 15:05:44 -08002299 old_rzsize = u2rz(old_usize);
2300
Jason Evansd82a5e62013-12-12 22:35:52 -08002301 if (config_prof && opt_prof) {
Jason Evansb2c31662014-01-12 15:05:44 -08002302 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
Christopher Ferrise4294032016-03-02 14:33:02 -08002303 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
2304 goto label_oom;
Jason Evans5460aa62014-09-22 21:09:23 -07002305 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
Jason Evans1cb181e2015-01-29 15:30:47 -08002306 zero, tcache, arena);
Jason Evans9c640bf2014-09-11 16:20:44 -07002307 if (unlikely(p == NULL))
Jason Evansd82a5e62013-12-12 22:35:52 -08002308 goto label_oom;
Jason Evansd82a5e62013-12-12 22:35:52 -08002309 } else {
Daniel Micayd33f8342014-10-24 13:18:57 -04002310 p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
Jason Evans1cb181e2015-01-29 15:30:47 -08002311 tcache, arena);
Jason Evans9c640bf2014-09-11 16:20:44 -07002312 if (unlikely(p == NULL))
Jason Evansd82a5e62013-12-12 22:35:52 -08002313 goto label_oom;
Jason Evans9c640bf2014-09-11 16:20:44 -07002314 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
Jason Evansd82a5e62013-12-12 22:35:52 -08002315 usize = isalloc(p, config_prof);
2316 }
Jason Evansdc0610a2015-06-22 18:48:58 -07002317 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
Jason Evansd82a5e62013-12-12 22:35:52 -08002318
2319 if (config_stats) {
Jason Evans5460aa62014-09-22 21:09:23 -07002320 *tsd_thread_allocatedp_get(tsd) += usize;
2321 *tsd_thread_deallocatedp_get(tsd) += old_usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002322 }
2323 UTRACE(ptr, size, p);
Jason Evansbd87b012014-04-15 16:35:08 -07002324 JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
2325 old_rzsize, false, zero);
Jason Evansd82a5e62013-12-12 22:35:52 -08002326 return (p);
2327label_oom:
Jason Evans9c640bf2014-09-11 16:20:44 -07002328 if (config_xmalloc && unlikely(opt_xmalloc)) {
Jason Evansd82a5e62013-12-12 22:35:52 -08002329 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2330 abort();
2331 }
2332 UTRACE(ptr, size, 0);
2333 return (NULL);
2334}
2335
Jason Evansb2c31662014-01-12 15:05:44 -08002336JEMALLOC_ALWAYS_INLINE_C size_t
Christopher Ferrise4294032016-03-02 14:33:02 -08002337ixallocx_helper(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2338 size_t extra, size_t alignment, bool zero)
Jason Evansb2c31662014-01-12 15:05:44 -08002339{
2340 size_t usize;
2341
Christopher Ferrise4294032016-03-02 14:33:02 -08002342 if (ixalloc(tsd, ptr, old_usize, size, extra, alignment, zero))
Jason Evansb2c31662014-01-12 15:05:44 -08002343 return (old_usize);
2344 usize = isalloc(ptr, config_prof);
2345
2346 return (usize);
2347}
2348
2349static size_t
Christopher Ferrise4294032016-03-02 14:33:02 -08002350ixallocx_prof_sample(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2351 size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
Jason Evansb2c31662014-01-12 15:05:44 -08002352{
2353 size_t usize;
2354
Jason Evans602c8e02014-08-18 16:22:13 -07002355 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08002356 return (old_usize);
Christopher Ferrise4294032016-03-02 14:33:02 -08002357 usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, alignment,
2358 zero);
Jason Evansb2c31662014-01-12 15:05:44 -08002359
2360 return (usize);
2361}
2362
2363JEMALLOC_ALWAYS_INLINE_C size_t
Jason Evans5460aa62014-09-22 21:09:23 -07002364ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
Daniel Micaydc652132014-10-30 23:23:16 -04002365 size_t extra, size_t alignment, bool zero)
Jason Evansb2c31662014-01-12 15:05:44 -08002366{
Jason Evansce9a4e32015-09-14 23:31:02 -07002367 size_t usize_max, usize;
Jason Evanscec0d632015-09-14 23:17:25 -07002368 bool prof_active;
Jason Evans6e73dc12014-09-09 19:37:26 -07002369 prof_tctx_t *old_tctx, *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08002370
Jason Evanscec0d632015-09-14 23:17:25 -07002371 prof_active = prof_active_get_unlocked();
Jason Evans602c8e02014-08-18 16:22:13 -07002372 old_tctx = prof_tctx_get(ptr);
Jason Evans6e73dc12014-09-09 19:37:26 -07002373 /*
2374 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2375 * Therefore, compute its maximum possible value and use that in
2376 * prof_alloc_prep() to decide whether to capture a backtrace.
2377 * prof_realloc() will use the actual usize to decide whether to sample.
2378 */
Christopher Ferrise4294032016-03-02 14:33:02 -08002379 if (alignment == 0) {
2380 usize_max = s2u(size+extra);
2381 assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
2382 } else {
2383 usize_max = sa2u(size+extra, alignment);
2384 if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
2385 /*
2386 * usize_max is out of range, and chances are that
2387 * allocation will fail, but use the maximum possible
2388 * value and carry on with prof_alloc_prep(), just in
2389 * case allocation succeeds.
2390 */
2391 usize_max = HUGE_MAXCLASS;
2392 }
2393 }
Jason Evansce9a4e32015-09-14 23:31:02 -07002394 tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
Christopher Ferrise4294032016-03-02 14:33:02 -08002395
Jason Evans9c640bf2014-09-11 16:20:44 -07002396 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
Christopher Ferrise4294032016-03-02 14:33:02 -08002397 usize = ixallocx_prof_sample(tsd, ptr, old_usize, size, extra,
Jason Evans38e2c8f2015-09-17 10:05:56 -07002398 alignment, zero, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08002399 } else {
Christopher Ferrise4294032016-03-02 14:33:02 -08002400 usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
2401 alignment, zero);
Jason Evansb2c31662014-01-12 15:05:44 -08002402 }
Jason Evans38e2c8f2015-09-17 10:05:56 -07002403 if (usize == old_usize) {
Jason Evans5460aa62014-09-22 21:09:23 -07002404 prof_alloc_rollback(tsd, tctx, false);
Jason Evansb2c31662014-01-12 15:05:44 -08002405 return (usize);
Jason Evans6e73dc12014-09-09 19:37:26 -07002406 }
Jason Evans708ed792015-09-14 23:48:11 -07002407 prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
Jason Evanscec0d632015-09-14 23:17:25 -07002408 old_tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08002409
2410 return (usize);
2411}
2412
Jason Evans00632602015-07-21 08:10:38 -07002413JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
Jason Evansd82a5e62013-12-12 22:35:52 -08002414je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2415{
Jason Evans5460aa62014-09-22 21:09:23 -07002416 tsd_t *tsd;
Jason Evans66576932013-12-15 16:21:30 -08002417 size_t usize, old_usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002418 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evansb718cf72014-09-07 14:40:19 -07002419 size_t alignment = MALLOCX_ALIGN_GET(flags);
Jason Evansd82a5e62013-12-12 22:35:52 -08002420 bool zero = flags & MALLOCX_ZERO;
Jason Evansd82a5e62013-12-12 22:35:52 -08002421
2422 assert(ptr != NULL);
2423 assert(size != 0);
2424 assert(SIZE_T_MAX - size >= extra);
Jason Evans10aff3f2015-01-20 15:37:51 -08002425 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansd82a5e62013-12-12 22:35:52 -08002426 malloc_thread_init();
Jason Evans029d44c2014-10-04 11:12:53 -07002427 tsd = tsd_fetch();
Jason Evansd82a5e62013-12-12 22:35:52 -08002428
Jason Evansb2c31662014-01-12 15:05:44 -08002429 old_usize = isalloc(ptr, config_prof);
Jason Evans9a505b72015-09-15 14:39:58 -07002430
Christopher Ferrise4294032016-03-02 14:33:02 -08002431 /*
2432 * The API explicitly absolves itself of protecting against (size +
2433 * extra) numerical overflow, but we may need to clamp extra to avoid
2434 * exceeding HUGE_MAXCLASS.
2435 *
2436 * Ordinarily, size limit checking is handled deeper down, but here we
2437 * have to check as part of (size + extra) clamping, since we need the
2438 * clamped value in the above helper functions.
2439 */
2440 if (unlikely(size > HUGE_MAXCLASS)) {
2441 usize = old_usize;
2442 goto label_not_resized;
Jason Evans9a505b72015-09-15 14:39:58 -07002443 }
Christopher Ferrise4294032016-03-02 14:33:02 -08002444 if (unlikely(HUGE_MAXCLASS - size < extra))
2445 extra = HUGE_MAXCLASS - size;
Jason Evans9a505b72015-09-15 14:39:58 -07002446
Jason Evans9c640bf2014-09-11 16:20:44 -07002447 if (config_valgrind && unlikely(in_valgrind))
Jason Evansb2c31662014-01-12 15:05:44 -08002448 old_rzsize = u2rz(old_usize);
Jason Evansd82a5e62013-12-12 22:35:52 -08002449
2450 if (config_prof && opt_prof) {
Jason Evans5460aa62014-09-22 21:09:23 -07002451 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
Daniel Micaydc652132014-10-30 23:23:16 -04002452 alignment, zero);
Jason Evansd82a5e62013-12-12 22:35:52 -08002453 } else {
Christopher Ferrise4294032016-03-02 14:33:02 -08002454 usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
2455 alignment, zero);
Jason Evansd82a5e62013-12-12 22:35:52 -08002456 }
Jason Evans9c640bf2014-09-11 16:20:44 -07002457 if (unlikely(usize == old_usize))
Jason Evansb2c31662014-01-12 15:05:44 -08002458 goto label_not_resized;
Jason Evansd82a5e62013-12-12 22:35:52 -08002459
2460 if (config_stats) {
Jason Evans5460aa62014-09-22 21:09:23 -07002461 *tsd_thread_allocatedp_get(tsd) += usize;
2462 *tsd_thread_deallocatedp_get(tsd) += old_usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002463 }
Jason Evansbd87b012014-04-15 16:35:08 -07002464 JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
2465 old_rzsize, false, zero);
Jason Evansb2c31662014-01-12 15:05:44 -08002466label_not_resized:
Jason Evansd82a5e62013-12-12 22:35:52 -08002467 UTRACE(ptr, size, ptr);
2468 return (usize);
2469}
2470
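/*
 * Caller-side sketch of the xallocx() contract implemented above: the
 * allocation is never moved, and the return value is the resulting usable
 * size, which reaches the requested size only if the in-place resize
 * succeeded.  Assumes the default unprefixed public names that the je_*
 * symbols are mangled to; grow_in_place_or_move() is an illustrative helper,
 * not part of jemalloc.
 */
#if 0	/* Usage sketch only; not compiled as part of this file. */
#include <jemalloc/jemalloc.h>

static void *
grow_in_place_or_move(void *p, size_t new_size)
{

	if (xallocx(p, new_size, 0, 0) >= new_size)
		return (p);			/* Grew in place. */
	return (rallocx(p, new_size, 0));	/* Fall back; may move. */
}
#endif
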
Jason Evans00632602015-07-21 08:10:38 -07002471JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2472JEMALLOC_ATTR(pure)
Jason Evansd82a5e62013-12-12 22:35:52 -08002473je_sallocx(const void *ptr, int flags)
2474{
2475 size_t usize;
Jason Evans289053c2009-06-22 12:08:42 -07002476
Jason Evans10aff3f2015-01-20 15:37:51 -08002477 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansbbe29d32013-01-30 15:03:11 -08002478 malloc_thread_init();
Jason Evans8e3c3c62010-09-17 15:46:18 -07002479
Jason Evanse0a08a12015-03-18 21:06:58 -07002480 if (config_ivsalloc)
Jason Evansd82a5e62013-12-12 22:35:52 -08002481 usize = ivsalloc(ptr, config_prof);
Jason Evanscbf3a6d2015-02-11 12:24:27 -08002482 else
Jason Evansd82a5e62013-12-12 22:35:52 -08002483 usize = isalloc(ptr, config_prof);
Jason Evans289053c2009-06-22 12:08:42 -07002484
Jason Evansd82a5e62013-12-12 22:35:52 -08002485 return (usize);
Jason Evans289053c2009-06-22 12:08:42 -07002486}
2487
Jason Evans00632602015-07-21 08:10:38 -07002488JEMALLOC_EXPORT void JEMALLOC_NOTHROW
Jason Evansd82a5e62013-12-12 22:35:52 -08002489je_dallocx(void *ptr, int flags)
Jason Evans4201af02010-01-24 02:53:40 -08002490{
Jason Evans8bb31982014-10-07 23:14:57 -07002491 tsd_t *tsd;
Jason Evans1cb181e2015-01-29 15:30:47 -08002492 tcache_t *tcache;
Jason Evans4201af02010-01-24 02:53:40 -08002493
Jason Evansd82a5e62013-12-12 22:35:52 -08002494 assert(ptr != NULL);
Jason Evans10aff3f2015-01-20 15:37:51 -08002495 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansd82a5e62013-12-12 22:35:52 -08002496
Jason Evans8bb31982014-10-07 23:14:57 -07002497 tsd = tsd_fetch();
Jason Evans1cb181e2015-01-29 15:30:47 -08002498 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2499 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2500 tcache = NULL;
2501 else
2502 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
Jason Evansd82a5e62013-12-12 22:35:52 -08002503 } else
Jason Evans1cb181e2015-01-29 15:30:47 -08002504 tcache = tcache_get(tsd, false);
Jason Evansd82a5e62013-12-12 22:35:52 -08002505
2506 UTRACE(ptr, 0, 0);
Qi Wangf4a0f322015-10-27 15:12:10 -07002507	ifree(tsd, ptr, tcache, true);
Jason Evansd82a5e62013-12-12 22:35:52 -08002508}
2509
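/*
 * Sketch of the explicit-tcache path handled above, assuming the default
 * unprefixed public names: "tcache.create" yields an index that
 * MALLOCX_TCACHE() encodes into flags, and MALLOCX_TCACHE_NONE bypasses
 * caching entirely.  explicit_tcache_example() is illustrative only.
 */
#if 0	/* Usage sketch only; not compiled as part of this file. */
#include <jemalloc/jemalloc.h>

static void
explicit_tcache_example(void)
{
	unsigned tci;
	size_t sz = sizeof(tci);
	void *p;

	if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0)
		return;
	p = mallocx(64, MALLOCX_TCACHE(tci));
	if (p != NULL)
		dallocx(p, MALLOCX_TCACHE(tci));
	mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
}
#endif
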
Jason Evansa2260c92014-09-09 10:29:26 -07002510JEMALLOC_ALWAYS_INLINE_C size_t
2511inallocx(size_t size, int flags)
2512{
2513 size_t usize;
2514
Jason Evans9c640bf2014-09-11 16:20:44 -07002515 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
Jason Evansa2260c92014-09-09 10:29:26 -07002516 usize = s2u(size);
2517 else
2518 usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
Jason Evansa2260c92014-09-09 10:29:26 -07002519 return (usize);
2520}
2521
Jason Evans00632602015-07-21 08:10:38 -07002522JEMALLOC_EXPORT void JEMALLOC_NOTHROW
Daniel Micay4cfe5512014-08-28 15:41:48 -04002523je_sdallocx(void *ptr, size_t size, int flags)
2524{
Jason Evans8bb31982014-10-07 23:14:57 -07002525 tsd_t *tsd;
Jason Evans1cb181e2015-01-29 15:30:47 -08002526 tcache_t *tcache;
Jason Evansa2260c92014-09-09 10:29:26 -07002527 size_t usize;
Daniel Micay4cfe5512014-08-28 15:41:48 -04002528
2529 assert(ptr != NULL);
Jason Evans10aff3f2015-01-20 15:37:51 -08002530 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansa2260c92014-09-09 10:29:26 -07002531 usize = inallocx(size, flags);
2532 assert(usize == isalloc(ptr, config_prof));
Daniel Micay4cfe5512014-08-28 15:41:48 -04002533
Jason Evans8bb31982014-10-07 23:14:57 -07002534 tsd = tsd_fetch();
Jason Evans1cb181e2015-01-29 15:30:47 -08002535 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2536 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2537 tcache = NULL;
2538 else
2539 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
Daniel Micay4cfe5512014-08-28 15:41:48 -04002540 } else
Jason Evans1cb181e2015-01-29 15:30:47 -08002541 tcache = tcache_get(tsd, false);
Daniel Micay4cfe5512014-08-28 15:41:48 -04002542
2543 UTRACE(ptr, 0, 0);
Jason Evans1cb181e2015-01-29 15:30:47 -08002544 isfree(tsd, ptr, usize, tcache);
Daniel Micay4cfe5512014-08-28 15:41:48 -04002545}
2546
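/*
 * Sketch of the sized-deallocation contract asserted above, assuming the
 * default unprefixed public names: the size passed to sdallocx() must fall
 * between the original requested size and the usable size, with flags that
 * match the allocation, so that the size-class computation agrees with
 * isalloc().
 */
#if 0	/* Usage sketch only; not compiled as part of this file. */
#include <jemalloc/jemalloc.h>

static void
sized_dealloc_example(void)
{
	void *p;

	p = mallocx(48, 0);
	if (p != NULL)
		sdallocx(p, 48, 0);	/* Same size/flags as mallocx(). */
}
#endif
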
Jason Evans00632602015-07-21 08:10:38 -07002547JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2548JEMALLOC_ATTR(pure)
Jason Evansd82a5e62013-12-12 22:35:52 -08002549je_nallocx(size_t size, int flags)
2550{
Christopher Ferrise4294032016-03-02 14:33:02 -08002551 size_t usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002552
2553 assert(size != 0);
2554
Daniel Micay23fdf8b2014-09-09 15:26:05 -04002555 if (unlikely(malloc_init()))
Jason Evansd82a5e62013-12-12 22:35:52 -08002556 return (0);
2557
Christopher Ferrise4294032016-03-02 14:33:02 -08002558 usize = inallocx(size, flags);
2559 if (unlikely(usize > HUGE_MAXCLASS))
2560 return (0);
2561
2562 return (usize);
Jason Evans4201af02010-01-24 02:53:40 -08002563}
2564
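/*
 * Sketch, assuming the default unprefixed public names: nallocx() reports
 * the usable size mallocx() would produce for the same (size, flags) pair
 * without allocating anything; a return of 0 signals that the request would
 * fail (e.g. it exceeds HUGE_MAXCLASS, as checked above).
 * would_be_usable() is an illustrative helper.
 */
#if 0	/* Usage sketch only; not compiled as part of this file. */
#include <jemalloc/jemalloc.h>

static size_t
would_be_usable(size_t size)
{
	size_t usize = nallocx(size, MALLOCX_ALIGN(64));

	return (usize);	/* 0 ==> the request would fail. */
}
#endif
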
Jason Evans00632602015-07-21 08:10:38 -07002565JEMALLOC_EXPORT int JEMALLOC_NOTHROW
Jason Evans0a5489e2012-03-01 17:19:20 -08002566je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
Jason Evans3c234352010-01-27 13:10:55 -08002567 size_t newlen)
2568{
2569
Daniel Micay23fdf8b2014-09-09 15:26:05 -04002570 if (unlikely(malloc_init()))
Jason Evans95833312010-01-27 13:45:21 -08002571 return (EAGAIN);
2572
Jason Evans3c234352010-01-27 13:10:55 -08002573 return (ctl_byname(name, oldp, oldlenp, newp, newlen));
2574}
2575
Jason Evans00632602015-07-21 08:10:38 -07002576JEMALLOC_EXPORT int JEMALLOC_NOTHROW
Jason Evans0a5489e2012-03-01 17:19:20 -08002577je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
Jason Evans3c234352010-01-27 13:10:55 -08002578{
2579
Daniel Micay23fdf8b2014-09-09 15:26:05 -04002580 if (unlikely(malloc_init()))
Jason Evans95833312010-01-27 13:45:21 -08002581 return (EAGAIN);
2582
Jason Evans3c234352010-01-27 13:10:55 -08002583 return (ctl_nametomib(name, mibp, miblenp));
2584}
2585
Jason Evans00632602015-07-21 08:10:38 -07002586JEMALLOC_EXPORT int JEMALLOC_NOTHROW
Jason Evans0a5489e2012-03-01 17:19:20 -08002587je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2588 void *newp, size_t newlen)
Jason Evans3c234352010-01-27 13:10:55 -08002589{
2590
Daniel Micay23fdf8b2014-09-09 15:26:05 -04002591 if (unlikely(malloc_init()))
Jason Evans95833312010-01-27 13:45:21 -08002592 return (EAGAIN);
2593
Jason Evans3c234352010-01-27 13:10:55 -08002594 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
2595}
2596
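/*
 * Sketch of the ctl interface exposed above, assuming the default unprefixed
 * public names: statistics are refreshed by writing the "epoch" node, and
 * hot callers can translate a name to a MIB once with mallctlnametomib() and
 * then reuse it via mallctlbymib() to skip name parsing.
 * current_allocated() is an illustrative helper.
 */
#if 0	/* Usage sketch only; not compiled as part of this file. */
#include <stdint.h>
#include <jemalloc/jemalloc.h>

static size_t
current_allocated(void)
{
	uint64_t epoch = 1;
	size_t sz, allocated;
	size_t mib[2];
	size_t miblen = 2;

	sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);	/* Refresh stats. */

	if (mallctlnametomib("stats.allocated", mib, &miblen) != 0)
		return (0);
	sz = sizeof(allocated);
	if (mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0) != 0)
		return (0);
	return (allocated);
}
#endif
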
Jason Evans00632602015-07-21 08:10:38 -07002597JEMALLOC_EXPORT void JEMALLOC_NOTHROW
Jason Evansd82a5e62013-12-12 22:35:52 -08002598je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2599 const char *opts)
2600{
2601
2602 stats_print(write_cb, cbopaque, opts);
2603}
2604
Jason Evans00632602015-07-21 08:10:38 -07002605JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
Jason Evansd82a5e62013-12-12 22:35:52 -08002606je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
2607{
2608 size_t ret;
2609
Jason Evans10aff3f2015-01-20 15:37:51 -08002610 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansd82a5e62013-12-12 22:35:52 -08002611 malloc_thread_init();
2612
Jason Evanse0a08a12015-03-18 21:06:58 -07002613 if (config_ivsalloc)
Jason Evansd82a5e62013-12-12 22:35:52 -08002614 ret = ivsalloc(ptr, config_prof);
2615 else
Jason Evanscbf3a6d2015-02-11 12:24:27 -08002616 ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
Jason Evansd82a5e62013-12-12 22:35:52 -08002617
2618 return (ret);
2619}
2620
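/*
 * Sketch of the difference between the two size queries above, assuming the
 * default unprefixed public names: sallocx() requires a live allocation,
 * while malloc_usable_size() additionally tolerates NULL, returning 0 as in
 * the implementation above.
 */
#if 0	/* Usage sketch only; not compiled as part of this file. */
#include <assert.h>
#include <jemalloc/jemalloc.h>

static void
size_query_example(void)
{
	void *p = mallocx(100, 0);

	assert(malloc_usable_size(NULL) == 0);
	if (p != NULL) {
		assert(sallocx(p, 0) == malloc_usable_size(p));
		dallocx(p, 0);
	}
}
#endif
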
Jason Evans7e77eaf2012-03-02 17:47:37 -08002621/*
2622 * End non-standard functions.
2623 */
2624/******************************************************************************/
2625/*
Jason Evans289053c2009-06-22 12:08:42 -07002626 * The following functions are used by threading libraries for protection of
Jason Evans28177d42010-09-20 11:24:24 -07002627 * malloc during fork().
Jason Evans289053c2009-06-22 12:08:42 -07002628 */
2629
Jason Evans20f1fc92012-10-09 14:46:22 -07002630/*
2631 * If an application creates a thread before doing any allocation in the main
2632 * thread, then calls fork(2) in the main thread followed by memory allocation
2633 * in the child process, a race can occur that results in deadlock within the
2634 * child: the main thread may have forked while the created thread had
2635 * partially initialized the allocator. Ordinarily jemalloc prevents
2636 * fork/malloc races via the following functions it registers during
2637 * initialization using pthread_atfork(), but of course that does no good if
2638 * the allocator isn't fully initialized at fork time. The following library
Jason Evans9b756772014-10-10 18:19:20 -07002639 * constructor is a partial solution to this problem. It may still be possible
2640 * to trigger the deadlock described above, but doing so would involve forking
2641 * via a library constructor that runs before jemalloc's constructor runs.
Jason Evans20f1fc92012-10-09 14:46:22 -07002642 */
2643JEMALLOC_ATTR(constructor)
2644static void
2645jemalloc_constructor(void)
2646{
2647
2648 malloc_init();
2649}
2650
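/*
 * For reference, the prefork/postfork handlers below are registered during
 * initialization, on platforms without JEMALLOC_MUTEX_INIT_CB, roughly as
 * follows (the actual call lives in the malloc_init_hard() path):
 */
#if 0	/* Registration sketch only; not compiled as part of this file. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif
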
Jason Evans41b6afb2012-02-02 22:04:57 -08002651#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07002652void
Jason Evans804c9ec2009-06-22 17:44:33 -07002653jemalloc_prefork(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08002654#else
Mike Hommeyda99e312012-04-30 12:38:29 +02002655JEMALLOC_EXPORT void
Jason Evans41b6afb2012-02-02 22:04:57 -08002656_malloc_prefork(void)
2657#endif
Jason Evans289053c2009-06-22 12:08:42 -07002658{
Christopher Ferrise4294032016-03-02 14:33:02 -08002659 unsigned i, narenas;
Jason Evans289053c2009-06-22 12:08:42 -07002660
Jason Evans58ad1e42012-05-11 17:40:16 -07002661#ifdef JEMALLOC_MUTEX_INIT_CB
Jason Evans10aff3f2015-01-20 15:37:51 -08002662 if (!malloc_initialized())
Jason Evans58ad1e42012-05-11 17:40:16 -07002663 return;
2664#endif
Jason Evans10aff3f2015-01-20 15:37:51 -08002665 assert(malloc_initialized());
Jason Evans58ad1e42012-05-11 17:40:16 -07002666
Jason Evans289053c2009-06-22 12:08:42 -07002667 /* Acquire all mutexes in a safe order. */
Jason Evans20f1fc92012-10-09 14:46:22 -07002668 ctl_prefork();
Jason Evans88c222c2013-02-06 11:59:30 -08002669 prof_prefork();
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002670 malloc_mutex_prefork(&arenas_lock);
Christopher Ferrise4294032016-03-02 14:33:02 -08002671 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2672 arena_t *arena;
2673
2674 if ((arena = arena_get(i, false)) != NULL)
2675 arena_prefork(arena);
Jason Evansfbbb6242010-01-24 17:56:48 -08002676 }
Jason Evansb5225922012-10-09 16:16:00 -07002677 chunk_prefork();
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002678 base_prefork();
Jason Evans289053c2009-06-22 12:08:42 -07002679}
2680
Jason Evans41b6afb2012-02-02 22:04:57 -08002681#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07002682void
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002683jemalloc_postfork_parent(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08002684#else
Mike Hommeyda99e312012-04-30 12:38:29 +02002685JEMALLOC_EXPORT void
Jason Evans41b6afb2012-02-02 22:04:57 -08002686_malloc_postfork(void)
2687#endif
Jason Evans289053c2009-06-22 12:08:42 -07002688{
Christopher Ferrise4294032016-03-02 14:33:02 -08002689 unsigned i, narenas;
Jason Evans289053c2009-06-22 12:08:42 -07002690
Jason Evans58ad1e42012-05-11 17:40:16 -07002691#ifdef JEMALLOC_MUTEX_INIT_CB
Jason Evans10aff3f2015-01-20 15:37:51 -08002692 if (!malloc_initialized())
Jason Evans58ad1e42012-05-11 17:40:16 -07002693 return;
2694#endif
Jason Evans10aff3f2015-01-20 15:37:51 -08002695 assert(malloc_initialized());
Jason Evans58ad1e42012-05-11 17:40:16 -07002696
Jason Evans289053c2009-06-22 12:08:42 -07002697 /* Release all mutexes, now that fork() has completed. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002698 base_postfork_parent();
Jason Evansb5225922012-10-09 16:16:00 -07002699 chunk_postfork_parent();
Christopher Ferrise4294032016-03-02 14:33:02 -08002700 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2701 arena_t *arena;
2702
2703 if ((arena = arena_get(i, false)) != NULL)
2704 arena_postfork_parent(arena);
Jason Evans289053c2009-06-22 12:08:42 -07002705 }
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002706 malloc_mutex_postfork_parent(&arenas_lock);
Jason Evans88c222c2013-02-06 11:59:30 -08002707 prof_postfork_parent();
Jason Evans20f1fc92012-10-09 14:46:22 -07002708 ctl_postfork_parent();
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002709}
2710
2711void
2712jemalloc_postfork_child(void)
2713{
Christopher Ferrise4294032016-03-02 14:33:02 -08002714 unsigned i, narenas;
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002715
Jason Evans10aff3f2015-01-20 15:37:51 -08002716 assert(malloc_initialized());
Jason Evans58ad1e42012-05-11 17:40:16 -07002717
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002718 /* Release all mutexes, now that fork() has completed. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002719 base_postfork_child();
Jason Evansb5225922012-10-09 16:16:00 -07002720 chunk_postfork_child();
Christopher Ferrise4294032016-03-02 14:33:02 -08002721 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2722 arena_t *arena;
2723
2724 if ((arena = arena_get(i, false)) != NULL)
2725 arena_postfork_child(arena);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002726 }
2727 malloc_mutex_postfork_child(&arenas_lock);
Jason Evans88c222c2013-02-06 11:59:30 -08002728 prof_postfork_child();
Jason Evans20f1fc92012-10-09 14:46:22 -07002729 ctl_postfork_child();
Jason Evans289053c2009-06-22 12:08:42 -07002730}
Jason Evans2dbecf12010-09-05 10:35:13 -07002731
2732/******************************************************************************/
Christopher Ferris6f50cbc2015-09-09 12:17:01 -07002733
2734/* ANDROID extension */
Christopher Ferris54d4dfa2016-03-02 16:24:07 -08002735arena_t *
a0get(void)
{

	assert(a0 != NULL);
	return (a0);
}
2740
Colin Cross368f61e2015-12-29 16:56:53 -08002741#include "android_je_iterate.c"
Christopher Ferris6f50cbc2015-09-09 12:17:01 -07002742#include "android_je_mallinfo.c"
2743/* End ANDROID extension */