#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Runtime configuration options. */
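/*
 * je_malloc_conf is declared weak (except on Windows) so that an application
 * can provide its own definition to embed option defaults at link time, e.g.
 * (illustrative): const char *je_malloc_conf = "narenas:1,stats_print:true";
 */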
const char *je_malloc_conf
#ifndef _WIN32
    JEMALLOC_ATTR(weak)
#endif
    ;
bool opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
const char *opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
unsigned opt_narenas = 0;

/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;

unsigned ncpus;

/* Protects arenas initialization. */
static malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
arena_t **arenas;
static unsigned narenas_total; /* Use narenas_total_*(). */
static arena_t *a0; /* arenas[0]; read-only after initialization. */
unsigned narenas_auto; /* Read-only after initialization. */

typedef enum {
	malloc_init_uninitialized = 3,
	malloc_init_a0_initialized = 2,
	malloc_init_recursible = 1,
	malloc_init_initialized = 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t malloc_init_state = malloc_init_uninitialized;

/* False should be the common case.  Set to true to trigger initialization. */
static bool malloc_slow = true;

/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
	flag_opt_junk_alloc = (1U),
	flag_opt_junk_free = (1U << 1),
	flag_opt_quarantine = (1U << 2),
	flag_opt_zero = (1U << 3),
	flag_opt_utrace = (1U << 4),
	flag_in_valgrind = (1U << 5),
	flag_opt_xmalloc = (1U << 6)
};
static uint8_t malloc_slow_flags;

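/*
 * pind2sz_tab maps each page size class index to its size in bytes.  Entries
 * are generated by expanding SIZE_CLASSES and keeping only the classes for
 * which psz is "yes".
 */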
JEMALLOC_ALIGNED(CACHELINE)
const size_t pind2sz_tab[NPSIZES] = {
#define PSZ_yes(lg_grp, ndelta, lg_delta) \
	(((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
#define PSZ_no(lg_grp, ndelta, lg_delta)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
	PSZ_##psz(lg_grp, ndelta, lg_delta)
	SIZE_CLASSES
#undef PSZ_yes
#undef PSZ_no
#undef SC
};

JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES] = {
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
	((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
	SIZE_CLASSES
#undef SC
};

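/*
 * size2index_tab is the small-size lookup table: each S2B_k(i) below expands
 * to 2^(k - LG_TINY_MIN) copies of index i, so the table can be indexed by
 * (size - 1) >> LG_TINY_MIN to obtain the corresponding size class index.
 */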
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#define S2B_0(i) i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define S2B_1(i) i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define S2B_2(i) i,
#elif LG_TINY_MIN == 3
#define S2B_3(i) i,
#elif LG_TINY_MIN == 4
#define S2B_4(i) i,
#elif LG_TINY_MIN == 5
#define S2B_5(i) i,
#elif LG_TINY_MIN == 6
#define S2B_6(i) i,
#elif LG_TINY_MIN == 7
#define S2B_7(i) i,
#elif LG_TINY_MIN == 8
#define S2B_8(i) i,
#elif LG_TINY_MIN == 9
#define S2B_9(i) i,
#elif LG_TINY_MIN == 10
#define S2B_10(i) i,
#elif LG_TINY_MIN == 11
#define S2B_11(i) i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define S2B_1(i) S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define S2B_2(i) S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define S2B_3(i) S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define S2B_4(i) S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define S2B_5(i) S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define S2B_6(i) S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define S2B_7(i) S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define S2B_8(i) S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define S2B_9(i) S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define S2B_10(i) S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
	S2B_##lg_delta_lookup(index)
	SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_10
#undef S2B_11
#undef S2B_no
#undef SC
};

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	/*
	 * If another constructor in the same binary is using mallctl to e.g.
	 * set up chunk hooks, it may end up running before this one, and
	 * malloc_init_hard will crash trying to lock the uninitialized lock.
	 * So we force an initialization of the lock in malloc_init_hard as
	 * well.  We don't try to care about atomicity of the accesses to the
	 * init_lock_initialized boolean, since it really only matters early
	 * in the process creation, before any separate thread normally starts
	 * doing anything.
	 */
	if (!init_lock_initialized)
		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
	init_lock_initialized = true;
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void *p;	/* Input pointer (as in realloc(p, s)). */
	size_t s;	/* Request size. */
	void *r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
	if (unlikely(opt_utrace)) { \
		int utrace_serrno = errno; \
		malloc_utrace_t ut; \
		ut.p = (a); \
		ut.s = (b); \
		ut.r = (c); \
		utrace(&ut, sizeof(ut)); \
		errno = utrace_serrno; \
	} \
} while (0)
#else
# define UTRACE(a, b, c)
#endif
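
/*
 * Typical usage in the allocation entry points later in this file:
 * UTRACE(0, size, ret) for malloc-like calls, UTRACE(ptr, size, ret) for
 * realloc, and UTRACE(ptr, 0, 0) for free.  Each emits a utrace(2) record
 * only when opt_utrace is enabled.
 */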

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool malloc_init_hard_a0(void);
static bool malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
malloc_initialized(void)
{

	return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

	/*
	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption.  The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	 * allocation events.
	 */
	if (config_fill && unlikely(opt_quarantine))
		quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init_a0(void)
{

	if (unlikely(malloc_init_state == malloc_init_uninitialized))
		return (malloc_init_hard_a0());
	return (false);
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

	if (unlikely(!malloc_initialized()) && malloc_init_hard())
		return (true);
	malloc_thread_init();

	return (false);
}

/*
 * The a0*() functions are used instead of i{d,}alloc() in situations that
 * cannot tolerate TLS variable access.
 */

static void *
a0ialloc(size_t size, bool zero, bool is_metadata)
{

	if (unlikely(malloc_init_a0()))
		return (NULL);

	return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
	    is_metadata, arena_get(TSDN_NULL, 0, true), true));
}

static void
a0idalloc(void *ptr, bool is_metadata)
{

	idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
}

arena_t *
a0get(void)
{

	return (a0);
}

void *
a0malloc(size_t size)
{

	return (a0ialloc(size, false, true));
}

void
a0dalloc(void *ptr)
{

	a0idalloc(ptr, true);
}

/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size)
{

	if (unlikely(size == 0))
		size = 1;

	return (a0ialloc(size, false, false));
}

void *
bootstrap_calloc(size_t num, size_t size)
{
	size_t num_size;

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		assert(num == 0 || size == 0);
		num_size = 1;
	}

	return (a0ialloc(num_size, true, false));
}

void
bootstrap_free(void *ptr)
{

	if (unlikely(ptr == NULL))
		return;

	a0idalloc(ptr, false);
}

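/*
 * arenas[] and narenas_total are accessed via the atomic wrappers below so
 * that readers (e.g. arena_get()) need not hold arenas_lock; writers are
 * serialized by arenas_lock.
 */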
static void
arena_set(unsigned ind, arena_t *arena)
{

	atomic_write_p((void **)&arenas[ind], arena);
}

static void
narenas_total_set(unsigned narenas)
{

	atomic_write_u(&narenas_total, narenas);
}

static void
narenas_total_inc(void)
{

	atomic_add_u(&narenas_total, 1);
}

unsigned
narenas_total_get(void)
{

	return (atomic_read_u(&narenas_total));
}

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;

	assert(ind <= narenas_total_get());
	if (ind > MALLOCX_ARENA_MAX)
		return (NULL);
	if (ind == narenas_total_get())
		narenas_total_inc();

	/*
	 * Another thread may have already initialized arenas[ind] if it's an
	 * auto arena.
	 */
	arena = arena_get(tsdn, ind, false);
	if (arena != NULL) {
		assert(ind < narenas_auto);
		return (arena);
	}

	/* Actually initialize the arena. */
	arena = arena_new(tsdn, ind);
	arena_set(ind, arena);
	return (arena);
}

arena_t *
arena_init(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;

	malloc_mutex_lock(tsdn, &arenas_lock);
	arena = arena_init_locked(tsdn, ind);
	malloc_mutex_unlock(tsdn, &arenas_lock);
	return (arena);
}

static void
arena_bind(tsd_t *tsd, unsigned ind, bool internal)
{
	arena_t *arena;

	if (!tsd_nominal(tsd))
		return;

	arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_inc(arena, internal);

	if (internal)
		tsd_iarena_set(tsd, arena);
	else
		tsd_arena_set(tsd, arena);
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
	arena_t *oldarena, *newarena;

	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
	newarena = arena_get(tsd_tsdn(tsd), newind, false);
	arena_nthreads_dec(oldarena, false);
	arena_nthreads_inc(newarena, false);
	tsd_arena_set(tsd, newarena);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
{
	arena_t *arena;

	arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_dec(arena, internal);
	if (internal)
		tsd_iarena_set(tsd, NULL);
	else
		tsd_arena_set(tsd, NULL);
}

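/*
 * Called when tsd's arenas_tdata cache lacks an entry for arena index ind:
 * allocate (or grow) the per-thread tdata array, carry over any existing
 * decay tickers, and return a pointer to the entry for ind.
 */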
arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
{
	arena_tdata_t *tdata, *arenas_tdata_old;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
	unsigned narenas_tdata_old, i;
	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
	unsigned narenas_actual = narenas_total_get();

	/*
	 * Dissociate old tdata array (and set up for deallocation upon return)
	 * if it's too small.
	 */
	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
		arenas_tdata_old = arenas_tdata;
		narenas_tdata_old = narenas_tdata;
		arenas_tdata = NULL;
		narenas_tdata = 0;
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	} else {
		arenas_tdata_old = NULL;
		narenas_tdata_old = 0;
	}

	/* Allocate tdata array if it's missing. */
	if (arenas_tdata == NULL) {
		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
			*arenas_tdata_bypassp = true;
			arenas_tdata = (arena_tdata_t *)a0malloc(
			    sizeof(arena_tdata_t) * narenas_tdata);
			*arenas_tdata_bypassp = false;
		}
		if (arenas_tdata == NULL) {
			tdata = NULL;
			goto label_return;
		}
		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	}

	/*
	 * Copy to tdata array.  It's possible that the actual number of arenas
	 * has increased since narenas_total_get() was called above, but that
	 * causes no correctness issues unless two threads concurrently execute
	 * the arenas.extend mallctl, which we trust mallctl synchronization to
	 * prevent.
	 */

	/* Copy/initialize tickers. */
	for (i = 0; i < narenas_actual; i++) {
		if (i < narenas_tdata_old) {
			ticker_copy(&arenas_tdata[i].decay_ticker,
			    &arenas_tdata_old[i].decay_ticker);
		} else {
			ticker_init(&arenas_tdata[i].decay_ticker,
			    DECAY_NTICKS_PER_UPDATE);
		}
	}
	if (narenas_tdata > narenas_actual) {
		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
		    * (narenas_tdata - narenas_actual));
	}

	/* Read the refreshed tdata array. */
	tdata = &arenas_tdata[ind];
label_return:
	if (arenas_tdata_old != NULL)
		a0dalloc(arenas_tdata_old);
	return (tdata);
}

/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal)
{
	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);

	if (narenas_auto > 1) {
		unsigned i, j, choose[2], first_null;

		/*
		 * Determine binding for both non-internal and internal
		 * allocation.
		 *
		 *   choose[0]: For application allocation.
		 *   choose[1]: For internal metadata allocation.
		 */

		for (j = 0; j < 2; j++)
			choose[j] = 0;

		first_null = narenas_auto;
		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				for (j = 0; j < 2; j++) {
					if (arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), i, false), !!j) <
					    arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), choose[j], false),
					    !!j))
						choose[j] = i;
				}
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		for (j = 0; j < 2; j++) {
			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
			    choose[j], false), !!j) == 0 || first_null ==
			    narenas_auto) {
				/*
				 * Use an unloaded arena, or the least loaded
				 * arena if all arenas are already initialized.
				 */
				if (!!j == internal) {
					ret = arena_get(tsd_tsdn(tsd),
					    choose[j], false);
				}
			} else {
				arena_t *arena;

				/* Initialize a new arena. */
				choose[j] = first_null;
				arena = arena_init_locked(tsd_tsdn(tsd),
				    choose[j]);
				if (arena == NULL) {
					malloc_mutex_unlock(tsd_tsdn(tsd),
					    &arenas_lock);
					return (NULL);
				}
				if (!!j == internal)
					ret = arena;
			}
			arena_bind(tsd, choose[j], !!j);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
	} else {
		ret = arena_get(tsd_tsdn(tsd), 0, false);
		arena_bind(tsd, 0, false);
		arena_bind(tsd, 0, true);
	}

	return (ret);
}

void
thread_allocated_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

void
thread_deallocated_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

void
iarena_cleanup(tsd_t *tsd)
{
	arena_t *iarena;

	iarena = tsd_iarena_get(tsd);
	if (iarena != NULL)
		arena_unbind(tsd, iarena->ind, true);
}

void
arena_cleanup(tsd_t *tsd)
{
	arena_t *arena;

	arena = tsd_arena_get(tsd);
	if (arena != NULL)
		arena_unbind(tsd, arena->ind, false);
}

void
arenas_tdata_cleanup(tsd_t *tsd)
{
	arena_tdata_t *arenas_tdata;

	/* Prevent tsd->arenas_tdata from being (re)created. */
	*tsd_arenas_tdata_bypassp_get(tsd) = true;

	arenas_tdata = tsd_arenas_tdata_get(tsd);
	if (arenas_tdata != NULL) {
		tsd_arenas_tdata_set(tsd, NULL);
		a0dalloc(arenas_tdata);
	}
}

void
narenas_tdata_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

void
arenas_tdata_bypass_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

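/*
 * Registered via atexit() in malloc_init_hard_a0_locked() when
 * opt_stats_print is enabled.
 */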
static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		tsdn_t *tsdn;
		unsigned narenas, i;

		tsdn = tsdn_fetch();

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arena_get(tsdn, i, false);
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(tsdn, &arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tsdn, tcache, arena);
				}
				malloc_mutex_unlock(tsdn, &arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

#ifndef JEMALLOC_HAVE_SECURE_GETENV
static char *
secure_getenv(const char *name)
{

# ifdef JEMALLOC_HAVE_ISSETUGID
	if (issetugid() != 0)
		return (NULL);
# endif
	return (getenv(name));
}
#endif

static unsigned
malloc_ncpus(void)
{
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
	/*
	 * glibc >= 2.6 has the CPU_COUNT macro.
	 *
	 * glibc's sysconf() uses isspace().  glibc allocates for the first time
	 * *before* setting up the isspace tables.  Therefore we need a
	 * different method to get the number of CPUs.
	 */
	{
		cpu_set_t set;

		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
		result = CPU_COUNT(&set);
	}
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	return ((result == -1) ? 1 : (unsigned)result);
}

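/*
 * Parse the next option from a conf string.  The expected grammar is a
 * comma-separated list of key:value pairs, where keys consist of
 * [A-Za-z0-9_] characters, e.g. "abort:true,lg_chunk:21".
 */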
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; !accept;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; !accept;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_slow_flag_init(void)
{
	/*
	 * Combine the runtime options into malloc_slow for fast path.  Called
	 * after processing all the options.
	 */
	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
	    | (opt_junk_free ? flag_opt_junk_free : 0)
	    | (opt_quarantine ? flag_opt_quarantine : 0)
	    | (opt_zero ? flag_opt_zero : 0)
	    | (opt_utrace ? flag_opt_utrace : 0)
	    | (opt_xmalloc ? flag_opt_xmalloc : 0);

	if (config_valgrind)
		malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);

	malloc_slow = (malloc_slow_flags != 0);
}

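/*
 * Option strings are processed in order of increasing priority: the
 * compile-time config_malloc_conf, the je_malloc_conf symbol, the name of the
 * /etc/malloc.conf symbolic link, and finally the MALLOC_CONF environment
 * variable (e.g. MALLOC_CONF="junk:false,narenas:4"); later sources override
 * earlier ones.  (On Android, only the first two sources are consulted.)
 */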
940static void
Jason Evanse7339702010-10-23 18:37:06 -0700941malloc_conf_init(void)
Jason Evans289053c2009-06-22 12:08:42 -0700942{
943 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -0700944 char buf[PATH_MAX + 1];
Jason Evanse7339702010-10-23 18:37:06 -0700945 const char *opts, *k, *v;
946 size_t klen, vlen;
947
Jason Evans781fe752012-05-15 14:48:14 -0700948 /*
949 * Automatically configure valgrind before processing options. The
950 * valgrind option remains in jemalloc 3.x for compatibility reasons.
951 */
952 if (config_valgrind) {
Jason Evansecd3e592014-04-15 14:33:50 -0700953 in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
Jason Evans9c640bf2014-09-11 16:20:44 -0700954 if (config_fill && unlikely(in_valgrind)) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -0200955 opt_junk = "false";
956 opt_junk_alloc = false;
957 opt_junk_free = false;
Jason Evans551ebc42014-10-03 10:16:09 -0700958 assert(!opt_zero);
Jason Evans781fe752012-05-15 14:48:14 -0700959 opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
960 opt_redzone = true;
961 }
Jason Evans9c640bf2014-09-11 16:20:44 -0700962 if (config_tcache && unlikely(in_valgrind))
Jason Evans174b70e2012-05-15 23:31:53 -0700963 opt_tcache = false;
Jason Evans781fe752012-05-15 14:48:14 -0700964 }
965
Christopher Ferris6f50cbc2015-09-09 12:17:01 -0700966#if defined(__ANDROID__)
Christopher Ferrisfb9c9c82016-03-02 14:33:02 -0800967 for (i = 0; i < 2; i++) {
Christopher Ferris6f50cbc2015-09-09 12:17:01 -0700968#else
Jason Evansf8290092016-02-07 14:23:22 -0800969 for (i = 0; i < 4; i++) {
Christopher Ferris6f50cbc2015-09-09 12:17:01 -0700970#endif
Jason Evanse7339702010-10-23 18:37:06 -0700971 /* Get runtime configuration. */
972 switch (i) {
973 case 0:
Jason Evansf8290092016-02-07 14:23:22 -0800974 opts = config_malloc_conf;
975 break;
976 case 1:
Jason Evans0a5489e2012-03-01 17:19:20 -0800977 if (je_malloc_conf != NULL) {
Jason Evanse7339702010-10-23 18:37:06 -0700978 /*
979 * Use options that were compiled into the
980 * program.
981 */
Jason Evans0a5489e2012-03-01 17:19:20 -0800982 opts = je_malloc_conf;
Jason Evanse7339702010-10-23 18:37:06 -0700983 } else {
984 /* No configuration specified. */
985 buf[0] = '\0';
986 opts = buf;
987 }
988 break;
Jason Evansf8290092016-02-07 14:23:22 -0800989 case 2: {
Jason Evans0931cec2016-02-24 11:04:08 -0800990 ssize_t linklen = 0;
Mike Hommeya19e87f2012-04-21 21:27:46 -0700991#ifndef _WIN32
Alexandre Perrindd6ef032013-09-20 19:58:11 +0200992 int saved_errno = errno;
Jason Evanse7339702010-10-23 18:37:06 -0700993 const char *linkname =
Mike Hommeya19e87f2012-04-21 21:27:46 -0700994# ifdef JEMALLOC_PREFIX
Jason Evanse7339702010-10-23 18:37:06 -0700995 "/etc/"JEMALLOC_PREFIX"malloc.conf"
Mike Hommeya19e87f2012-04-21 21:27:46 -0700996# else
Jason Evanse7339702010-10-23 18:37:06 -0700997 "/etc/malloc.conf"
Mike Hommeya19e87f2012-04-21 21:27:46 -0700998# endif
Jason Evanse7339702010-10-23 18:37:06 -0700999 ;
1000
Alexandre Perrindd6ef032013-09-20 19:58:11 +02001001 /*
1002 * Try to use the contents of the "/etc/malloc.conf"
1003 * symbolic link's name.
1004 */
1005 linklen = readlink(linkname, buf, sizeof(buf) - 1);
1006 if (linklen == -1) {
Jason Evanse7339702010-10-23 18:37:06 -07001007 /* No configuration specified. */
Alexandre Perrindd6ef032013-09-20 19:58:11 +02001008 linklen = 0;
Jason Evanse12eaf92014-12-08 14:40:14 -08001009 /* Restore errno. */
Alexandre Perrindd6ef032013-09-20 19:58:11 +02001010 set_errno(saved_errno);
Jason Evanse7339702010-10-23 18:37:06 -07001011 }
Alexandre Perrindd6ef032013-09-20 19:58:11 +02001012#endif
1013 buf[linklen] = '\0';
1014 opts = buf;
Jason Evanse7339702010-10-23 18:37:06 -07001015 break;
Jason Evansf8290092016-02-07 14:23:22 -08001016 } case 3: {
Jason Evanse7339702010-10-23 18:37:06 -07001017 const char *envname =
1018#ifdef JEMALLOC_PREFIX
1019 JEMALLOC_CPREFIX"MALLOC_CONF"
1020#else
1021 "MALLOC_CONF"
1022#endif
1023 ;
1024
Daniel Micayb74041f2014-12-09 17:41:34 -05001025 if ((opts = secure_getenv(envname)) != NULL) {
Jason Evanse7339702010-10-23 18:37:06 -07001026 /*
1027 * Do nothing; opts is already initialized to
Jason Evans8ad0eac2010-12-17 18:07:53 -08001028 * the value of the MALLOC_CONF environment
1029 * variable.
Jason Evanse7339702010-10-23 18:37:06 -07001030 */
1031 } else {
1032 /* No configuration specified. */
1033 buf[0] = '\0';
1034 opts = buf;
1035 }
1036 break;
Jason Evansd81e4bd2012-03-06 14:57:45 -08001037 } default:
Jason Evans6556e282013-10-21 14:56:27 -07001038 not_reached();
Jason Evanse7339702010-10-23 18:37:06 -07001039 buf[0] = '\0';
1040 opts = buf;
1041 }
1042
Jason Evans551ebc42014-10-03 10:16:09 -07001043 while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
1044 &vlen)) {
Jason Evansbd87b012014-04-15 16:35:08 -07001045#define CONF_MATCH(n) \
1046 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001047#define CONF_MATCH_VALUE(n) \
1048 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
Jason Evansbd87b012014-04-15 16:35:08 -07001049#define CONF_HANDLE_BOOL(o, n, cont) \
1050 if (CONF_MATCH(n)) { \
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001051 if (CONF_MATCH_VALUE("true")) \
Jason Evansd81e4bd2012-03-06 14:57:45 -08001052 o = true; \
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001053 else if (CONF_MATCH_VALUE("false")) \
Jason Evansd81e4bd2012-03-06 14:57:45 -08001054 o = false; \
Jason Evanse7339702010-10-23 18:37:06 -07001055 else { \
1056 malloc_conf_error( \
1057 "Invalid conf value", \
1058 k, klen, v, vlen); \
1059 } \
Jason Evansbd87b012014-04-15 16:35:08 -07001060 if (cont) \
1061 continue; \
Jason Evans1bf27432012-12-23 08:51:48 -08001062 }
Jason Evans3ea838d2016-11-16 18:28:38 -08001063#define CONF_MIN_no(um, min) false
1064#define CONF_MIN_yes(um, min) ((um) < (min))
1065#define CONF_MAX_no(um, max) false
1066#define CONF_MAX_yes(um, max) ((um) > (max))
1067#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
Jason Evansbd87b012014-04-15 16:35:08 -07001068 if (CONF_MATCH(n)) { \
Jason Evans122449b2012-04-06 00:35:09 -07001069 uintmax_t um; \
Jason Evanse7339702010-10-23 18:37:06 -07001070 char *end; \
1071 \
Mike Hommeya14bce82012-04-30 12:38:26 +02001072 set_errno(0); \
Jason Evans41b6afb2012-02-02 22:04:57 -08001073 um = malloc_strtoumax(v, &end, 0); \
Mike Hommeya14bce82012-04-30 12:38:26 +02001074 if (get_errno() != 0 || (uintptr_t)end -\
Jason Evanse7339702010-10-23 18:37:06 -07001075 (uintptr_t)v != vlen) { \
1076 malloc_conf_error( \
1077 "Invalid conf value", \
1078 k, klen, v, vlen); \
Jason Evans1bf27432012-12-23 08:51:48 -08001079 } else if (clip) { \
Jason Evans3ea838d2016-11-16 18:28:38 -08001080 if (CONF_MIN_##check_min(um, \
1081 (min))) \
Jason Evans8f683b92016-02-24 11:03:40 -08001082 o = (t)(min); \
Jason Evans3ea838d2016-11-16 18:28:38 -08001083 else if (CONF_MAX_##check_max( \
1084 um, (max))) \
Jason Evans8f683b92016-02-24 11:03:40 -08001085 o = (t)(max); \
Jason Evans1bf27432012-12-23 08:51:48 -08001086 else \
Jason Evans8f683b92016-02-24 11:03:40 -08001087 o = (t)um; \
Jason Evans1bf27432012-12-23 08:51:48 -08001088 } else { \
Jason Evans3ea838d2016-11-16 18:28:38 -08001089 if (CONF_MIN_##check_min(um, \
1090 (min)) || \
1091 CONF_MAX_##check_max(um, \
1092 (max))) { \
Jason Evans1bf27432012-12-23 08:51:48 -08001093 malloc_conf_error( \
1094 "Out-of-range " \
1095 "conf value", \
1096 k, klen, v, vlen); \
1097 } else \
Jason Evans8f683b92016-02-24 11:03:40 -08001098 o = (t)um; \
Jason Evans1bf27432012-12-23 08:51:48 -08001099 } \
Jason Evanse7339702010-10-23 18:37:06 -07001100 continue; \
1101 }
Jason Evans3ea838d2016-11-16 18:28:38 -08001102#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
1103 clip) \
1104 CONF_HANDLE_T_U(unsigned, o, n, min, max, \
1105 check_min, check_max, clip)
1106#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
1107 CONF_HANDLE_T_U(size_t, o, n, min, max, \
1108 check_min, check_max, clip)
Jason Evansd81e4bd2012-03-06 14:57:45 -08001109#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
Jason Evansbd87b012014-04-15 16:35:08 -07001110 if (CONF_MATCH(n)) { \
Jason Evanse7339702010-10-23 18:37:06 -07001111 long l; \
1112 char *end; \
1113 \
Mike Hommeya14bce82012-04-30 12:38:26 +02001114 set_errno(0); \
Jason Evanse7339702010-10-23 18:37:06 -07001115 l = strtol(v, &end, 0); \
Mike Hommeya14bce82012-04-30 12:38:26 +02001116 if (get_errno() != 0 || (uintptr_t)end -\
Jason Evanse7339702010-10-23 18:37:06 -07001117 (uintptr_t)v != vlen) { \
1118 malloc_conf_error( \
1119 "Invalid conf value", \
1120 k, klen, v, vlen); \
Jason Evansfc0b3b72014-10-09 17:54:06 -07001121 } else if (l < (ssize_t)(min) || l > \
1122 (ssize_t)(max)) { \
Jason Evanse7339702010-10-23 18:37:06 -07001123 malloc_conf_error( \
1124 "Out-of-range conf value", \
1125 k, klen, v, vlen); \
1126 } else \
Jason Evansd81e4bd2012-03-06 14:57:45 -08001127 o = l; \
Jason Evanse7339702010-10-23 18:37:06 -07001128 continue; \
1129 }
Jason Evansd81e4bd2012-03-06 14:57:45 -08001130#define CONF_HANDLE_CHAR_P(o, n, d) \
Jason Evansbd87b012014-04-15 16:35:08 -07001131 if (CONF_MATCH(n)) { \
Jason Evanse7339702010-10-23 18:37:06 -07001132 size_t cpylen = (vlen <= \
Jason Evansd81e4bd2012-03-06 14:57:45 -08001133 sizeof(o)-1) ? vlen : \
1134 sizeof(o)-1; \
1135 strncpy(o, v, cpylen); \
1136 o[cpylen] = '\0'; \
Jason Evanse7339702010-10-23 18:37:06 -07001137 continue; \
1138 }
1139
Jason Evansbd87b012014-04-15 16:35:08 -07001140 CONF_HANDLE_BOOL(opt_abort, "abort", true)
Jason Evanse7339702010-10-23 18:37:06 -07001141 /*
Jason Evansfc0b3b72014-10-09 17:54:06 -07001142 * Chunks always require at least one header page,
1143 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
1144 * possibly an additional page in the presence of
1145 * redzones. In order to simplify options processing,
1146 * use a conservative bound that accommodates all these
1147 * constraints.
Jason Evanse7339702010-10-23 18:37:06 -07001148 */
Jason Evans606f1fd2012-04-20 21:39:14 -07001149 CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
Jason Evansfc0b3b72014-10-09 17:54:06 -07001150 LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
Jason Evans3ea838d2016-11-16 18:28:38 -08001151 (sizeof(size_t) << 3) - 1, yes, yes, true)
Jason Evans609ae592012-10-11 13:53:15 -07001152 if (strncmp("dss", k, klen) == 0) {
1153 int i;
1154 bool match = false;
1155 for (i = 0; i < dss_prec_limit; i++) {
1156 if (strncmp(dss_prec_names[i], v, vlen)
1157 == 0) {
Jason Evanse2bcf032016-10-13 12:18:38 -07001158 if (chunk_dss_prec_set(i)) {
Jason Evans609ae592012-10-11 13:53:15 -07001159 malloc_conf_error(
1160 "Error setting dss",
1161 k, klen, v, vlen);
1162 } else {
1163 opt_dss =
1164 dss_prec_names[i];
1165 match = true;
1166 break;
1167 }
1168 }
1169 }
Jason Evans551ebc42014-10-03 10:16:09 -07001170 if (!match) {
Jason Evans609ae592012-10-11 13:53:15 -07001171 malloc_conf_error("Invalid conf value",
1172 k, klen, v, vlen);
1173 }
1174 continue;
1175 }
Jason Evans8f683b92016-02-24 11:03:40 -08001176 CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
Jason Evans3ea838d2016-11-16 18:28:38 -08001177 UINT_MAX, yes, no, false)
Jason Evans243f7a02016-02-19 20:09:31 -08001178 if (strncmp("purge", k, klen) == 0) {
1179 int i;
1180 bool match = false;
1181 for (i = 0; i < purge_mode_limit; i++) {
1182 if (strncmp(purge_mode_names[i], v,
1183 vlen) == 0) {
1184 opt_purge = (purge_mode_t)i;
1185 match = true;
1186 break;
1187 }
1188 }
1189 if (!match) {
1190 malloc_conf_error("Invalid conf value",
1191 k, klen, v, vlen);
1192 }
1193 continue;
1194 }
Jason Evans606f1fd2012-04-20 21:39:14 -07001195 CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
Jason Evansd81e4bd2012-03-06 14:57:45 -08001196 -1, (sizeof(size_t) << 3) - 1)
Jason Evans243f7a02016-02-19 20:09:31 -08001197 CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
Jason Evans9bad0792016-02-21 11:25:02 -08001198 NSTIME_SEC_MAX);
Jason Evansbd87b012014-04-15 16:35:08 -07001199 CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
Jason Evans7372b152012-02-10 20:22:09 -08001200 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001201 if (CONF_MATCH("junk")) {
1202 if (CONF_MATCH_VALUE("true")) {
Jason Evansa2539fa2016-10-12 22:58:40 -07001203 if (config_valgrind &&
1204 unlikely(in_valgrind)) {
1205 malloc_conf_error(
1206 "Deallocation-time "
1207 "junk filling cannot "
1208 "be enabled while "
1209 "running inside "
1210 "Valgrind", k, klen, v,
1211 vlen);
1212 } else {
1213 opt_junk = "true";
1214 opt_junk_alloc = true;
1215 opt_junk_free = true;
1216 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001217 } else if (CONF_MATCH_VALUE("false")) {
1218 opt_junk = "false";
1219 opt_junk_alloc = opt_junk_free =
1220 false;
1221 } else if (CONF_MATCH_VALUE("alloc")) {
1222 opt_junk = "alloc";
1223 opt_junk_alloc = true;
1224 opt_junk_free = false;
1225 } else if (CONF_MATCH_VALUE("free")) {
Jason Evansa2539fa2016-10-12 22:58:40 -07001226 if (config_valgrind &&
1227 unlikely(in_valgrind)) {
1228 malloc_conf_error(
1229 "Deallocation-time "
1230 "junk filling cannot "
1231 "be enabled while "
1232 "running inside "
1233 "Valgrind", k, klen, v,
1234 vlen);
1235 } else {
1236 opt_junk = "free";
1237 opt_junk_alloc = false;
1238 opt_junk_free = true;
1239 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02001240 } else {
1241 malloc_conf_error(
1242 "Invalid conf value", k,
1243 klen, v, vlen);
1244 }
1245 continue;
1246 }
Jason Evans606f1fd2012-04-20 21:39:14 -07001247 CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
Jason Evans3ea838d2016-11-16 18:28:38 -08001248 0, SIZE_T_MAX, no, no, false)
Jason Evansbd87b012014-04-15 16:35:08 -07001249 CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
1250 CONF_HANDLE_BOOL(opt_zero, "zero", true)
Jason Evans7372b152012-02-10 20:22:09 -08001251 }
Jason Evansb1476112012-04-05 13:36:17 -07001252 if (config_utrace) {
Jason Evansbd87b012014-04-15 16:35:08 -07001253 CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
Jason Evansb1476112012-04-05 13:36:17 -07001254 }
Jason Evans7372b152012-02-10 20:22:09 -08001255 if (config_xmalloc) {
Jason Evansbd87b012014-04-15 16:35:08 -07001256 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
Jason Evans7372b152012-02-10 20:22:09 -08001257 }
1258 if (config_tcache) {
Jason Evansbd87b012014-04-15 16:35:08 -07001259 CONF_HANDLE_BOOL(opt_tcache, "tcache",
1260 !config_valgrind || !in_valgrind)
1261 if (CONF_MATCH("tcache")) {
1262 assert(config_valgrind && in_valgrind);
1263 if (opt_tcache) {
1264 opt_tcache = false;
1265 malloc_conf_error(
1266 "tcache cannot be enabled "
1267 "while running inside Valgrind",
1268 k, klen, v, vlen);
1269 }
1270 continue;
1271 }
Jason Evansd81e4bd2012-03-06 14:57:45 -08001272 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
Jason Evans606f1fd2012-04-20 21:39:14 -07001273 "lg_tcache_max", -1,
Jason Evans7372b152012-02-10 20:22:09 -08001274 (sizeof(size_t) << 3) - 1)
1275 }
1276 if (config_prof) {
Jason Evansbd87b012014-04-15 16:35:08 -07001277 CONF_HANDLE_BOOL(opt_prof, "prof", true)
Jason Evans606f1fd2012-04-20 21:39:14 -07001278 CONF_HANDLE_CHAR_P(opt_prof_prefix,
1279 "prof_prefix", "jeprof")
Jason Evansbd87b012014-04-15 16:35:08 -07001280 CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
1281 true)
Jason Evansfc12c0b2014-10-03 23:25:30 -07001282 CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1283 "prof_thread_active_init", true)
Jason Evans602c8e02014-08-18 16:22:13 -07001284 CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
Jason Evans3ea838d2016-11-16 18:28:38 -08001285 "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1286 - 1, no, yes, true)
Jason Evansbd87b012014-04-15 16:35:08 -07001287 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
1288 true)
Jason Evansd81e4bd2012-03-06 14:57:45 -08001289 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
Jason Evans606f1fd2012-04-20 21:39:14 -07001290 "lg_prof_interval", -1,
Jason Evans7372b152012-02-10 20:22:09 -08001291 (sizeof(uint64_t) << 3) - 1)
Jason Evansbd87b012014-04-15 16:35:08 -07001292 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
1293 true)
1294 CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
1295 true)
1296 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
1297 true)
Jason Evans7372b152012-02-10 20:22:09 -08001298 }
Jason Evanse7339702010-10-23 18:37:06 -07001299 malloc_conf_error("Invalid conf pair", k, klen, v,
1300 vlen);
Jason Evansbd87b012014-04-15 16:35:08 -07001301#undef CONF_MATCH
Jason Evans3ea838d2016-11-16 18:28:38 -08001302#undef CONF_MATCH_VALUE
Jason Evanse7339702010-10-23 18:37:06 -07001303#undef CONF_HANDLE_BOOL
Jason Evans3ea838d2016-11-16 18:28:38 -08001304#undef CONF_MIN_no
1305#undef CONF_MIN_yes
1306#undef CONF_MAX_no
1307#undef CONF_MAX_yes
1308#undef CONF_HANDLE_T_U
1309#undef CONF_HANDLE_UNSIGNED
Jason Evanse7339702010-10-23 18:37:06 -07001310#undef CONF_HANDLE_SIZE_T
1311#undef CONF_HANDLE_SSIZE_T
1312#undef CONF_HANDLE_CHAR_P
1313 }
Jason Evanse7339702010-10-23 18:37:06 -07001314 }
1315}
1316
1317static bool
Jason Evans10aff3f2015-01-20 15:37:51 -08001318malloc_init_hard_needed(void)
Jason Evanse7339702010-10-23 18:37:06 -07001319{
Jason Evans289053c2009-06-22 12:08:42 -07001320
Jason Evans10aff3f2015-01-20 15:37:51 -08001321 if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1322 malloc_init_recursible)) {
Jason Evans289053c2009-06-22 12:08:42 -07001323 /*
1324 * Another thread initialized the allocator before this one
Jason Evansa25d0a82009-11-09 14:57:38 -08001325 * acquired init_lock, or this thread is the initializing
1326 * thread, and it is recursively allocating.
Jason Evans289053c2009-06-22 12:08:42 -07001327 */
Jason Evans289053c2009-06-22 12:08:42 -07001328 return (false);
1329 }
Jason Evans41b6afb2012-02-02 22:04:57 -08001330#ifdef JEMALLOC_THREADED_INIT
Jason Evans551ebc42014-10-03 10:16:09 -07001331 if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
Jason Evans97376852016-10-13 14:47:50 -07001332 spin_t spinner;
1333
Jason Evansb7924f52009-06-23 19:01:18 -07001334 /* Busy-wait until the initializing thread completes. */
Jason Evans97376852016-10-13 14:47:50 -07001335 spin_init(&spinner);
Jason Evansb7924f52009-06-23 19:01:18 -07001336 do {
Jason Evansbcd54242016-05-12 21:07:08 -07001337 malloc_mutex_unlock(TSDN_NULL, &init_lock);
Jason Evans97376852016-10-13 14:47:50 -07001338 spin_adaptive(&spinner);
Jason Evansbcd54242016-05-12 21:07:08 -07001339 malloc_mutex_lock(TSDN_NULL, &init_lock);
Jason Evans10aff3f2015-01-20 15:37:51 -08001340 } while (!malloc_initialized());
Jason Evansb7924f52009-06-23 19:01:18 -07001341 return (false);
1342 }
Jason Evans41b6afb2012-02-02 22:04:57 -08001343#endif
Jason Evans10aff3f2015-01-20 15:37:51 -08001344 return (true);
1345}
Jason Evans289053c2009-06-22 12:08:42 -07001346
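/*
 * Bootstrap just enough state for arena 0 (a0) to service allocation
 * requests: parse the runtime configuration, boot the supporting subsystems
 * (pages, base, chunk, ctl, prof, arena, tcache), and create arena 0.
 * Additional arenas are created lazily.
 */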
Jason Evans10aff3f2015-01-20 15:37:51 -08001347static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001348malloc_init_hard_a0_locked(void)
Jason Evans10aff3f2015-01-20 15:37:51 -08001349{
1350
1351 malloc_initializer = INITIALIZER;
Jason Evans5460aa62014-09-22 21:09:23 -07001352
Jason Evans7372b152012-02-10 20:22:09 -08001353 if (config_prof)
1354 prof_boot0();
Jason Evanse7339702010-10-23 18:37:06 -07001355 malloc_conf_init();
Jason Evans03c22372010-01-03 12:10:42 -08001356 if (opt_stats_print) {
Jason Evans289053c2009-06-22 12:08:42 -07001357 /* Print statistics at exit. */
Jason Evansa0bf2422010-01-29 14:30:41 -08001358 if (atexit(stats_print_atexit) != 0) {
Jason Evans698805c2010-03-03 17:45:38 -08001359 malloc_write("<jemalloc>: Error in atexit()\n");
Jason Evansa0bf2422010-01-29 14:30:41 -08001360 if (opt_abort)
1361 abort();
1362 }
Jason Evans289053c2009-06-22 12:08:42 -07001363 }
Jason Evansc2f970c2016-05-05 17:45:02 -07001364 pages_boot();
Jason Evans10aff3f2015-01-20 15:37:51 -08001365 if (base_boot())
Jason Evansa0bf2422010-01-29 14:30:41 -08001366 return (true);
Jason Evans10aff3f2015-01-20 15:37:51 -08001367 if (chunk_boot())
Jason Evans3c234352010-01-27 13:10:55 -08001368 return (true);
Jason Evans10aff3f2015-01-20 15:37:51 -08001369 if (ctl_boot())
Jason Evans41b6afb2012-02-02 22:04:57 -08001370 return (true);
Jason Evans7372b152012-02-10 20:22:09 -08001371 if (config_prof)
1372 prof_boot1();
Jason Evans5d8db152016-04-08 14:16:19 -07001373 arena_boot();
Jason Evansc1e00ef2016-05-10 22:21:10 -07001374 if (config_tcache && tcache_boot(TSDN_NULL))
Jason Evans84c8eef2011-03-16 10:30:13 -07001375 return (true);
Jason Evansb2c0d632016-04-13 23:36:15 -07001376 if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
Jason Evans8e6f8b42011-11-03 18:40:03 -07001377 return (true);
Jason Evansb7924f52009-06-23 19:01:18 -07001378 /*
1379 * Create enough scaffolding to allow recursive allocation in
1380 * malloc_ncpus().
1381 */
Jason Evans767d8502016-02-24 23:58:10 -08001382 narenas_auto = 1;
1383 narenas_total_set(narenas_auto);
Jason Evans10aff3f2015-01-20 15:37:51 -08001384 arenas = &a0;
Jason Evans609ae592012-10-11 13:53:15 -07001385 memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
Jason Evansb7924f52009-06-23 19:01:18 -07001386 /*
1387 * Initialize one arena here. The rest are lazily created in
Jason Evans8bb31982014-10-07 23:14:57 -07001388 * arena_choose_hard().
Jason Evansb7924f52009-06-23 19:01:18 -07001389 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001390 if (arena_init(TSDN_NULL, 0) == NULL)
Jason Evansb7924f52009-06-23 19:01:18 -07001391 return (true);
Jason Evans0c12dca2016-05-07 12:42:31 -07001392
Jason Evans10aff3f2015-01-20 15:37:51 -08001393 malloc_init_state = malloc_init_a0_initialized;
Jason Evans0c12dca2016-05-07 12:42:31 -07001394
Jason Evans10aff3f2015-01-20 15:37:51 -08001395 return (false);
1396}
Jason Evansb7924f52009-06-23 19:01:18 -07001397
Jason Evans10aff3f2015-01-20 15:37:51 -08001398static bool
1399malloc_init_hard_a0(void)
1400{
1401 bool ret;
Jason Evans6da54182012-03-23 18:05:51 -07001402
Jason Evansc1e00ef2016-05-10 22:21:10 -07001403 malloc_mutex_lock(TSDN_NULL, &init_lock);
Jason Evans10aff3f2015-01-20 15:37:51 -08001404 ret = malloc_init_hard_a0_locked();
Jason Evansc1e00ef2016-05-10 22:21:10 -07001405 malloc_mutex_unlock(TSDN_NULL, &init_lock);
Jason Evans10aff3f2015-01-20 15:37:51 -08001406 return (ret);
1407}
1408
Jason Evansb2c0d632016-04-13 23:36:15 -07001409/* Initialize data structures which may trigger recursive allocation. */
Cosmin Paraschiv9cb481a2016-01-11 11:05:00 -08001410static bool
Jason Evans10aff3f2015-01-20 15:37:51 -08001411malloc_init_hard_recursible(void)
1412{
1413
1414 malloc_init_state = malloc_init_recursible;
Cosmin Paraschiv9cb481a2016-01-11 11:05:00 -08001415
Jason Evansb7924f52009-06-23 19:01:18 -07001416 ncpus = malloc_ncpus();
Leonard Crestezac4403c2013-10-22 00:11:09 +03001417
Jason Evans949a27f2016-11-17 15:14:57 -08001418#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
1419 && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
1420 !defined(__native_client__))
Cosmin Paraschiv9cb481a2016-01-11 11:05:00 -08001421 /* LinuxThreads' pthread_atfork() allocates. */
Leonard Crestezac4403c2013-10-22 00:11:09 +03001422 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1423 jemalloc_postfork_child) != 0) {
1424 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1425 if (opt_abort)
1426 abort();
Jason Evans0c12dca2016-05-07 12:42:31 -07001427 return (true);
Leonard Crestezac4403c2013-10-22 00:11:09 +03001428 }
1429#endif
Cosmin Paraschiv9cb481a2016-01-11 11:05:00 -08001430
Jason Evans0c12dca2016-05-07 12:42:31 -07001431 return (false);
Jason Evans10aff3f2015-01-20 15:37:51 -08001432}
Jason Evansb7924f52009-06-23 19:01:18 -07001433
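/*
 * Final initialization step: boot the remaining mutexes, choose the number of
 * automatic arenas from opt_narenas/ncpus, allocate the full arenas array,
 * and mark the allocator fully initialized.
 */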
Jason Evans10aff3f2015-01-20 15:37:51 -08001434static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001435malloc_init_hard_finish(tsdn_t *tsdn)
Jason Evans10aff3f2015-01-20 15:37:51 -08001436{
1437
Jason Evansb2c0d632016-04-13 23:36:15 -07001438 if (malloc_mutex_boot())
Jason Evans633aaff2012-04-03 08:47:07 -07001439 return (true);
Jason Evans633aaff2012-04-03 08:47:07 -07001440
Jason Evanse7339702010-10-23 18:37:06 -07001441 if (opt_narenas == 0) {
Jason Evans289053c2009-06-22 12:08:42 -07001442 /*
Jason Evans5463a522009-12-29 00:09:15 -08001443 * For SMP systems, create more than one arena per CPU by
1444 * default.
Jason Evans289053c2009-06-22 12:08:42 -07001445 */
Jason Evanse7339702010-10-23 18:37:06 -07001446 if (ncpus > 1)
1447 opt_narenas = ncpus << 2;
1448 else
1449 opt_narenas = 1;
Jason Evans289053c2009-06-22 12:08:42 -07001450 }
Christopher Ferris6f50cbc2015-09-09 12:17:01 -07001451#if defined(ANDROID_MAX_ARENAS)
1452	/* Never create more than ANDROID_MAX_ARENAS arenas regardless of
1453	 * ncpus.  Extra arenas use more PSS and are not very useful unless
1454	 * lots of threads are allocating/freeing at the same time.
1455	 */
1456 if (opt_narenas > ANDROID_MAX_ARENAS)
1457 opt_narenas = ANDROID_MAX_ARENAS;
1458#endif
Jason Evans609ae592012-10-11 13:53:15 -07001459 narenas_auto = opt_narenas;
Jason Evanse7339702010-10-23 18:37:06 -07001460 /*
Jason Evans767d8502016-02-24 23:58:10 -08001461 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
Jason Evanse7339702010-10-23 18:37:06 -07001462 */
Jason Evans767d8502016-02-24 23:58:10 -08001463 if (narenas_auto > MALLOCX_ARENA_MAX) {
1464 narenas_auto = MALLOCX_ARENA_MAX;
Jason Evansd81e4bd2012-03-06 14:57:45 -08001465		malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
Jason Evans609ae592012-10-11 13:53:15 -07001466 narenas_auto);
Jason Evans289053c2009-06-22 12:08:42 -07001467 }
Jason Evans767d8502016-02-24 23:58:10 -08001468 narenas_total_set(narenas_auto);
Jason Evans289053c2009-06-22 12:08:42 -07001469
Jason Evans289053c2009-06-22 12:08:42 -07001470 /* Allocate and initialize arenas. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001471 arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
Jason Evans767d8502016-02-24 23:58:10 -08001472 (MALLOCX_ARENA_MAX+1));
Jason Evans10aff3f2015-01-20 15:37:51 -08001473 if (arenas == NULL)
Jason Evans289053c2009-06-22 12:08:42 -07001474 return (true);
Jason Evansb7924f52009-06-23 19:01:18 -07001475 /* Copy the pointer to the one arena that was already initialized. */
Jason Evans767d8502016-02-24 23:58:10 -08001476 arena_set(0, a0);
Jason Evans289053c2009-06-22 12:08:42 -07001477
Jason Evans10aff3f2015-01-20 15:37:51 -08001478 malloc_init_state = malloc_init_initialized;
Qi Wangf4a0f322015-10-27 15:12:10 -07001479 malloc_slow_flag_init();
1480
Jason Evans10aff3f2015-01-20 15:37:51 -08001481 return (false);
1482}
1483
1484static bool
1485malloc_init_hard(void)
1486{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001487 tsd_t *tsd;
Jason Evans10aff3f2015-01-20 15:37:51 -08001488
Mike Hommey0a116fa2015-09-03 15:48:48 +09001489#if defined(_WIN32) && _WIN32_WINNT < 0x0600
1490 _init_init_lock();
1491#endif
Jason Evansc1e00ef2016-05-10 22:21:10 -07001492 malloc_mutex_lock(TSDN_NULL, &init_lock);
Jason Evans10aff3f2015-01-20 15:37:51 -08001493 if (!malloc_init_hard_needed()) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001494 malloc_mutex_unlock(TSDN_NULL, &init_lock);
Jason Evans10aff3f2015-01-20 15:37:51 -08001495 return (false);
1496 }
1497
1498 if (malloc_init_state != malloc_init_a0_initialized &&
1499 malloc_init_hard_a0_locked()) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001500 malloc_mutex_unlock(TSDN_NULL, &init_lock);
Jason Evans10aff3f2015-01-20 15:37:51 -08001501 return (true);
1502 }
Cosmin Paraschiv9cb481a2016-01-11 11:05:00 -08001503
Jason Evansc1e00ef2016-05-10 22:21:10 -07001504 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1505 /* Recursive allocation relies on functional tsd. */
1506 tsd = malloc_tsd_boot0();
1507 if (tsd == NULL)
1508 return (true);
1509 if (malloc_init_hard_recursible())
1510 return (true);
1511 malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
1512
Jason Evans962a2972016-10-20 23:59:12 -07001513 if (config_prof && prof_boot2(tsd)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001514 malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
Jason Evans10aff3f2015-01-20 15:37:51 -08001515 return (true);
1516 }
1517
Jason Evansc1e00ef2016-05-10 22:21:10 -07001518 if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
1519 malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
Cosmin Paraschiv9cb481a2016-01-11 11:05:00 -08001520 return (true);
1521 }
Jason Evans10aff3f2015-01-20 15:37:51 -08001522
Jason Evansc1e00ef2016-05-10 22:21:10 -07001523 malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
Jason Evans8bb31982014-10-07 23:14:57 -07001524 malloc_tsd_boot1();
Jason Evans289053c2009-06-22 12:08:42 -07001525 return (false);
1526}
1527
1528/*
Jason Evanse476f8a2010-01-16 09:53:50 -08001529 * End initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -07001530 */
1531/******************************************************************************/
1532/*
1533 * Begin malloc(3)-compatible functions.
1534 */
1535
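/*
 * Allocation path for profiling-sampled requests.  Requests no larger than
 * SMALL_MAXCLASS are promoted to LARGE_MINCLASS so that they can carry
 * profiling metadata; arena_prof_promoted() then records the original usize.
 */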
Jason Evansb2c31662014-01-12 15:05:44 -08001536static void *
Jason Evans3ef51d72016-05-06 12:16:00 -07001537ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
Qi Wangf4a0f322015-10-27 15:12:10 -07001538 prof_tctx_t *tctx, bool slow_path)
Jason Evansb2c31662014-01-12 15:05:44 -08001539{
1540 void *p;
1541
Jason Evans602c8e02014-08-18 16:22:13 -07001542 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08001543 return (NULL);
Jason Evans9b0cbf02014-04-11 14:24:51 -07001544 if (usize <= SMALL_MAXCLASS) {
Qi Wangf4a0f322015-10-27 15:12:10 -07001545 szind_t ind_large = size2index(LARGE_MINCLASS);
Jason Evans3ef51d72016-05-06 12:16:00 -07001546 p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
Jason Evansb2c31662014-01-12 15:05:44 -08001547 if (p == NULL)
1548 return (NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001549 arena_prof_promoted(tsd_tsdn(tsd), p, usize);
Jason Evansb2c31662014-01-12 15:05:44 -08001550 } else
Jason Evans3ef51d72016-05-06 12:16:00 -07001551 p = ialloc(tsd, usize, ind, zero, slow_path);
Jason Evansb2c31662014-01-12 15:05:44 -08001552
1553 return (p);
1554}
1555
1556JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans3ef51d72016-05-06 12:16:00 -07001557ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
Jason Evansb2c31662014-01-12 15:05:44 -08001558{
1559 void *p;
Jason Evans602c8e02014-08-18 16:22:13 -07001560 prof_tctx_t *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08001561
Jason Evanscec0d632015-09-14 23:17:25 -07001562 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
Jason Evans9c640bf2014-09-11 16:20:44 -07001563 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
Jason Evans3ef51d72016-05-06 12:16:00 -07001564 p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
Jason Evansb2c31662014-01-12 15:05:44 -08001565 else
Jason Evans3ef51d72016-05-06 12:16:00 -07001566 p = ialloc(tsd, usize, ind, zero, slow_path);
Jason Evanscfc57062014-10-30 23:18:45 -07001567 if (unlikely(p == NULL)) {
Jason Evans5460aa62014-09-22 21:09:23 -07001568 prof_alloc_rollback(tsd, tctx, true);
Jason Evansb2c31662014-01-12 15:05:44 -08001569 return (NULL);
Jason Evans6e73dc12014-09-09 19:37:26 -07001570 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001571 prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001572
1573 return (p);
1574}
1575
Jason Evans3ef51d72016-05-06 12:16:00 -07001576/*
1577 * ialloc_body() is inlined so that fast and slow paths are generated separately
1578 * with statically known slow_path.
Jason Evansc1e00ef2016-05-10 22:21:10 -07001579 *
1580 * This function guarantees that *tsdn is non-NULL on success.
Jason Evans3ef51d72016-05-06 12:16:00 -07001581 */
Jason Evans6f001052014-04-22 18:41:15 -07001582JEMALLOC_ALWAYS_INLINE_C void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07001583ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
1584 bool slow_path)
Jason Evans6f001052014-04-22 18:41:15 -07001585{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001586 tsd_t *tsd;
Qi Wangf4a0f322015-10-27 15:12:10 -07001587 szind_t ind;
Jason Evans6f001052014-04-22 18:41:15 -07001588
Jason Evans3ef51d72016-05-06 12:16:00 -07001589 if (slow_path && unlikely(malloc_init())) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001590 *tsdn = NULL;
Jason Evans6f001052014-04-22 18:41:15 -07001591 return (NULL);
Jason Evans3ef51d72016-05-06 12:16:00 -07001592 }
Jason Evansb2c0d632016-04-13 23:36:15 -07001593
Jason Evansc1e00ef2016-05-10 22:21:10 -07001594 tsd = tsd_fetch();
1595 *tsdn = tsd_tsdn(tsd);
1596 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansb2c0d632016-04-13 23:36:15 -07001597
Qi Wangf4a0f322015-10-27 15:12:10 -07001598 ind = size2index(size);
Jason Evans0c516a02016-02-25 15:29:49 -08001599 if (unlikely(ind >= NSIZES))
1600 return (NULL);
Jason Evans6f001052014-04-22 18:41:15 -07001601
Jason Evans0c516a02016-02-25 15:29:49 -08001602 if (config_stats || (config_prof && opt_prof) || (slow_path &&
1603 config_valgrind && unlikely(in_valgrind))) {
Qi Wangf4a0f322015-10-27 15:12:10 -07001604 *usize = index2size(ind);
Jason Evans0c516a02016-02-25 15:29:49 -08001605 assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
Jason Evans6f001052014-04-22 18:41:15 -07001606 }
1607
Jason Evans0c516a02016-02-25 15:29:49 -08001608 if (config_prof && opt_prof)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001609 return (ialloc_prof(tsd, *usize, ind, zero, slow_path));
Qi Wangf4a0f322015-10-27 15:12:10 -07001610
Jason Evansc1e00ef2016-05-10 22:21:10 -07001611 return (ialloc(tsd, size, ind, zero, slow_path));
Qi Wangf4a0f322015-10-27 15:12:10 -07001612}
1613
1614JEMALLOC_ALWAYS_INLINE_C void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001615ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
Jason Evans3ef51d72016-05-06 12:16:00 -07001616 bool update_errno, bool slow_path)
Qi Wangf4a0f322015-10-27 15:12:10 -07001617{
Jason Evans3ef51d72016-05-06 12:16:00 -07001618
Jason Evansc1e00ef2016-05-10 22:21:10 -07001619 assert(!tsdn_null(tsdn) || ret == NULL);
1620
Qi Wangf4a0f322015-10-27 15:12:10 -07001621 if (unlikely(ret == NULL)) {
1622 if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
Jason Evans3ef51d72016-05-06 12:16:00 -07001623 malloc_printf("<jemalloc>: Error in %s(): out of "
1624 "memory\n", func);
Qi Wangf4a0f322015-10-27 15:12:10 -07001625 abort();
1626 }
Jason Evans3ef51d72016-05-06 12:16:00 -07001627 if (update_errno)
1628 set_errno(ENOMEM);
Qi Wangf4a0f322015-10-27 15:12:10 -07001629 }
1630 if (config_stats && likely(ret != NULL)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001631 assert(usize == isalloc(tsdn, ret, config_prof));
1632 *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
Qi Wangf4a0f322015-10-27 15:12:10 -07001633 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001634 witness_assert_lockless(tsdn);
Jason Evans6f001052014-04-22 18:41:15 -07001635}
Jason Evansb2c31662014-01-12 15:05:44 -08001636
Matthijsc1a6a512015-07-27 22:48:27 +02001637JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1638void JEMALLOC_NOTHROW *
Jason Evans00632602015-07-21 08:10:38 -07001639JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
Jason Evans0a5489e2012-03-01 17:19:20 -08001640je_malloc(size_t size)
Jason Evans289053c2009-06-22 12:08:42 -07001641{
1642 void *ret;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001643 tsdn_t *tsdn;
Jason Evans8694e2e2012-04-23 13:05:32 -07001644 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans289053c2009-06-22 12:08:42 -07001645
Jason Evansc90ad712012-02-28 20:31:37 -08001646 if (size == 0)
1647 size = 1;
Jason Evans289053c2009-06-22 12:08:42 -07001648
Qi Wangf4a0f322015-10-27 15:12:10 -07001649 if (likely(!malloc_slow)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001650 ret = ialloc_body(size, false, &tsdn, &usize, false);
1651 ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
Qi Wangf4a0f322015-10-27 15:12:10 -07001652 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001653 ret = ialloc_body(size, false, &tsdn, &usize, true);
1654 ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
Qi Wangf4a0f322015-10-27 15:12:10 -07001655 UTRACE(0, size, ret);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001656 JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
Jason Evans289053c2009-06-22 12:08:42 -07001657 }
Qi Wangf4a0f322015-10-27 15:12:10 -07001658
Jason Evans289053c2009-06-22 12:08:42 -07001659 return (ret);
1660}
1661
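/* Aligned analogue of ialloc_prof_sample(), used on the imemalign() path. */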
Jason Evansb2c31662014-01-12 15:05:44 -08001662static void *
Jason Evans5460aa62014-09-22 21:09:23 -07001663imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
1664 prof_tctx_t *tctx)
Jason Evansb2c31662014-01-12 15:05:44 -08001665{
1666 void *p;
1667
Jason Evans602c8e02014-08-18 16:22:13 -07001668 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08001669 return (NULL);
Jason Evans9b0cbf02014-04-11 14:24:51 -07001670 if (usize <= SMALL_MAXCLASS) {
Jason Evansb718cf72014-09-07 14:40:19 -07001671 assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
Jason Evans241abc62015-06-23 18:47:07 -07001672 p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
Jason Evansb2c31662014-01-12 15:05:44 -08001673 if (p == NULL)
1674 return (NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001675 arena_prof_promoted(tsd_tsdn(tsd), p, usize);
Jason Evansb2c31662014-01-12 15:05:44 -08001676 } else
Jason Evans5460aa62014-09-22 21:09:23 -07001677 p = ipalloc(tsd, usize, alignment, false);
Jason Evansb2c31662014-01-12 15:05:44 -08001678
1679 return (p);
1680}
1681
1682JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans5460aa62014-09-22 21:09:23 -07001683imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
Jason Evansb2c31662014-01-12 15:05:44 -08001684{
1685 void *p;
Jason Evans6e73dc12014-09-09 19:37:26 -07001686 prof_tctx_t *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08001687
Jason Evanscec0d632015-09-14 23:17:25 -07001688 tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
Jason Evans9c640bf2014-09-11 16:20:44 -07001689 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
Jason Evans5460aa62014-09-22 21:09:23 -07001690 p = imemalign_prof_sample(tsd, alignment, usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001691 else
Jason Evans5460aa62014-09-22 21:09:23 -07001692 p = ipalloc(tsd, usize, alignment, false);
Jason Evanscfc57062014-10-30 23:18:45 -07001693 if (unlikely(p == NULL)) {
Jason Evans5460aa62014-09-22 21:09:23 -07001694 prof_alloc_rollback(tsd, tctx, true);
Jason Evansb2c31662014-01-12 15:05:44 -08001695 return (NULL);
Jason Evans6e73dc12014-09-09 19:37:26 -07001696 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001697 prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001698
1699 return (p);
1700}
1701
Jason Evans9ad48232010-01-03 11:59:20 -08001702JEMALLOC_ATTR(nonnull(1))
Jason Evansa5070042011-08-12 13:48:27 -07001703static int
Jason Evansb2c31662014-01-12 15:05:44 -08001704imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
Jason Evans289053c2009-06-22 12:08:42 -07001705{
1706 int ret;
Jason Evans5460aa62014-09-22 21:09:23 -07001707 tsd_t *tsd;
Jason Evans7372b152012-02-10 20:22:09 -08001708 size_t usize;
Jason Evans38d92102011-03-23 00:37:29 -07001709 void *result;
Jason Evans289053c2009-06-22 12:08:42 -07001710
Jason Evans0a0bbf62012-03-13 12:55:21 -07001711 assert(min_alignment != 0);
1712
Jason Evans029d44c2014-10-04 11:12:53 -07001713 if (unlikely(malloc_init())) {
Jason Evansb2c0d632016-04-13 23:36:15 -07001714 tsd = NULL;
Jason Evans289053c2009-06-22 12:08:42 -07001715 result = NULL;
Jason Evansb2c31662014-01-12 15:05:44 -08001716 goto label_oom;
Jason Evans289053c2009-06-22 12:08:42 -07001717 }
Jason Evansdc0610a2015-06-22 18:48:58 -07001718 tsd = tsd_fetch();
Jason Evansc1e00ef2016-05-10 22:21:10 -07001719 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansdc0610a2015-06-22 18:48:58 -07001720 if (size == 0)
1721 size = 1;
1722
1723 /* Make sure that alignment is a large enough power of 2. */
1724 if (unlikely(((alignment - 1) & alignment) != 0
1725 || (alignment < min_alignment))) {
1726 if (config_xmalloc && unlikely(opt_xmalloc)) {
1727 malloc_write("<jemalloc>: Error allocating "
1728 "aligned memory: invalid alignment\n");
1729 abort();
1730 }
1731 result = NULL;
1732 ret = EINVAL;
1733 goto label_return;
1734 }
1735
1736 usize = sa2u(size, alignment);
Jason Evans0c516a02016-02-25 15:29:49 -08001737 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
Jason Evansdc0610a2015-06-22 18:48:58 -07001738 result = NULL;
1739 goto label_oom;
1740 }
1741
1742 if (config_prof && opt_prof)
1743 result = imemalign_prof(tsd, alignment, usize);
1744 else
1745 result = ipalloc(tsd, usize, alignment, false);
1746 if (unlikely(result == NULL))
1747 goto label_oom;
1748 assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
Jason Evans289053c2009-06-22 12:08:42 -07001749
1750 *memptr = result;
1751 ret = 0;
Jason Evansa1ee7832012-04-10 15:07:44 -07001752label_return:
Jason Evans9c640bf2014-09-11 16:20:44 -07001753 if (config_stats && likely(result != NULL)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001754 assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
Jason Evans5460aa62014-09-22 21:09:23 -07001755 *tsd_thread_allocatedp_get(tsd) += usize;
Jason Evans93443682010-10-20 17:39:18 -07001756 }
Jason Evansb1476112012-04-05 13:36:17 -07001757 UTRACE(0, size, result);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001758 JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
1759 false);
1760 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evans289053c2009-06-22 12:08:42 -07001761 return (ret);
Jason Evansb2c31662014-01-12 15:05:44 -08001762label_oom:
1763 assert(result == NULL);
Jason Evans9c640bf2014-09-11 16:20:44 -07001764 if (config_xmalloc && unlikely(opt_xmalloc)) {
Jason Evansb2c31662014-01-12 15:05:44 -08001765 malloc_write("<jemalloc>: Error allocating aligned memory: "
1766 "out of memory\n");
1767 abort();
1768 }
1769 ret = ENOMEM;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001770 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansb2c31662014-01-12 15:05:44 -08001771 goto label_return;
Jason Evans289053c2009-06-22 12:08:42 -07001772}
1773
Jason Evans00632602015-07-21 08:10:38 -07001774JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1775JEMALLOC_ATTR(nonnull(1))
Jason Evans0a5489e2012-03-01 17:19:20 -08001776je_posix_memalign(void **memptr, size_t alignment, size_t size)
Jason Evansa5070042011-08-12 13:48:27 -07001777{
Jason Evansb2c0d632016-04-13 23:36:15 -07001778 int ret;
1779
1780 ret = imemalign(memptr, alignment, size, sizeof(void *));
1781
Jason Evans122449b2012-04-06 00:35:09 -07001782 return (ret);
Jason Evans0a0bbf62012-03-13 12:55:21 -07001783}
1784
Matthijsc1a6a512015-07-27 22:48:27 +02001785JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1786void JEMALLOC_NOTHROW *
Jason Evans00632602015-07-21 08:10:38 -07001787JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
Jason Evans0a0bbf62012-03-13 12:55:21 -07001788je_aligned_alloc(size_t alignment, size_t size)
1789{
1790 void *ret;
1791 int err;
1792
Jason Evans9c640bf2014-09-11 16:20:44 -07001793 if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
Jason Evans0a0bbf62012-03-13 12:55:21 -07001794 ret = NULL;
Mike Hommeya14bce82012-04-30 12:38:26 +02001795 set_errno(err);
Jason Evans0a0bbf62012-03-13 12:55:21 -07001796 }
Jason Evansb2c0d632016-04-13 23:36:15 -07001797
Jason Evans0a0bbf62012-03-13 12:55:21 -07001798 return (ret);
Jason Evansa5070042011-08-12 13:48:27 -07001799}
1800
Matthijsc1a6a512015-07-27 22:48:27 +02001801JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1802void JEMALLOC_NOTHROW *
Jason Evans00632602015-07-21 08:10:38 -07001803JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
Jason Evans0a5489e2012-03-01 17:19:20 -08001804je_calloc(size_t num, size_t size)
Jason Evans289053c2009-06-22 12:08:42 -07001805{
1806 void *ret;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001807 tsdn_t *tsdn;
Jason Evans289053c2009-06-22 12:08:42 -07001808 size_t num_size;
Jason Evans8694e2e2012-04-23 13:05:32 -07001809 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans289053c2009-06-22 12:08:42 -07001810
Jason Evans289053c2009-06-22 12:08:42 -07001811 num_size = num * size;
Jason Evans9c640bf2014-09-11 16:20:44 -07001812 if (unlikely(num_size == 0)) {
Jason Evansc90ad712012-02-28 20:31:37 -08001813 if (num == 0 || size == 0)
Jason Evans289053c2009-06-22 12:08:42 -07001814 num_size = 1;
Jason Evans3ef51d72016-05-06 12:16:00 -07001815 else
1816 num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
Jason Evans289053c2009-06-22 12:08:42 -07001817 /*
1818 * Try to avoid division here. We know that it isn't possible to
1819 * overflow during multiplication if neither operand uses any of the
1820 * most significant half of the bits in a size_t.
1821 */
Jason Evans9c640bf2014-09-11 16:20:44 -07001822 } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
Jason Evans3ef51d72016-05-06 12:16:00 -07001823 2))) && (num_size / size != num)))
1824 num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */
Jason Evans289053c2009-06-22 12:08:42 -07001825
Jason Evans3ef51d72016-05-06 12:16:00 -07001826 if (likely(!malloc_slow)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001827 ret = ialloc_body(num_size, true, &tsdn, &usize, false);
1828 ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
Jason Evans7372b152012-02-10 20:22:09 -08001829 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001830 ret = ialloc_body(num_size, true, &tsdn, &usize, true);
1831 ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
Jason Evans3ef51d72016-05-06 12:16:00 -07001832 UTRACE(0, num_size, ret);
Elliot Ronaghan9de00942016-06-07 14:27:24 -07001833 JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
Jason Evans93443682010-10-20 17:39:18 -07001834 }
Jason Evans289053c2009-06-22 12:08:42 -07001835
Jason Evans289053c2009-06-22 12:08:42 -07001836 return (ret);
1837}
1838
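/* Profiling-sampled reallocation helper for the je_realloc() path. */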
Jason Evansb2c31662014-01-12 15:05:44 -08001839static void *
Jason Evansd9704042015-09-14 23:28:32 -07001840irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
Daniel Micayd33f8342014-10-24 13:18:57 -04001841 prof_tctx_t *tctx)
Jason Evansb2c31662014-01-12 15:05:44 -08001842{
1843 void *p;
1844
Jason Evans602c8e02014-08-18 16:22:13 -07001845 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08001846 return (NULL);
Jason Evans9b0cbf02014-04-11 14:24:51 -07001847 if (usize <= SMALL_MAXCLASS) {
Jason Evansd9704042015-09-14 23:28:32 -07001848 p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
Jason Evansb2c31662014-01-12 15:05:44 -08001849 if (p == NULL)
1850 return (NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001851 arena_prof_promoted(tsd_tsdn(tsd), p, usize);
Jason Evansb2c31662014-01-12 15:05:44 -08001852 } else
Jason Evansd9704042015-09-14 23:28:32 -07001853 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
Jason Evansb2c31662014-01-12 15:05:44 -08001854
1855 return (p);
1856}
1857
1858JEMALLOC_ALWAYS_INLINE_C void *
Jason Evansd9704042015-09-14 23:28:32 -07001859irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
Jason Evansb2c31662014-01-12 15:05:44 -08001860{
1861 void *p;
Jason Evanscec0d632015-09-14 23:17:25 -07001862 bool prof_active;
Jason Evans6e73dc12014-09-09 19:37:26 -07001863 prof_tctx_t *old_tctx, *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08001864
Jason Evanscec0d632015-09-14 23:17:25 -07001865 prof_active = prof_active_get_unlocked();
Jason Evansc1e00ef2016-05-10 22:21:10 -07001866 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
Jason Evanscec0d632015-09-14 23:17:25 -07001867 tctx = prof_alloc_prep(tsd, usize, prof_active, true);
Jason Evans9c640bf2014-09-11 16:20:44 -07001868 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
Jason Evansd9704042015-09-14 23:28:32 -07001869 p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001870 else
Jason Evansd9704042015-09-14 23:28:32 -07001871 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
Jason Evansef363de2015-09-14 22:45:31 -07001872 if (unlikely(p == NULL)) {
1873 prof_alloc_rollback(tsd, tctx, true);
Jason Evansb2c31662014-01-12 15:05:44 -08001874 return (NULL);
Jason Evansef363de2015-09-14 22:45:31 -07001875 }
Jason Evans708ed792015-09-14 23:48:11 -07001876 prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
Jason Evanscec0d632015-09-14 23:17:25 -07001877 old_tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001878
1879 return (p);
1880}
1881
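/*
 * Deallocation path shared by je_free() and realloc(ptr, 0): update profiling
 * and per-thread deallocation stats for ptr, then release it via iqalloc().
 * The slow path additionally handles Valgrind redzone bookkeeping.
 */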
1882JEMALLOC_INLINE_C void
Qi Wangf4a0f322015-10-27 15:12:10 -07001883ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
Jason Evansb2c31662014-01-12 15:05:44 -08001884{
1885 size_t usize;
1886 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1887
Jason Evansc1e00ef2016-05-10 22:21:10 -07001888 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansb2c0d632016-04-13 23:36:15 -07001889
Jason Evansb2c31662014-01-12 15:05:44 -08001890 assert(ptr != NULL);
Jason Evans10aff3f2015-01-20 15:37:51 -08001891 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansb2c31662014-01-12 15:05:44 -08001892
1893 if (config_prof && opt_prof) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001894 usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
Jason Evans5460aa62014-09-22 21:09:23 -07001895 prof_free(tsd, ptr, usize);
Jason Evansb2c31662014-01-12 15:05:44 -08001896 } else if (config_stats || config_valgrind)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001897 usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
Jason Evans029d44c2014-10-04 11:12:53 -07001898 if (config_stats)
Jason Evans5460aa62014-09-22 21:09:23 -07001899 *tsd_thread_deallocatedp_get(tsd) += usize;
Qi Wangf4a0f322015-10-27 15:12:10 -07001900
1901 if (likely(!slow_path))
1902 iqalloc(tsd, ptr, tcache, false);
1903 else {
1904 if (config_valgrind && unlikely(in_valgrind))
Jason Evansc1e00ef2016-05-10 22:21:10 -07001905 rzsize = p2rz(tsd_tsdn(tsd), ptr);
Qi Wangf4a0f322015-10-27 15:12:10 -07001906 iqalloc(tsd, ptr, tcache, true);
1907 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1908 }
Jason Evansb2c31662014-01-12 15:05:44 -08001909}
1910
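/*
 * Sized variant of ifree(), for callers that already know the usable size of
 * ptr; releases via isqalloc().
 */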
Daniel Micay4cfe5512014-08-28 15:41:48 -04001911JEMALLOC_INLINE_C void
Jason Evans3ef51d72016-05-06 12:16:00 -07001912isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
Daniel Micay4cfe5512014-08-28 15:41:48 -04001913{
1914 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1915
Jason Evansc1e00ef2016-05-10 22:21:10 -07001916 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansb2c0d632016-04-13 23:36:15 -07001917
Daniel Micay4cfe5512014-08-28 15:41:48 -04001918 assert(ptr != NULL);
Jason Evans10aff3f2015-01-20 15:37:51 -08001919 assert(malloc_initialized() || IS_INITIALIZER);
Daniel Micay4cfe5512014-08-28 15:41:48 -04001920
1921 if (config_prof && opt_prof)
Jason Evans5460aa62014-09-22 21:09:23 -07001922 prof_free(tsd, ptr, usize);
Jason Evans029d44c2014-10-04 11:12:53 -07001923 if (config_stats)
Jason Evans5460aa62014-09-22 21:09:23 -07001924 *tsd_thread_deallocatedp_get(tsd) += usize;
Jason Evans9c640bf2014-09-11 16:20:44 -07001925 if (config_valgrind && unlikely(in_valgrind))
Jason Evansc1e00ef2016-05-10 22:21:10 -07001926 rzsize = p2rz(tsd_tsdn(tsd), ptr);
Jason Evans3ef51d72016-05-06 12:16:00 -07001927 isqalloc(tsd, ptr, usize, tcache, slow_path);
Daniel Micay4cfe5512014-08-28 15:41:48 -04001928 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1929}
1930
Matthijsc1a6a512015-07-27 22:48:27 +02001931JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1932void JEMALLOC_NOTHROW *
Jason Evans00632602015-07-21 08:10:38 -07001933JEMALLOC_ALLOC_SIZE(2)
Jason Evans0a5489e2012-03-01 17:19:20 -08001934je_realloc(void *ptr, size_t size)
Jason Evans289053c2009-06-22 12:08:42 -07001935{
1936 void *ret;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001937 tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans8694e2e2012-04-23 13:05:32 -07001938 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans66576932013-12-15 16:21:30 -08001939 size_t old_usize = 0;
Jason Evans73692322013-12-10 13:51:52 -08001940 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans6109fe02010-02-10 10:37:56 -08001941
Jason Evans9c640bf2014-09-11 16:20:44 -07001942 if (unlikely(size == 0)) {
Jason Evansf081b882012-02-28 20:24:05 -08001943 if (ptr != NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001944 tsd_t *tsd;
1945
Jason Evansb2c31662014-01-12 15:05:44 -08001946 /* realloc(ptr, 0) is equivalent to free(ptr). */
1947 UTRACE(ptr, 0, 0);
Jason Evans029d44c2014-10-04 11:12:53 -07001948 tsd = tsd_fetch();
Qi Wangf4a0f322015-10-27 15:12:10 -07001949 ifree(tsd, ptr, tcache_get(tsd, false), true);
Jason Evansb2c31662014-01-12 15:05:44 -08001950 return (NULL);
1951 }
1952 size = 1;
Jason Evans289053c2009-06-22 12:08:42 -07001953 }
1954
Jason Evans9c640bf2014-09-11 16:20:44 -07001955 if (likely(ptr != NULL)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001956 tsd_t *tsd;
1957
Jason Evans10aff3f2015-01-20 15:37:51 -08001958 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansbbe29d32013-01-30 15:03:11 -08001959 malloc_thread_init();
Jason Evans029d44c2014-10-04 11:12:53 -07001960 tsd = tsd_fetch();
Jason Evans289053c2009-06-22 12:08:42 -07001961
Jason Evansc1e00ef2016-05-10 22:21:10 -07001962 witness_assert_lockless(tsd_tsdn(tsd));
1963
1964 old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
Jason Evansb2c0d632016-04-13 23:36:15 -07001965 if (config_valgrind && unlikely(in_valgrind)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001966 old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
Jason Evansb2c0d632016-04-13 23:36:15 -07001967 u2rz(old_usize);
1968 }
Jason Evansb2c31662014-01-12 15:05:44 -08001969
Jason Evans029d44c2014-10-04 11:12:53 -07001970 if (config_prof && opt_prof) {
1971 usize = s2u(size);
Jason Evans0c516a02016-02-25 15:29:49 -08001972 ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
1973 NULL : irealloc_prof(tsd, ptr, old_usize, usize);
Jason Evans029d44c2014-10-04 11:12:53 -07001974 } else {
1975 if (config_stats || (config_valgrind &&
1976 unlikely(in_valgrind)))
Jason Evans7372b152012-02-10 20:22:09 -08001977 usize = s2u(size);
Daniel Micayd33f8342014-10-24 13:18:57 -04001978 ret = iralloc(tsd, ptr, old_usize, size, 0, false);
Jason Evans029d44c2014-10-04 11:12:53 -07001979 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001980 tsdn = tsd_tsdn(tsd);
Jason Evans289053c2009-06-22 12:08:42 -07001981 } else {
Jason Evansf081b882012-02-28 20:24:05 -08001982 /* realloc(NULL, size) is equivalent to malloc(size). */
Qi Wangf4a0f322015-10-27 15:12:10 -07001983 if (likely(!malloc_slow))
Jason Evansc1e00ef2016-05-10 22:21:10 -07001984 ret = ialloc_body(size, false, &tsdn, &usize, false);
Qi Wangf4a0f322015-10-27 15:12:10 -07001985 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07001986 ret = ialloc_body(size, false, &tsdn, &usize, true);
1987 assert(!tsdn_null(tsdn) || ret == NULL);
Jason Evans289053c2009-06-22 12:08:42 -07001988 }
1989
Jason Evans9c640bf2014-09-11 16:20:44 -07001990 if (unlikely(ret == NULL)) {
1991 if (config_xmalloc && unlikely(opt_xmalloc)) {
Jason Evansb2c31662014-01-12 15:05:44 -08001992 malloc_write("<jemalloc>: Error in realloc(): "
1993 "out of memory\n");
1994 abort();
1995 }
1996 set_errno(ENOMEM);
1997 }
Jason Evans9c640bf2014-09-11 16:20:44 -07001998 if (config_stats && likely(ret != NULL)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001999 tsd_t *tsd;
2000
2001 assert(usize == isalloc(tsdn, ret, config_prof));
2002 tsd = tsdn_tsd(tsdn);
Jason Evans029d44c2014-10-04 11:12:53 -07002003 *tsd_thread_allocatedp_get(tsd) += usize;
2004 *tsd_thread_deallocatedp_get(tsd) += old_usize;
Jason Evans93443682010-10-20 17:39:18 -07002005 }
Jason Evansb1476112012-04-05 13:36:17 -07002006 UTRACE(ptr, size, ret);
Jason Evans0d6a4722016-11-16 18:53:59 -08002007 JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr,
2008 old_usize, old_rzsize, maybe, false);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002009 witness_assert_lockless(tsdn);
Jason Evans289053c2009-06-22 12:08:42 -07002010 return (ret);
2011}
2012
Jason Evans00632602015-07-21 08:10:38 -07002013JEMALLOC_EXPORT void JEMALLOC_NOTHROW
Jason Evans0a5489e2012-03-01 17:19:20 -08002014je_free(void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07002015{
2016
Jason Evansb1476112012-04-05 13:36:17 -07002017 UTRACE(ptr, 0, 0);
Jason Evans1cb181e2015-01-29 15:30:47 -08002018 if (likely(ptr != NULL)) {
2019 tsd_t *tsd = tsd_fetch();
Jason Evansc1e00ef2016-05-10 22:21:10 -07002020 witness_assert_lockless(tsd_tsdn(tsd));
Qi Wangf4a0f322015-10-27 15:12:10 -07002021 if (likely(!malloc_slow))
2022 ifree(tsd, ptr, tcache_get(tsd, false), false);
2023 else
2024 ifree(tsd, ptr, tcache_get(tsd, false), true);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002025 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evans1cb181e2015-01-29 15:30:47 -08002026 }
Jason Evans289053c2009-06-22 12:08:42 -07002027}
2028
2029/*
2030 * End malloc(3)-compatible functions.
2031 */
2032/******************************************************************************/
2033/*
Jason Evans6a0d2912010-09-20 16:44:23 -07002034 * Begin non-standard override functions.
Jason Evans6a0d2912010-09-20 16:44:23 -07002035 */
Jason Evans6a0d2912010-09-20 16:44:23 -07002036
2037#ifdef JEMALLOC_OVERRIDE_MEMALIGN
Matthijsc1a6a512015-07-27 22:48:27 +02002038JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2039void JEMALLOC_NOTHROW *
Jason Evansae93d6b2015-07-10 14:33:00 -07002040JEMALLOC_ATTR(malloc)
Jason Evans0a5489e2012-03-01 17:19:20 -08002041je_memalign(size_t alignment, size_t size)
Jason Evans6a0d2912010-09-20 16:44:23 -07002042{
Jason Evans9225a192012-03-23 15:39:07 -07002043 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans44b57b82015-01-16 18:04:17 -08002044 if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
2045 ret = NULL;
Jason Evans6a0d2912010-09-20 16:44:23 -07002046 return (ret);
2047}
2048#endif
2049
2050#ifdef JEMALLOC_OVERRIDE_VALLOC
Matthijsc1a6a512015-07-27 22:48:27 +02002051JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2052void JEMALLOC_NOTHROW *
Jason Evansae93d6b2015-07-10 14:33:00 -07002053JEMALLOC_ATTR(malloc)
Jason Evans0a5489e2012-03-01 17:19:20 -08002054je_valloc(size_t size)
Jason Evans6a0d2912010-09-20 16:44:23 -07002055{
Jason Evans9225a192012-03-23 15:39:07 -07002056 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans44b57b82015-01-16 18:04:17 -08002057 if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
2058 ret = NULL;
Jason Evans6a0d2912010-09-20 16:44:23 -07002059 return (ret);
2060}
2061#endif
2062
Mike Hommey5c89c502012-03-26 17:46:57 +02002063/*
2064 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
2065 * #define je_malloc malloc
2066 */
2067#define malloc_is_malloc 1
2068#define is_malloc_(a) malloc_is_ ## a
2069#define is_malloc(a) is_malloc_(a)
2070
Sara Golemon3e24afa2014-08-18 13:06:39 -07002071#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
Jason Evans4bb09832012-02-29 10:37:27 -08002072/*
2073 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
2074 * to inconsistently reference libc's malloc(3)-compatible functions
2075 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
2076 *
Mike Hommey3c2ba0d2012-03-27 14:20:13 +02002077 * These definitions interpose hooks in glibc. The functions are actually
Jason Evans4bb09832012-02-29 10:37:27 -08002078 * passed an extra argument for the caller return address, which will be
2079 * ignored.
2080 */
Jason Evansa344dd02014-05-01 15:51:30 -07002081JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2082JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2083JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
Sara Golemon3e24afa2014-08-18 13:06:39 -07002084# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
Jason Evansa344dd02014-05-01 15:51:30 -07002085JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
Mike Hommeyda99e312012-04-30 12:38:29 +02002086 je_memalign;
Sara Golemon3e24afa2014-08-18 13:06:39 -07002087# endif
Dave Watsoned847642016-10-28 13:51:52 -07002088
Dave Watson6c56e192016-11-02 18:22:32 -07002089#ifdef CPU_COUNT
Dave Watsoned847642016-10-28 13:51:52 -07002090/*
2091 * To enable static linking with glibc, the libc-specific malloc interface
2092 * must also be implemented, so that none of glibc's malloc.o functions are
2093 * added to the link.
2094 */
2095#define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
2096/* To force macro expansion of je_ prefix before stringification. */
2097#define PREALIAS(je_fn) ALIAS(je_fn)
2098void *__libc_malloc(size_t size) PREALIAS(je_malloc);
2099void __libc_free(void *ptr) PREALIAS(je_free);
2100void *__libc_realloc(void *ptr, size_t size) PREALIAS(je_realloc);
2101void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
2102void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
2103void *__libc_valloc(size_t size) PREALIAS(je_valloc);
2104int __posix_memalign(void **r, size_t a, size_t s)
2105 PREALIAS(je_posix_memalign);
2106#undef PREALIAS
2107#undef ALIAS
Dave Watson6c56e192016-11-02 18:22:32 -07002108
2109#endif
2110
Jason Evans4bb09832012-02-29 10:37:27 -08002111#endif
2112
Jason Evans6a0d2912010-09-20 16:44:23 -07002113/*
2114 * End non-standard override functions.
2115 */
2116/******************************************************************************/
2117/*
Jason Evans289053c2009-06-22 12:08:42 -07002118 * Begin non-standard functions.
2119 */
2120
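/*
 * Decode MALLOCX_*() flags (e.g. MALLOCX_ALIGN(64) | MALLOCX_ZERO) into
 * usize, alignment, zero, tcache, and arena.  Returns true on error (size
 * overflow, or an explicitly requested arena that does not exist).
 */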
Jason Evans8bb31982014-10-07 23:14:57 -07002121JEMALLOC_ALWAYS_INLINE_C bool
Jason Evans3ef51d72016-05-06 12:16:00 -07002122imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
Jason Evans1cb181e2015-01-29 15:30:47 -08002123 size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
Jason Evansb718cf72014-09-07 14:40:19 -07002124{
2125
2126 if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
2127 *alignment = 0;
2128 *usize = s2u(size);
2129 } else {
2130 *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2131 *usize = sa2u(size, *alignment);
2132 }
Jason Evans0c516a02016-02-25 15:29:49 -08002133 if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
2134 return (true);
Jason Evansb718cf72014-09-07 14:40:19 -07002135 *zero = MALLOCX_ZERO_GET(flags);
Jason Evans1cb181e2015-01-29 15:30:47 -08002136 if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2137 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2138 *tcache = NULL;
2139 else
2140 *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2141 } else
2142 *tcache = tcache_get(tsd, true);
Jason Evansb718cf72014-09-07 14:40:19 -07002143 if ((flags & MALLOCX_ARENA_MASK) != 0) {
2144 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002145 *arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
Jason Evans8bb31982014-10-07 23:14:57 -07002146 if (unlikely(*arena == NULL))
2147 return (true);
Jason Evans1cb181e2015-01-29 15:30:47 -08002148 } else
Jason Evansb718cf72014-09-07 14:40:19 -07002149 *arena = NULL;
Jason Evans8bb31982014-10-07 23:14:57 -07002150 return (false);
Jason Evansb718cf72014-09-07 14:40:19 -07002151}
2152
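/*
 * Allocate according to already-decoded mallocx() parameters; alignment
 * requests go through ipalloct(), everything else through iallocztm().
 */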
Jason Evansd82a5e62013-12-12 22:35:52 -08002153JEMALLOC_ALWAYS_INLINE_C void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002154imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
Jason Evans3ef51d72016-05-06 12:16:00 -07002155 tcache_t *tcache, arena_t *arena, bool slow_path)
Jason Evans289053c2009-06-22 12:08:42 -07002156{
Qi Wangf4a0f322015-10-27 15:12:10 -07002157 szind_t ind;
Jason Evansd82a5e62013-12-12 22:35:52 -08002158
Jason Evans3263be62015-09-17 10:19:28 -07002159 if (unlikely(alignment != 0))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002160 return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
Jason Evans9d2c10f2016-02-25 16:42:15 -08002161 ind = size2index(usize);
2162 assert(ind < NSIZES);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002163 return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
Jason Evans3ef51d72016-05-06 12:16:00 -07002164 slow_path));
Jason Evansd82a5e62013-12-12 22:35:52 -08002165}
2166
Jason Evansb2c31662014-01-12 15:05:44 -08002167static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002168imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
Jason Evans3ef51d72016-05-06 12:16:00 -07002169 tcache_t *tcache, arena_t *arena, bool slow_path)
Jason Evansb2c31662014-01-12 15:05:44 -08002170{
2171 void *p;
2172
Jason Evans9b0cbf02014-04-11 14:24:51 -07002173 if (usize <= SMALL_MAXCLASS) {
Jason Evansb718cf72014-09-07 14:40:19 -07002174 assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
2175 sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002176 p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
2177 tcache, arena, slow_path);
Jason Evansb2c31662014-01-12 15:05:44 -08002178 if (p == NULL)
2179 return (NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002180 arena_prof_promoted(tsdn, p, usize);
Jason Evans3ef51d72016-05-06 12:16:00 -07002181 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002182 p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
Jason Evans3ef51d72016-05-06 12:16:00 -07002183 slow_path);
2184 }
Jason Evansb2c31662014-01-12 15:05:44 -08002185
2186 return (p);
2187}
2188
2189JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans3ef51d72016-05-06 12:16:00 -07002190imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
Jason Evansb2c31662014-01-12 15:05:44 -08002191{
2192 void *p;
Jason Evansb718cf72014-09-07 14:40:19 -07002193 size_t alignment;
2194 bool zero;
Jason Evans1cb181e2015-01-29 15:30:47 -08002195 tcache_t *tcache;
Jason Evansb718cf72014-09-07 14:40:19 -07002196 arena_t *arena;
2197 prof_tctx_t *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08002198
Jason Evans8bb31982014-10-07 23:14:57 -07002199 if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
Jason Evans1cb181e2015-01-29 15:30:47 -08002200 &zero, &tcache, &arena)))
Jason Evans8bb31982014-10-07 23:14:57 -07002201 return (NULL);
Jason Evanscec0d632015-09-14 23:17:25 -07002202 tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002203 if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
2204 p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
2205 tcache, arena, slow_path);
2206 } else if ((uintptr_t)tctx > (uintptr_t)1U) {
2207 p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
2208 tcache, arena, slow_path);
Jason Evansb2c31662014-01-12 15:05:44 -08002209 } else
Jason Evansb718cf72014-09-07 14:40:19 -07002210 p = NULL;
Jason Evans9c640bf2014-09-11 16:20:44 -07002211 if (unlikely(p == NULL)) {
Jason Evans5460aa62014-09-22 21:09:23 -07002212 prof_alloc_rollback(tsd, tctx, true);
Jason Evansb2c31662014-01-12 15:05:44 -08002213 return (NULL);
Jason Evans6e73dc12014-09-09 19:37:26 -07002214 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002215 prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08002216
Jason Evansdc0610a2015-06-22 18:48:58 -07002217 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
Jason Evansb2c31662014-01-12 15:05:44 -08002218 return (p);
2219}
2220
Jason Evansb718cf72014-09-07 14:40:19 -07002221JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans3ef51d72016-05-06 12:16:00 -07002222imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
2223 bool slow_path)
Jason Evansb718cf72014-09-07 14:40:19 -07002224{
Jason Evansdc0610a2015-06-22 18:48:58 -07002225 void *p;
Jason Evansb718cf72014-09-07 14:40:19 -07002226 size_t alignment;
2227 bool zero;
Jason Evans1cb181e2015-01-29 15:30:47 -08002228 tcache_t *tcache;
Jason Evansb718cf72014-09-07 14:40:19 -07002229 arena_t *arena;
2230
Jason Evans3ef51d72016-05-06 12:16:00 -07002231 if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
2232 &zero, &tcache, &arena)))
2233 return (NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002234 p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
2235 arena, slow_path);
Jason Evans3ef51d72016-05-06 12:16:00 -07002236 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2237 return (p);
2238}
2239
Jason Evansc1e00ef2016-05-10 22:21:10 -07002240/* This function guarantees that *tsdn is non-NULL on success. */
Jason Evans3ef51d72016-05-06 12:16:00 -07002241JEMALLOC_ALWAYS_INLINE_C void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002242imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
Jason Evans3ef51d72016-05-06 12:16:00 -07002243 bool slow_path)
2244{
Jason Evansc1e00ef2016-05-10 22:21:10 -07002245 tsd_t *tsd;
Jason Evans3ef51d72016-05-06 12:16:00 -07002246
2247 if (slow_path && unlikely(malloc_init())) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002248 *tsdn = NULL;
Jason Evans3ef51d72016-05-06 12:16:00 -07002249 return (NULL);
2250 }
2251
Jason Evansc1e00ef2016-05-10 22:21:10 -07002252 tsd = tsd_fetch();
2253 *tsdn = tsd_tsdn(tsd);
2254 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evans3ef51d72016-05-06 12:16:00 -07002255
Jason Evans9c640bf2014-09-11 16:20:44 -07002256 if (likely(flags == 0)) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002257 szind_t ind = size2index(size);
Jason Evans0c516a02016-02-25 15:29:49 -08002258 if (unlikely(ind >= NSIZES))
2259 return (NULL);
Jason Evans3ef51d72016-05-06 12:16:00 -07002260 if (config_stats || (config_prof && opt_prof) || (slow_path &&
2261 config_valgrind && unlikely(in_valgrind))) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002262 *usize = index2size(ind);
Jason Evans0c516a02016-02-25 15:29:49 -08002263 assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
2264 }
Jason Evans3ef51d72016-05-06 12:16:00 -07002265
2266 if (config_prof && opt_prof) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002267 return (ialloc_prof(tsd, *usize, ind, false,
Jason Evans3ef51d72016-05-06 12:16:00 -07002268 slow_path));
2269 }
2270
Jason Evansc1e00ef2016-05-10 22:21:10 -07002271 return (ialloc(tsd, size, ind, false, slow_path));
Jason Evansb718cf72014-09-07 14:40:19 -07002272 }
2273
Jason Evans3ef51d72016-05-06 12:16:00 -07002274 if (config_prof && opt_prof)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002275 return (imallocx_prof(tsd, size, flags, usize, slow_path));
Jason Evans3ef51d72016-05-06 12:16:00 -07002276
Jason Evansc1e00ef2016-05-10 22:21:10 -07002277 return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
Jason Evansb718cf72014-09-07 14:40:19 -07002278}
2279
Matthijsc1a6a512015-07-27 22:48:27 +02002280JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2281void JEMALLOC_NOTHROW *
Jason Evans00632602015-07-21 08:10:38 -07002282JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
Jason Evansd82a5e62013-12-12 22:35:52 -08002283je_mallocx(size_t size, int flags)
2284{
Jason Evansc1e00ef2016-05-10 22:21:10 -07002285 tsdn_t *tsdn;
Jason Evansd82a5e62013-12-12 22:35:52 -08002286 void *p;
2287 size_t usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002288
2289 assert(size != 0);
2290
Jason Evans3ef51d72016-05-06 12:16:00 -07002291 if (likely(!malloc_slow)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002292 p = imallocx_body(size, flags, &tsdn, &usize, false);
2293 ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
Jason Evans3ef51d72016-05-06 12:16:00 -07002294 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002295 p = imallocx_body(size, flags, &tsdn, &usize, true);
2296 ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
Jason Evans3ef51d72016-05-06 12:16:00 -07002297 UTRACE(0, size, p);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002298 JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
Jason Evans3ef51d72016-05-06 12:16:00 -07002299 MALLOCX_ZERO_GET(flags));
Jason Evansd82a5e62013-12-12 22:35:52 -08002300 }
Jason Evansd82a5e62013-12-12 22:35:52 -08002301
Jason Evansd82a5e62013-12-12 22:35:52 -08002302 return (p);
Jason Evansd82a5e62013-12-12 22:35:52 -08002303}
2304
Jason Evansb2c31662014-01-12 15:05:44 -08002305static void *
Jason Evans4be9c792015-09-17 10:17:55 -07002306irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
2307 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
Jason Evans1cb181e2015-01-29 15:30:47 -08002308 prof_tctx_t *tctx)
Jason Evansb2c31662014-01-12 15:05:44 -08002309{
2310 void *p;
2311
Jason Evans602c8e02014-08-18 16:22:13 -07002312 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08002313 return (NULL);
Jason Evans9b0cbf02014-04-11 14:24:51 -07002314 if (usize <= SMALL_MAXCLASS) {
Jason Evansd9704042015-09-14 23:28:32 -07002315 p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
Jason Evans1cb181e2015-01-29 15:30:47 -08002316 zero, tcache, arena);
Jason Evansb2c31662014-01-12 15:05:44 -08002317 if (p == NULL)
2318 return (NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002319 arena_prof_promoted(tsd_tsdn(tsd), p, usize);
Jason Evansb2c31662014-01-12 15:05:44 -08002320 } else {
Jason Evans4be9c792015-09-17 10:17:55 -07002321 p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
Jason Evans1cb181e2015-01-29 15:30:47 -08002322 tcache, arena);
Jason Evansb2c31662014-01-12 15:05:44 -08002323 }
2324
2325 return (p);
2326}
2327
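/*
 * Profiling-aware reallocation for the rallocx() path: sample the new object
 * if the profiling machinery selects it, then record the reallocation
 * (including the old object's tctx) via prof_realloc().
 */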
2328JEMALLOC_ALWAYS_INLINE_C void *
Jason Evansd9704042015-09-14 23:28:32 -07002329irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
Jason Evans1cb181e2015-01-29 15:30:47 -08002330 size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2331 arena_t *arena)
Jason Evansb2c31662014-01-12 15:05:44 -08002332{
2333 void *p;
Jason Evanscec0d632015-09-14 23:17:25 -07002334 bool prof_active;
Jason Evans6e73dc12014-09-09 19:37:26 -07002335 prof_tctx_t *old_tctx, *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08002336
Jason Evanscec0d632015-09-14 23:17:25 -07002337 prof_active = prof_active_get_unlocked();
Jason Evansc1e00ef2016-05-10 22:21:10 -07002338 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
Jason Evansfa09fe72016-06-01 16:19:22 -07002339 tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
Jason Evans9c640bf2014-09-11 16:20:44 -07002340 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
Jason Evans4be9c792015-09-17 10:17:55 -07002341 p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
2342 alignment, zero, tcache, arena, tctx);
Jason Evans6e73dc12014-09-09 19:37:26 -07002343 } else {
Jason Evansd9704042015-09-14 23:28:32 -07002344 p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
Jason Evans1cb181e2015-01-29 15:30:47 -08002345 tcache, arena);
Jason Evansb2c31662014-01-12 15:05:44 -08002346 }
Jason Evans9c640bf2014-09-11 16:20:44 -07002347 if (unlikely(p == NULL)) {
Jason Evansfa09fe72016-06-01 16:19:22 -07002348 prof_alloc_rollback(tsd, tctx, false);
Jason Evansb2c31662014-01-12 15:05:44 -08002349 return (NULL);
Jason Evans6e73dc12014-09-09 19:37:26 -07002350 }
Jason Evansb2c31662014-01-12 15:05:44 -08002351
Jason Evansd9704042015-09-14 23:28:32 -07002352 if (p == old_ptr && alignment != 0) {
Jason Evansb2c31662014-01-12 15:05:44 -08002353 /*
2354 * The allocation did not move, so it is possible that the size
2355 * class is smaller than would guarantee the requested
2356 * alignment, and that the alignment constraint was
2357 * serendipitously satisfied. Additionally, old_usize may not
2358 * be the same as the current usize because of in-place large
2359 * reallocation. Therefore, query the actual value of usize.
2360 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002361 *usize = isalloc(tsd_tsdn(tsd), p, config_prof);
Jason Evansb2c31662014-01-12 15:05:44 -08002362 }
Jason Evansfa09fe72016-06-01 16:19:22 -07002363 prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
Jason Evanscec0d632015-09-14 23:17:25 -07002364 old_usize, old_tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08002365
2366 return (p);
2367}
2368
Matthijsc1a6a512015-07-27 22:48:27 +02002369JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2370void JEMALLOC_NOTHROW *
Jason Evans00632602015-07-21 08:10:38 -07002371JEMALLOC_ALLOC_SIZE(2)
Jason Evansd82a5e62013-12-12 22:35:52 -08002372je_rallocx(void *ptr, size_t size, int flags)
2373{
2374 void *p;
Jason Evans5460aa62014-09-22 21:09:23 -07002375 tsd_t *tsd;
Jason Evans9c640bf2014-09-11 16:20:44 -07002376 size_t usize;
Daniel Micayd33f8342014-10-24 13:18:57 -04002377 size_t old_usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002378 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evansb718cf72014-09-07 14:40:19 -07002379 size_t alignment = MALLOCX_ALIGN_GET(flags);
Jason Evansd82a5e62013-12-12 22:35:52 -08002380 bool zero = flags & MALLOCX_ZERO;
Jason Evansd82a5e62013-12-12 22:35:52 -08002381 arena_t *arena;
Jason Evans1cb181e2015-01-29 15:30:47 -08002382 tcache_t *tcache;
Jason Evansd82a5e62013-12-12 22:35:52 -08002383
2384 assert(ptr != NULL);
2385 assert(size != 0);
Jason Evans10aff3f2015-01-20 15:37:51 -08002386 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansd82a5e62013-12-12 22:35:52 -08002387 malloc_thread_init();
Jason Evans029d44c2014-10-04 11:12:53 -07002388 tsd = tsd_fetch();
Jason Evansc1e00ef2016-05-10 22:21:10 -07002389 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evans5460aa62014-09-22 21:09:23 -07002390
Jason Evans9c640bf2014-09-11 16:20:44 -07002391 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
Jason Evansb718cf72014-09-07 14:40:19 -07002392 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002393 arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
Jason Evans8bb31982014-10-07 23:14:57 -07002394 if (unlikely(arena == NULL))
2395 goto label_oom;
Jason Evans1cb181e2015-01-29 15:30:47 -08002396 } else
Jason Evansd82a5e62013-12-12 22:35:52 -08002397 arena = NULL;
Jason Evans1cb181e2015-01-29 15:30:47 -08002398
2399 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2400 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2401 tcache = NULL;
2402 else
2403 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2404 } else
2405 tcache = tcache_get(tsd, true);
Jason Evansd82a5e62013-12-12 22:35:52 -08002406
Jason Evansc1e00ef2016-05-10 22:21:10 -07002407 old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
Jason Evans9c640bf2014-09-11 16:20:44 -07002408 if (config_valgrind && unlikely(in_valgrind))
Jason Evansb2c31662014-01-12 15:05:44 -08002409 old_rzsize = u2rz(old_usize);
2410
Jason Evansd82a5e62013-12-12 22:35:52 -08002411 if (config_prof && opt_prof) {
Jason Evansb2c31662014-01-12 15:05:44 -08002412 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
Jason Evans0c516a02016-02-25 15:29:49 -08002413 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
2414 goto label_oom;
Jason Evans5460aa62014-09-22 21:09:23 -07002415 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
Jason Evans1cb181e2015-01-29 15:30:47 -08002416 zero, tcache, arena);
Jason Evans9c640bf2014-09-11 16:20:44 -07002417 if (unlikely(p == NULL))
Jason Evansd82a5e62013-12-12 22:35:52 -08002418 goto label_oom;
Jason Evansd82a5e62013-12-12 22:35:52 -08002419 } else {
Daniel Micayd33f8342014-10-24 13:18:57 -04002420 p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
Jason Evans1cb181e2015-01-29 15:30:47 -08002421 tcache, arena);
Jason Evans9c640bf2014-09-11 16:20:44 -07002422 if (unlikely(p == NULL))
Jason Evansd82a5e62013-12-12 22:35:52 -08002423 goto label_oom;
Jason Evans9c640bf2014-09-11 16:20:44 -07002424 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002425 usize = isalloc(tsd_tsdn(tsd), p, config_prof);
Jason Evansd82a5e62013-12-12 22:35:52 -08002426 }
Jason Evansdc0610a2015-06-22 18:48:58 -07002427 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
Jason Evansd82a5e62013-12-12 22:35:52 -08002428
2429 if (config_stats) {
Jason Evans5460aa62014-09-22 21:09:23 -07002430 *tsd_thread_allocatedp_get(tsd) += usize;
2431 *tsd_thread_deallocatedp_get(tsd) += old_usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002432 }
2433 UTRACE(ptr, size, p);
Jason Evans0d6a4722016-11-16 18:53:59 -08002434 JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
2435 old_usize, old_rzsize, no, zero);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002436 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansd82a5e62013-12-12 22:35:52 -08002437 return (p);
2438label_oom:
Jason Evans9c640bf2014-09-11 16:20:44 -07002439 if (config_xmalloc && unlikely(opt_xmalloc)) {
Jason Evansd82a5e62013-12-12 22:35:52 -08002440 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2441 abort();
2442 }
2443 UTRACE(ptr, size, 0);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002444 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansd82a5e62013-12-12 22:35:52 -08002445 return (NULL);
2446}
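/*
 * Illustrative caller-side sketch (not part of this translation unit).  It
 * assumes the default unprefixed public names declared in
 * <jemalloc/jemalloc.h>; handle_oom() is a hypothetical error handler.
 *
 *	void *p = mallocx(100, 0);
 *	p = rallocx(p, 4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *	if (p == NULL)
 *		handle_oom();
 *
 * MALLOCX_ZERO zeroes only the newly usable tail when the allocation grows,
 * and MALLOCX_ALIGN(64) requests 64-byte alignment for the result.
 */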
2447
Jason Evansb2c31662014-01-12 15:05:44 -08002448JEMALLOC_ALWAYS_INLINE_C size_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07002449ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
Jason Evans243f7a02016-02-19 20:09:31 -08002450 size_t extra, size_t alignment, bool zero)
Jason Evansb2c31662014-01-12 15:05:44 -08002451{
2452 size_t usize;
2453
Jason Evansc1e00ef2016-05-10 22:21:10 -07002454 if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
Jason Evansb2c31662014-01-12 15:05:44 -08002455 return (old_usize);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002456 usize = isalloc(tsdn, ptr, config_prof);
Jason Evansb2c31662014-01-12 15:05:44 -08002457
2458 return (usize);
2459}
2460
2461static size_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07002462ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
Jason Evans243f7a02016-02-19 20:09:31 -08002463 size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
Jason Evansb2c31662014-01-12 15:05:44 -08002464{
2465 size_t usize;
2466
Jason Evans602c8e02014-08-18 16:22:13 -07002467 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08002468 return (old_usize);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002469 usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
Jason Evans243f7a02016-02-19 20:09:31 -08002470 zero);
Jason Evansb2c31662014-01-12 15:05:44 -08002471
2472 return (usize);
2473}
2474
2475JEMALLOC_ALWAYS_INLINE_C size_t
Jason Evans5460aa62014-09-22 21:09:23 -07002476ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
Daniel Micaydc652132014-10-30 23:23:16 -04002477 size_t extra, size_t alignment, bool zero)
Jason Evansb2c31662014-01-12 15:05:44 -08002478{
Jason Evansce9a4e32015-09-14 23:31:02 -07002479 size_t usize_max, usize;
Jason Evanscec0d632015-09-14 23:17:25 -07002480 bool prof_active;
Jason Evans6e73dc12014-09-09 19:37:26 -07002481 prof_tctx_t *old_tctx, *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08002482
Jason Evanscec0d632015-09-14 23:17:25 -07002483 prof_active = prof_active_get_unlocked();
Jason Evansc1e00ef2016-05-10 22:21:10 -07002484 old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
Jason Evans6e73dc12014-09-09 19:37:26 -07002485 /*
2486 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2487 * Therefore, compute its maximum possible value and use that in
2488 * prof_alloc_prep() to decide whether to capture a backtrace.
2489 * prof_realloc() will use the actual usize to decide whether to sample.
2490 */
Jason Evans9d2c10f2016-02-25 16:42:15 -08002491 if (alignment == 0) {
2492 usize_max = s2u(size+extra);
2493 assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
2494 } else {
2495 usize_max = sa2u(size+extra, alignment);
2496 if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
2497 /*
2498 * usize_max is out of range, and chances are that
2499 * allocation will fail, but use the maximum possible
2500 * value and carry on with prof_alloc_prep(), just in
2501 * case allocation succeeds.
2502 */
2503 usize_max = HUGE_MAXCLASS;
2504 }
2505 }
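	/*
	 * For example, size = 100 and extra = 50 with no alignment yield
	 * usize_max = s2u(150); the usize later reported to prof_realloc()
	 * may be smaller if ixalloc() cannot make use of all of extra.
	 */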
Jason Evansce9a4e32015-09-14 23:31:02 -07002506 tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
Jason Evans9d2c10f2016-02-25 16:42:15 -08002507
Jason Evans9c640bf2014-09-11 16:20:44 -07002508 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002509 usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2510 size, extra, alignment, zero, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08002511 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002512 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2513 extra, alignment, zero);
Jason Evansb2c31662014-01-12 15:05:44 -08002514 }
Jason Evans38e2c8f2015-09-17 10:05:56 -07002515 if (usize == old_usize) {
Jason Evans5460aa62014-09-22 21:09:23 -07002516 prof_alloc_rollback(tsd, tctx, false);
Jason Evansb2c31662014-01-12 15:05:44 -08002517 return (usize);
Jason Evans6e73dc12014-09-09 19:37:26 -07002518 }
Jason Evans708ed792015-09-14 23:48:11 -07002519 prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
Jason Evanscec0d632015-09-14 23:17:25 -07002520 old_tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08002521
2522 return (usize);
2523}
2524
Jason Evans00632602015-07-21 08:10:38 -07002525JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
Jason Evansd82a5e62013-12-12 22:35:52 -08002526je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2527{
Jason Evans5460aa62014-09-22 21:09:23 -07002528 tsd_t *tsd;
Jason Evans66576932013-12-15 16:21:30 -08002529 size_t usize, old_usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002530 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evansb718cf72014-09-07 14:40:19 -07002531 size_t alignment = MALLOCX_ALIGN_GET(flags);
Jason Evansd82a5e62013-12-12 22:35:52 -08002532 bool zero = flags & MALLOCX_ZERO;
Jason Evansd82a5e62013-12-12 22:35:52 -08002533
2534 assert(ptr != NULL);
2535 assert(size != 0);
2536 assert(SIZE_T_MAX - size >= extra);
Jason Evans10aff3f2015-01-20 15:37:51 -08002537 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansd82a5e62013-12-12 22:35:52 -08002538 malloc_thread_init();
Jason Evans029d44c2014-10-04 11:12:53 -07002539 tsd = tsd_fetch();
Jason Evansc1e00ef2016-05-10 22:21:10 -07002540 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansd82a5e62013-12-12 22:35:52 -08002541
Jason Evansc1e00ef2016-05-10 22:21:10 -07002542 old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
Jason Evans9a505b72015-09-15 14:39:58 -07002543
Jason Evans9d2c10f2016-02-25 16:42:15 -08002544 /*
2545 * The API explicitly absolves itself of protecting against (size +
2546 * extra) numerical overflow, but we may need to clamp extra to avoid
2547 * exceeding HUGE_MAXCLASS.
2548 *
2549 * Ordinarily, size limit checking is handled deeper down, but here we
2550 * have to check as part of (size + extra) clamping, since we need the
2551 * clamped value in the above helper functions.
2552 */
2553 if (unlikely(size > HUGE_MAXCLASS)) {
2554 usize = old_usize;
2555 goto label_not_resized;
Jason Evans9a505b72015-09-15 14:39:58 -07002556 }
Jason Evans9d2c10f2016-02-25 16:42:15 -08002557 if (unlikely(HUGE_MAXCLASS - size < extra))
2558 extra = HUGE_MAXCLASS - size;
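	/*
	 * For example, if size == HUGE_MAXCLASS - 100 and extra == 200, extra
	 * is clamped to 100 so that (size + extra) stays within HUGE_MAXCLASS.
	 */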
Jason Evans9a505b72015-09-15 14:39:58 -07002559
Jason Evans9c640bf2014-09-11 16:20:44 -07002560 if (config_valgrind && unlikely(in_valgrind))
Jason Evansb2c31662014-01-12 15:05:44 -08002561 old_rzsize = u2rz(old_usize);
Jason Evansd82a5e62013-12-12 22:35:52 -08002562
2563 if (config_prof && opt_prof) {
Jason Evans5460aa62014-09-22 21:09:23 -07002564 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
Daniel Micaydc652132014-10-30 23:23:16 -04002565 alignment, zero);
Jason Evansd82a5e62013-12-12 22:35:52 -08002566 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002567 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2568 extra, alignment, zero);
Jason Evansd82a5e62013-12-12 22:35:52 -08002569 }
Jason Evans9c640bf2014-09-11 16:20:44 -07002570 if (unlikely(usize == old_usize))
Jason Evansb2c31662014-01-12 15:05:44 -08002571 goto label_not_resized;
Jason Evansd82a5e62013-12-12 22:35:52 -08002572
2573 if (config_stats) {
Jason Evans5460aa62014-09-22 21:09:23 -07002574 *tsd_thread_allocatedp_get(tsd) += usize;
2575 *tsd_thread_deallocatedp_get(tsd) += old_usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002576 }
Jason Evans0d6a4722016-11-16 18:53:59 -08002577 JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr,
2578 old_usize, old_rzsize, no, zero);
Jason Evansb2c31662014-01-12 15:05:44 -08002579label_not_resized:
Jason Evansd82a5e62013-12-12 22:35:52 -08002580 UTRACE(ptr, size, ptr);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002581 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansd82a5e62013-12-12 22:35:52 -08002582 return (usize);
2583}
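/*
 * Illustrative caller-side sketch: xallocx() never moves the allocation, so a
 * common pattern is to attempt an in-place resize and fall back to rallocx()
 * on failure (unprefixed public names assumed; handle_oom() is hypothetical).
 *
 *	if (xallocx(p, new_size, 0, 0) < new_size) {
 *		p = rallocx(p, new_size, 0);
 *		if (p == NULL)
 *			handle_oom();
 *	}
 */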
2584
Jason Evans00632602015-07-21 08:10:38 -07002585JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2586JEMALLOC_ATTR(pure)
Jason Evansd82a5e62013-12-12 22:35:52 -08002587je_sallocx(const void *ptr, int flags)
2588{
2589 size_t usize;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002590 tsdn_t *tsdn;
Jason Evans289053c2009-06-22 12:08:42 -07002591
Jason Evans10aff3f2015-01-20 15:37:51 -08002592 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansbbe29d32013-01-30 15:03:11 -08002593 malloc_thread_init();
Jason Evans8e3c3c62010-09-17 15:46:18 -07002594
Jason Evansc1e00ef2016-05-10 22:21:10 -07002595 tsdn = tsdn_fetch();
2596 witness_assert_lockless(tsdn);
Jason Evans289053c2009-06-22 12:08:42 -07002597
Jason Evansb2c0d632016-04-13 23:36:15 -07002598 if (config_ivsalloc)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002599 usize = ivsalloc(tsdn, ptr, config_prof);
Jason Evansb2c0d632016-04-13 23:36:15 -07002600 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07002601 usize = isalloc(tsdn, ptr, config_prof);
Jason Evansb2c0d632016-04-13 23:36:15 -07002602
Jason Evansc1e00ef2016-05-10 22:21:10 -07002603 witness_assert_lockless(tsdn);
Jason Evansd82a5e62013-12-12 22:35:52 -08002604 return (usize);
Jason Evans289053c2009-06-22 12:08:42 -07002605}
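/*
 * Illustrative sketch (unprefixed public names assumed): sallocx(p, 0)
 * reports the usable size of an existing allocation, which is at least the
 * requested size once rounded up to a size class.
 *
 *	void *p = mallocx(100, 0);
 *	size_t usable = sallocx(p, 0);	// >= 100; 112 with default classes
 */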
2606
Jason Evans00632602015-07-21 08:10:38 -07002607JEMALLOC_EXPORT void JEMALLOC_NOTHROW
Jason Evansd82a5e62013-12-12 22:35:52 -08002608je_dallocx(void *ptr, int flags)
Jason Evans4201af02010-01-24 02:53:40 -08002609{
Jason Evans8bb31982014-10-07 23:14:57 -07002610 tsd_t *tsd;
Jason Evans1cb181e2015-01-29 15:30:47 -08002611 tcache_t *tcache;
Jason Evans4201af02010-01-24 02:53:40 -08002612
Jason Evansd82a5e62013-12-12 22:35:52 -08002613 assert(ptr != NULL);
Jason Evans10aff3f2015-01-20 15:37:51 -08002614 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansd82a5e62013-12-12 22:35:52 -08002615
Jason Evans8bb31982014-10-07 23:14:57 -07002616 tsd = tsd_fetch();
Jason Evansc1e00ef2016-05-10 22:21:10 -07002617 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evans1cb181e2015-01-29 15:30:47 -08002618 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2619 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2620 tcache = NULL;
2621 else
2622 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
Jason Evansd82a5e62013-12-12 22:35:52 -08002623 } else
Jason Evans1cb181e2015-01-29 15:30:47 -08002624 tcache = tcache_get(tsd, false);
Jason Evansd82a5e62013-12-12 22:35:52 -08002625
2626 UTRACE(ptr, 0, 0);
Jason Evans3ef51d72016-05-06 12:16:00 -07002627 if (likely(!malloc_slow))
2628 ifree(tsd, ptr, tcache, false);
2629 else
2630 ifree(tsd, ptr, tcache, true);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002631 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansd82a5e62013-12-12 22:35:52 -08002632}
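/*
 * Illustrative sketch: for a non-NULL pointer, dallocx(p, 0) behaves like
 * free(p); the flags steer thread-cache usage, e.g. MALLOCX_TCACHE_NONE to
 * bypass the cache or MALLOCX_TCACHE(tc) to use an explicitly created cache
 * (unprefixed public names assumed).
 *
 *	dallocx(p, MALLOCX_TCACHE_NONE);
 */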
2633
Jason Evansa2260c92014-09-09 10:29:26 -07002634JEMALLOC_ALWAYS_INLINE_C size_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07002635inallocx(tsdn_t *tsdn, size_t size, int flags)
Jason Evansa2260c92014-09-09 10:29:26 -07002636{
2637 size_t usize;
2638
Jason Evansc1e00ef2016-05-10 22:21:10 -07002639 witness_assert_lockless(tsdn);
Jason Evansb2c0d632016-04-13 23:36:15 -07002640
Jason Evans9c640bf2014-09-11 16:20:44 -07002641 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
Jason Evansa2260c92014-09-09 10:29:26 -07002642 usize = s2u(size);
2643 else
2644 usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
Jason Evansc1e00ef2016-05-10 22:21:10 -07002645 witness_assert_lockless(tsdn);
Jason Evansa2260c92014-09-09 10:29:26 -07002646 return (usize);
2647}
2648
Jason Evans00632602015-07-21 08:10:38 -07002649JEMALLOC_EXPORT void JEMALLOC_NOTHROW
Daniel Micay4cfe5512014-08-28 15:41:48 -04002650je_sdallocx(void *ptr, size_t size, int flags)
2651{
Jason Evans8bb31982014-10-07 23:14:57 -07002652 tsd_t *tsd;
Jason Evans1cb181e2015-01-29 15:30:47 -08002653 tcache_t *tcache;
Jason Evansa2260c92014-09-09 10:29:26 -07002654 size_t usize;
Daniel Micay4cfe5512014-08-28 15:41:48 -04002655
2656 assert(ptr != NULL);
Jason Evans10aff3f2015-01-20 15:37:51 -08002657 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evans8bb31982014-10-07 23:14:57 -07002658 tsd = tsd_fetch();
Jason Evansc1e00ef2016-05-10 22:21:10 -07002659 usize = inallocx(tsd_tsdn(tsd), size, flags);
2660 assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));
Jason Evansb2c0d632016-04-13 23:36:15 -07002661
Jason Evansc1e00ef2016-05-10 22:21:10 -07002662 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evans1cb181e2015-01-29 15:30:47 -08002663 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2664 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2665 tcache = NULL;
2666 else
2667 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
Daniel Micay4cfe5512014-08-28 15:41:48 -04002668 } else
Jason Evans1cb181e2015-01-29 15:30:47 -08002669 tcache = tcache_get(tsd, false);
Daniel Micay4cfe5512014-08-28 15:41:48 -04002670
2671 UTRACE(ptr, 0, 0);
Jason Evans3ef51d72016-05-06 12:16:00 -07002672 if (likely(!malloc_slow))
2673 isfree(tsd, ptr, usize, tcache, false);
2674 else
2675 isfree(tsd, ptr, usize, tcache, true);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002676 witness_assert_lockless(tsd_tsdn(tsd));
Daniel Micay4cfe5512014-08-28 15:41:48 -04002677}
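/*
 * Illustrative sketch: sdallocx() is the sized variant of dallocx(); size and
 * flags must map to the same size class as the original allocation, which
 * passing the originally requested size with the original MALLOCX_ALIGN flag
 * satisfies (unprefixed public names assumed).
 *
 *	void *q = mallocx(200, 0);
 *	...
 *	sdallocx(q, 200, 0);
 */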
2678
Jason Evans00632602015-07-21 08:10:38 -07002679JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2680JEMALLOC_ATTR(pure)
Jason Evansd82a5e62013-12-12 22:35:52 -08002681je_nallocx(size_t size, int flags)
2682{
Jason Evans0c516a02016-02-25 15:29:49 -08002683 size_t usize;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002684 tsdn_t *tsdn;
Jason Evansd82a5e62013-12-12 22:35:52 -08002685
2686 assert(size != 0);
2687
Daniel Micay23fdf8b2014-09-09 15:26:05 -04002688 if (unlikely(malloc_init()))
Jason Evansd82a5e62013-12-12 22:35:52 -08002689 return (0);
2690
Jason Evansc1e00ef2016-05-10 22:21:10 -07002691 tsdn = tsdn_fetch();
2692 witness_assert_lockless(tsdn);
Jason Evansb2c0d632016-04-13 23:36:15 -07002693
Jason Evansc1e00ef2016-05-10 22:21:10 -07002694 usize = inallocx(tsdn, size, flags);
Jason Evans0c516a02016-02-25 15:29:49 -08002695 if (unlikely(usize > HUGE_MAXCLASS))
2696 return (0);
2697
Jason Evansc1e00ef2016-05-10 22:21:10 -07002698 witness_assert_lockless(tsdn);
Jason Evans0c516a02016-02-25 15:29:49 -08002699 return (usize);
Jason Evans4201af02010-01-24 02:53:40 -08002700}
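/*
 * Illustrative sketch: nallocx() reports the usable size that mallocx() would
 * return for a given request without performing an allocation, which is handy
 * for sizing buffers to a size-class boundary (unprefixed names assumed).
 *
 *	size_t usable = nallocx(100, 0);
 *	void *buf = mallocx(usable, 0);
 *	assert(sallocx(buf, 0) == usable);
 */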
2701
Jason Evans00632602015-07-21 08:10:38 -07002702JEMALLOC_EXPORT int JEMALLOC_NOTHROW
Jason Evans0a5489e2012-03-01 17:19:20 -08002703je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
Jason Evans3c234352010-01-27 13:10:55 -08002704 size_t newlen)
2705{
Jason Evansb2c0d632016-04-13 23:36:15 -07002706 int ret;
2707 tsd_t *tsd;
Jason Evans3c234352010-01-27 13:10:55 -08002708
Daniel Micay23fdf8b2014-09-09 15:26:05 -04002709 if (unlikely(malloc_init()))
Jason Evans95833312010-01-27 13:45:21 -08002710 return (EAGAIN);
2711
Jason Evansb2c0d632016-04-13 23:36:15 -07002712 tsd = tsd_fetch();
Jason Evansc1e00ef2016-05-10 22:21:10 -07002713 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansb2c0d632016-04-13 23:36:15 -07002714 ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002715 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansb2c0d632016-04-13 23:36:15 -07002716 return (ret);
Jason Evans3c234352010-01-27 13:10:55 -08002717}
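/*
 * Illustrative sketch: mallctl() reads and/or writes a named control, e.g.
 * reading the number of arenas (control names as documented in the jemalloc
 * manual; unprefixed public names assumed):
 *
 *	unsigned narenas;
 *	size_t sz = sizeof(narenas);
 *	if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) == 0)
 *		printf("%u arenas\n", narenas);
 */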
2718
Jason Evans00632602015-07-21 08:10:38 -07002719JEMALLOC_EXPORT int JEMALLOC_NOTHROW
Jason Evans0a5489e2012-03-01 17:19:20 -08002720je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
Jason Evans3c234352010-01-27 13:10:55 -08002721{
Jason Evansb2c0d632016-04-13 23:36:15 -07002722 int ret;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002723 tsdn_t *tsdn;
Jason Evans3c234352010-01-27 13:10:55 -08002724
Daniel Micay23fdf8b2014-09-09 15:26:05 -04002725 if (unlikely(malloc_init()))
Jason Evans95833312010-01-27 13:45:21 -08002726 return (EAGAIN);
2727
Jason Evansc1e00ef2016-05-10 22:21:10 -07002728 tsdn = tsdn_fetch();
2729 witness_assert_lockless(tsdn);
2730 ret = ctl_nametomib(tsdn, name, mibp, miblenp);
2731 witness_assert_lockless(tsdn);
Jason Evansb2c0d632016-04-13 23:36:15 -07002732 return (ret);
Jason Evans3c234352010-01-27 13:10:55 -08002733}
2734
Jason Evans00632602015-07-21 08:10:38 -07002735JEMALLOC_EXPORT int JEMALLOC_NOTHROW
Jason Evans0a5489e2012-03-01 17:19:20 -08002736je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2737 void *newp, size_t newlen)
Jason Evans3c234352010-01-27 13:10:55 -08002738{
Jason Evansb2c0d632016-04-13 23:36:15 -07002739 int ret;
2740 tsd_t *tsd;
Jason Evans3c234352010-01-27 13:10:55 -08002741
Daniel Micay23fdf8b2014-09-09 15:26:05 -04002742 if (unlikely(malloc_init()))
Jason Evans95833312010-01-27 13:45:21 -08002743 return (EAGAIN);
2744
Jason Evansb2c0d632016-04-13 23:36:15 -07002745 tsd = tsd_fetch();
Jason Evansc1e00ef2016-05-10 22:21:10 -07002746 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansb2c0d632016-04-13 23:36:15 -07002747 ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002748 witness_assert_lockless(tsd_tsdn(tsd));
Jason Evansb2c0d632016-04-13 23:36:15 -07002749 return (ret);
Jason Evans3c234352010-01-27 13:10:55 -08002750}
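/*
 * Illustrative sketch: mallctlnametomib() translates a name once, and
 * mallctlbymib() then services repeated queries cheaply, with wildcard path
 * components filled in per query.  nbins below would come from the
 * "arenas.nbins" control; error handling is omitted for brevity.
 *
 *	size_t mib[4], miblen = sizeof(mib) / sizeof(mib[0]);
 *	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *	for (unsigned i = 0; i < nbins; i++) {
 *		size_t bin_size, sz = sizeof(bin_size);
 *		mib[2] = i;
 *		mallctlbymib(mib, miblen, &bin_size, &sz, NULL, 0);
 *	}
 */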
2751
Jason Evans00632602015-07-21 08:10:38 -07002752JEMALLOC_EXPORT void JEMALLOC_NOTHROW
Jason Evansd82a5e62013-12-12 22:35:52 -08002753je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2754 const char *opts)
2755{
Jason Evansc1e00ef2016-05-10 22:21:10 -07002756 tsdn_t *tsdn;
Jason Evansd82a5e62013-12-12 22:35:52 -08002757
Jason Evansc1e00ef2016-05-10 22:21:10 -07002758 tsdn = tsdn_fetch();
2759 witness_assert_lockless(tsdn);
Jason Evansd82a5e62013-12-12 22:35:52 -08002760 stats_print(write_cb, cbopaque, opts);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002761 witness_assert_lockless(tsdn);
Jason Evansd82a5e62013-12-12 22:35:52 -08002762}
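/*
 * Illustrative sketch: with write_cb == NULL, output is emitted through
 * malloc_message() (stderr by default); the opts string can suppress
 * sections, e.g. "g" omits general information (unprefixed name assumed).
 *
 *	malloc_stats_print(NULL, NULL, NULL);
 */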
2763
Jason Evans00632602015-07-21 08:10:38 -07002764JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
Jason Evansd82a5e62013-12-12 22:35:52 -08002765je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
2766{
2767 size_t ret;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002768 tsdn_t *tsdn;
Jason Evansd82a5e62013-12-12 22:35:52 -08002769
Jason Evans10aff3f2015-01-20 15:37:51 -08002770 assert(malloc_initialized() || IS_INITIALIZER);
Jason Evansd82a5e62013-12-12 22:35:52 -08002771 malloc_thread_init();
2772
Jason Evansc1e00ef2016-05-10 22:21:10 -07002773 tsdn = tsdn_fetch();
2774 witness_assert_lockless(tsdn);
Jason Evansd82a5e62013-12-12 22:35:52 -08002775
Jason Evansb2c0d632016-04-13 23:36:15 -07002776 if (config_ivsalloc)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002777 ret = ivsalloc(tsdn, ptr, config_prof);
Jason Evansb2c0d632016-04-13 23:36:15 -07002778 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07002779 ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);
Jason Evansb2c0d632016-04-13 23:36:15 -07002780
Jason Evansc1e00ef2016-05-10 22:21:10 -07002781 witness_assert_lockless(tsdn);
Jason Evansd82a5e62013-12-12 22:35:52 -08002782 return (ret);
2783}
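/*
 * Illustrative sketch: malloc_usable_size() mirrors sallocx() but follows the
 * traditional interface, returning 0 for a NULL pointer (see the check above)
 * rather than requiring a valid allocation.
 *
 *	assert(malloc_usable_size(malloc(1)) >= 1);
 */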
2784
Jason Evans7e77eaf2012-03-02 17:47:37 -08002785/*
2786 * End non-standard functions.
2787 */
2788/******************************************************************************/
2789/*
Jason Evans289053c2009-06-22 12:08:42 -07002790 * The following functions are used by threading libraries for protection of
Jason Evans28177d42010-09-20 11:24:24 -07002791 * malloc during fork().
Jason Evans289053c2009-06-22 12:08:42 -07002792 */
2793
Jason Evans20f1fc92012-10-09 14:46:22 -07002794/*
2795 * If an application creates a thread before doing any allocation in the main
2796 * thread, then calls fork(2) in the main thread followed by memory allocation
2797 * in the child process, a race can occur that results in deadlock within the
2798 * child: the main thread may have forked while the created thread had
2799 * partially initialized the allocator. Ordinarily jemalloc prevents
2800 * fork/malloc races via the following functions it registers during
2801 * initialization using pthread_atfork(), but of course that does no good if
2802 * the allocator isn't fully initialized at fork time. The following library
Jason Evans9b756772014-10-10 18:19:20 -07002803 * constructor is a partial solution to this problem. It may still be possible
2804 * to trigger the deadlock described above, but doing so would involve forking
2805 * via a library constructor that runs before jemalloc's constructor runs.
Jason Evans20f1fc92012-10-09 14:46:22 -07002806 */
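/*
 * On configurations without JEMALLOC_MUTEX_INIT_CB, the registration performed
 * during initialization looks roughly like
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * so jemalloc_prefork() acquires every allocator mutex before fork(2), and the
 * postfork handlers release them in the parent and child respectively.
 */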
Jason Evans0c12dca2016-05-07 12:42:31 -07002807#ifndef JEMALLOC_JET
Jason Evans20f1fc92012-10-09 14:46:22 -07002808JEMALLOC_ATTR(constructor)
2809static void
2810jemalloc_constructor(void)
2811{
2812
2813 malloc_init();
2814}
Jason Evans0c12dca2016-05-07 12:42:31 -07002815#endif
Jason Evans20f1fc92012-10-09 14:46:22 -07002816
Jason Evans41b6afb2012-02-02 22:04:57 -08002817#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07002818void
Jason Evans804c9ec2009-06-22 17:44:33 -07002819jemalloc_prefork(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08002820#else
Mike Hommeyda99e312012-04-30 12:38:29 +02002821JEMALLOC_EXPORT void
Jason Evans41b6afb2012-02-02 22:04:57 -08002822_malloc_prefork(void)
2823#endif
Jason Evans289053c2009-06-22 12:08:42 -07002824{
Jason Evansb2c0d632016-04-13 23:36:15 -07002825 tsd_t *tsd;
Jason Evans174c0c32016-04-25 23:14:40 -07002826 unsigned i, j, narenas;
2827 arena_t *arena;
Jason Evans289053c2009-06-22 12:08:42 -07002828
Jason Evans58ad1e42012-05-11 17:40:16 -07002829#ifdef JEMALLOC_MUTEX_INIT_CB
Jason Evans10aff3f2015-01-20 15:37:51 -08002830 if (!malloc_initialized())
Jason Evans58ad1e42012-05-11 17:40:16 -07002831 return;
2832#endif
Jason Evans10aff3f2015-01-20 15:37:51 -08002833 assert(malloc_initialized());
Jason Evans58ad1e42012-05-11 17:40:16 -07002834
Jason Evansb2c0d632016-04-13 23:36:15 -07002835 tsd = tsd_fetch();
Jason Evans767d8502016-02-24 23:58:10 -08002836
Jason Evans174c0c32016-04-25 23:14:40 -07002837 narenas = narenas_total_get();
Jason Evans767d8502016-02-24 23:58:10 -08002838
Jason Evans174c0c32016-04-25 23:14:40 -07002839 witness_prefork(tsd);
Jason Evans108c4a12016-04-26 10:47:22 -07002840 /* Acquire all mutexes in a safe order. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002841 ctl_prefork(tsd_tsdn(tsd));
2842 malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
2843 prof_prefork0(tsd_tsdn(tsd));
Jason Evans174c0c32016-04-25 23:14:40 -07002844 for (i = 0; i < 3; i++) {
2845 for (j = 0; j < narenas; j++) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002846 if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
2847 NULL) {
Jason Evans174c0c32016-04-25 23:14:40 -07002848 switch (i) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002849 case 0:
2850 arena_prefork0(tsd_tsdn(tsd), arena);
2851 break;
2852 case 1:
2853 arena_prefork1(tsd_tsdn(tsd), arena);
2854 break;
2855 case 2:
2856 arena_prefork2(tsd_tsdn(tsd), arena);
2857 break;
Jason Evans174c0c32016-04-25 23:14:40 -07002858 default: not_reached();
2859 }
2860 }
2861 }
Jason Evansfbbb6242010-01-24 17:56:48 -08002862 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002863 base_prefork(tsd_tsdn(tsd));
Jason Evans174c0c32016-04-25 23:14:40 -07002864 for (i = 0; i < narenas; i++) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002865 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
2866 arena_prefork3(tsd_tsdn(tsd), arena);
Jason Evans174c0c32016-04-25 23:14:40 -07002867 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002868 prof_prefork1(tsd_tsdn(tsd));
Jason Evans289053c2009-06-22 12:08:42 -07002869}
2870
Jason Evans41b6afb2012-02-02 22:04:57 -08002871#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07002872void
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002873jemalloc_postfork_parent(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08002874#else
Mike Hommeyda99e312012-04-30 12:38:29 +02002875JEMALLOC_EXPORT void
Jason Evans41b6afb2012-02-02 22:04:57 -08002876_malloc_postfork(void)
2877#endif
Jason Evans289053c2009-06-22 12:08:42 -07002878{
Jason Evansb2c0d632016-04-13 23:36:15 -07002879 tsd_t *tsd;
Jason Evans767d8502016-02-24 23:58:10 -08002880 unsigned i, narenas;
Jason Evans289053c2009-06-22 12:08:42 -07002881
Jason Evans58ad1e42012-05-11 17:40:16 -07002882#ifdef JEMALLOC_MUTEX_INIT_CB
Jason Evans10aff3f2015-01-20 15:37:51 -08002883 if (!malloc_initialized())
Jason Evans58ad1e42012-05-11 17:40:16 -07002884 return;
2885#endif
Jason Evans10aff3f2015-01-20 15:37:51 -08002886 assert(malloc_initialized());
Jason Evans58ad1e42012-05-11 17:40:16 -07002887
Jason Evansb2c0d632016-04-13 23:36:15 -07002888 tsd = tsd_fetch();
2889
Jason Evans108c4a12016-04-26 10:47:22 -07002890 witness_postfork_parent(tsd);
Jason Evans289053c2009-06-22 12:08:42 -07002891 /* Release all mutexes, now that fork() has completed. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002892 base_postfork_parent(tsd_tsdn(tsd));
Jason Evans767d8502016-02-24 23:58:10 -08002893 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2894 arena_t *arena;
2895
Jason Evansc1e00ef2016-05-10 22:21:10 -07002896 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
2897 arena_postfork_parent(tsd_tsdn(tsd), arena);
Jason Evans289053c2009-06-22 12:08:42 -07002898 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002899 prof_postfork_parent(tsd_tsdn(tsd));
2900 malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
2901 ctl_postfork_parent(tsd_tsdn(tsd));
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002902}
2903
2904void
2905jemalloc_postfork_child(void)
2906{
Jason Evansb2c0d632016-04-13 23:36:15 -07002907 tsd_t *tsd;
Jason Evans767d8502016-02-24 23:58:10 -08002908 unsigned i, narenas;
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002909
Jason Evans10aff3f2015-01-20 15:37:51 -08002910 assert(malloc_initialized());
Jason Evans58ad1e42012-05-11 17:40:16 -07002911
Jason Evansb2c0d632016-04-13 23:36:15 -07002912 tsd = tsd_fetch();
2913
Jason Evans108c4a12016-04-26 10:47:22 -07002914 witness_postfork_child(tsd);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002915 /* Release all mutexes, now that fork() has completed. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002916 base_postfork_child(tsd_tsdn(tsd));
Jason Evans767d8502016-02-24 23:58:10 -08002917 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2918 arena_t *arena;
2919
Jason Evansc1e00ef2016-05-10 22:21:10 -07002920 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
2921 arena_postfork_child(tsd_tsdn(tsd), arena);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002922 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002923 prof_postfork_child(tsd_tsdn(tsd));
2924 malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
2925 ctl_postfork_child(tsd_tsdn(tsd));
Jason Evans289053c2009-06-22 12:08:42 -07002926}
Jason Evans2dbecf12010-09-05 10:35:13 -07002927
2928/******************************************************************************/
Christopher Ferris6f50cbc2015-09-09 12:17:01 -07002929
2930/* ANDROID extension */
Colin Cross6ab5f602015-12-29 16:56:53 -08002931#include "android_je_iterate.c"
Christopher Ferris6f50cbc2015-09-09 12:17:01 -07002932#include "android_je_mallinfo.c"
2933/* End ANDROID extension */