#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
#include <unwind.h>
#endif

/******************************************************************************/
/* Data. */

bool opt_prof = false;
bool opt_prof_active = true;
bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[
	/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
	PATH_MAX +
#endif
	1];

/*
 * Initialized as opt_prof_active, and accessed via
 * prof_active_[gs]et{_unlocked,}().
 */
bool prof_active;
static malloc_mutex_t prof_active_mtx;

/*
 * Initialized as opt_prof_thread_active_init, and accessed via
 * prof_thread_active_init_[gs]et().
 */
static bool prof_thread_active_init;
static malloc_mutex_t prof_thread_active_init_mtx;

/*
 * Initialized as opt_prof_gdump, and accessed via
 * prof_gdump_[gs]et{_unlocked,}().
 */
bool prof_gdump_val;
static malloc_mutex_t prof_gdump_mtx;

uint64_t prof_interval = 0;

size_t lg_prof_sample;

/*
 * Table of mutexes that are shared among gctx's. These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same time.
 * The primary motivation for this sharing though is that gctx's are ephemeral,
 * and destroying mutexes causes complications for systems that allocate when
 * creating/destroying mutexes.
 */
static malloc_mutex_t *gctx_locks;
static unsigned cum_gctxs; /* Atomic counter. */

/*
 * Table of mutexes that are shared among tdata's. No operations require
 * holding multiple tdata locks, so there is no problem with using them for more
 * than one tdata at the same time, even though a gctx lock may be acquired
 * while holding a tdata lock.
 */
static malloc_mutex_t *tdata_locks;

/*
 * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t bt2gctx;
static malloc_mutex_t bt2gctx_mtx;

/*
 * Tree of all extant prof_tdata_t structures, regardless of state,
 * {attached,detached,expired}.
 */
static prof_tdata_tree_t tdatas;
static malloc_mutex_t tdatas_mtx;

static uint64_t next_thr_uid;
static malloc_mutex_t next_thr_uid_mtx;

static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;

/*
 * This buffer is rather large for stack allocation, so use a single buffer for
 * all profile dumps.
 */
static malloc_mutex_t prof_dump_mtx;
static char prof_dump_buf[
	/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
	PROF_DUMP_BUFSIZE
#else
	1
#endif
];
static size_t prof_dump_buf_end;
static int prof_dump_fd;

/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool prof_tctx_should_destroy(prof_tctx_t *tctx);
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool prof_tdata_should_destroy(prof_tdata_t *tdata,
    bool even_if_attached);
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached);
static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);

/******************************************************************************/
/* Red-black trees. */

JEMALLOC_INLINE_C int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
{
	uint64_t a_thr_uid = a->thr_uid;
	uint64_t b_thr_uid = b->thr_uid;
	int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
	if (ret == 0) {
		uint64_t a_thr_discrim = a->thr_discrim;
		uint64_t b_thr_discrim = b->thr_discrim;
		ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
		    b_thr_discrim);
		if (ret == 0) {
			uint64_t a_tctx_uid = a->tctx_uid;
			uint64_t b_tctx_uid = b->tctx_uid;
			ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
			    b_tctx_uid);
		}
	}
	return (ret);
}

rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
    tctx_link, prof_tctx_comp)

JEMALLOC_INLINE_C int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
{
	unsigned a_len = a->bt.len;
	unsigned b_len = b->bt.len;
	unsigned comp_len = (a_len < b_len) ? a_len : b_len;
	int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
	if (ret == 0)
		ret = (a_len > b_len) - (a_len < b_len);
	return (ret);
}

rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
    prof_gctx_comp)

JEMALLOC_INLINE_C int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
{
	int ret;
	uint64_t a_uid = a->thr_uid;
	uint64_t b_uid = b->thr_uid;

	ret = ((a_uid > b_uid) - (a_uid < b_uid));
	if (ret == 0) {
		uint64_t a_discrim = a->thr_discrim;
		uint64_t b_discrim = b->thr_discrim;

		ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
	}
	return (ret);
}

rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
    prof_tdata_comp)

/******************************************************************************/

void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
{
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (updated) {
		/*
		 * Compute a new sample threshold. This isn't very important in
		 * practice, because this function is rarely executed, so the
		 * potential for sample bias is minimal except in contrived
		 * programs.
		 */
		tdata = prof_tdata_get(tsd, true);
		if (tdata != NULL)
			prof_sample_threshold_update(tdata);
	}

	if ((uintptr_t)tctx > (uintptr_t)1U) {
		malloc_mutex_lock(tctx->tdata->lock);
		tctx->prepared = false;
		if (prof_tctx_should_destroy(tctx))
			prof_tctx_destroy(tsd, tctx);
		else
			malloc_mutex_unlock(tctx->tdata->lock);
	}
}

void
prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
{

	prof_tctx_set(ptr, usize, tctx);

	malloc_mutex_lock(tctx->tdata->lock);
	tctx->cnts.curobjs++;
	tctx->cnts.curbytes += usize;
	if (opt_prof_accum) {
		tctx->cnts.accumobjs++;
		tctx->cnts.accumbytes += usize;
	}
	tctx->prepared = false;
	malloc_mutex_unlock(tctx->tdata->lock);
}

void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{

	malloc_mutex_lock(tctx->tdata->lock);
	assert(tctx->cnts.curobjs > 0);
	assert(tctx->cnts.curbytes >= usize);
	tctx->cnts.curobjs--;
	tctx->cnts.curbytes -= usize;

	if (prof_tctx_should_destroy(tctx))
		prof_tctx_destroy(tsd, tctx);
	else
		malloc_mutex_unlock(tctx->tdata->lock);
}

void
bt_init(prof_bt_t *bt, void **vec)
{

	cassert(config_prof);

	bt->vec = vec;
	bt->len = 0;
}

JEMALLOC_INLINE_C void
prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
{

	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	if (tdata != NULL) {
		assert(!tdata->enq);
		tdata->enq = true;
	}

	malloc_mutex_lock(&bt2gctx_mtx);
}

JEMALLOC_INLINE_C void
prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
{

	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	malloc_mutex_unlock(&bt2gctx_mtx);

	if (tdata != NULL) {
		bool idump, gdump;

		assert(tdata->enq);
		tdata->enq = false;
		idump = tdata->enq_idump;
		tdata->enq_idump = false;
		gdump = tdata->enq_gdump;
		tdata->enq_gdump = false;

		if (idump)
			prof_idump();
		if (gdump)
			prof_gdump();
	}
}

#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt)
{
	int nframes;

	cassert(config_prof);
	assert(bt->len == 0);
	assert(bt->vec != NULL);

	nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
	if (nframes <= 0)
		return;
	bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
{

	cassert(config_prof);

	return (_URC_NO_REASON);
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
	void *ip;

	cassert(config_prof);

	ip = (void *)_Unwind_GetIP(context);
	if (ip == NULL)
		return (_URC_END_OF_STACK);
	data->bt->vec[data->bt->len] = ip;
	data->bt->len++;
	if (data->bt->len == data->max)
		return (_URC_END_OF_STACK);

	return (_URC_NO_REASON);
}

void
prof_backtrace(prof_bt_t *bt)
{
	prof_unwind_data_t data = {bt, PROF_BT_MAX};

	cassert(config_prof);

	_Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt)
{
#define BT_FRAME(i) \
	if ((i) < PROF_BT_MAX) { \
		void *p; \
		if (__builtin_frame_address(i) == 0) \
			return; \
		p = __builtin_return_address(i); \
		if (p == NULL) \
			return; \
		bt->vec[(i)] = p; \
		bt->len = (i) + 1; \
	} else \
		return;

	cassert(config_prof);

	BT_FRAME(0)
	BT_FRAME(1)
	BT_FRAME(2)
	BT_FRAME(3)
	BT_FRAME(4)
	BT_FRAME(5)
	BT_FRAME(6)
	BT_FRAME(7)
	BT_FRAME(8)
	BT_FRAME(9)

	BT_FRAME(10)
	BT_FRAME(11)
	BT_FRAME(12)
	BT_FRAME(13)
	BT_FRAME(14)
	BT_FRAME(15)
	BT_FRAME(16)
	BT_FRAME(17)
	BT_FRAME(18)
	BT_FRAME(19)

	BT_FRAME(20)
	BT_FRAME(21)
	BT_FRAME(22)
	BT_FRAME(23)
	BT_FRAME(24)
	BT_FRAME(25)
	BT_FRAME(26)
	BT_FRAME(27)
	BT_FRAME(28)
	BT_FRAME(29)

	BT_FRAME(30)
	BT_FRAME(31)
	BT_FRAME(32)
	BT_FRAME(33)
	BT_FRAME(34)
	BT_FRAME(35)
	BT_FRAME(36)
	BT_FRAME(37)
	BT_FRAME(38)
	BT_FRAME(39)

	BT_FRAME(40)
	BT_FRAME(41)
	BT_FRAME(42)
	BT_FRAME(43)
	BT_FRAME(44)
	BT_FRAME(45)
	BT_FRAME(46)
	BT_FRAME(47)
	BT_FRAME(48)
	BT_FRAME(49)

	BT_FRAME(50)
	BT_FRAME(51)
	BT_FRAME(52)
	BT_FRAME(53)
	BT_FRAME(54)
	BT_FRAME(55)
	BT_FRAME(56)
	BT_FRAME(57)
	BT_FRAME(58)
	BT_FRAME(59)

	BT_FRAME(60)
	BT_FRAME(61)
	BT_FRAME(62)
	BT_FRAME(63)
	BT_FRAME(64)
	BT_FRAME(65)
	BT_FRAME(66)
	BT_FRAME(67)
	BT_FRAME(68)
	BT_FRAME(69)

	BT_FRAME(70)
	BT_FRAME(71)
	BT_FRAME(72)
	BT_FRAME(73)
	BT_FRAME(74)
	BT_FRAME(75)
	BT_FRAME(76)
	BT_FRAME(77)
	BT_FRAME(78)
	BT_FRAME(79)

	BT_FRAME(80)
	BT_FRAME(81)
	BT_FRAME(82)
	BT_FRAME(83)
	BT_FRAME(84)
	BT_FRAME(85)
	BT_FRAME(86)
	BT_FRAME(87)
	BT_FRAME(88)
	BT_FRAME(89)

	BT_FRAME(90)
	BT_FRAME(91)
	BT_FRAME(92)
	BT_FRAME(93)
	BT_FRAME(94)
	BT_FRAME(95)
	BT_FRAME(96)
	BT_FRAME(97)
	BT_FRAME(98)
	BT_FRAME(99)

	BT_FRAME(100)
	BT_FRAME(101)
	BT_FRAME(102)
	BT_FRAME(103)
	BT_FRAME(104)
	BT_FRAME(105)
	BT_FRAME(106)
	BT_FRAME(107)
	BT_FRAME(108)
	BT_FRAME(109)

	BT_FRAME(110)
	BT_FRAME(111)
	BT_FRAME(112)
	BT_FRAME(113)
	BT_FRAME(114)
	BT_FRAME(115)
	BT_FRAME(116)
	BT_FRAME(117)
	BT_FRAME(118)
	BT_FRAME(119)

	BT_FRAME(120)
	BT_FRAME(121)
	BT_FRAME(122)
	BT_FRAME(123)
	BT_FRAME(124)
	BT_FRAME(125)
	BT_FRAME(126)
	BT_FRAME(127)
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt)
{

	cassert(config_prof);
	not_reached();
}
#endif

static malloc_mutex_t *
prof_gctx_mutex_choose(void)
{
	unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);

	return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
}

static malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid)
{

	return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
}

static prof_gctx_t *
prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
{
	/*
	 * Create a single allocation that has space for vec of length bt->len.
	 */
	size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
	prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, size,
	    size2index(size), false, tcache_get(tsd, true), true, NULL, true);
	if (gctx == NULL)
		return (NULL);
	gctx->lock = prof_gctx_mutex_choose();
	/*
	 * Set nlimbo to 1, in order to avoid a race condition with
	 * prof_tctx_destroy()/prof_gctx_try_destroy().
	 */
	gctx->nlimbo = 1;
	tctx_tree_new(&gctx->tctxs);
	/* Duplicate bt. */
	memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
	gctx->bt.vec = gctx->vec;
	gctx->bt.len = bt->len;
	return (gctx);
}

static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
    prof_tdata_t *tdata)
{

	cassert(config_prof);

	/*
	 * Check that gctx is still unused by any thread cache before destroying
	 * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
	 * condition with this function, as does prof_tctx_destroy() in order to
	 * avoid a race between the main body of prof_tctx_destroy() and entry
	 * into this function.
	 */
	prof_enter(tsd, tdata_self);
	malloc_mutex_lock(gctx->lock);
	assert(gctx->nlimbo != 0);
	if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
		/* Remove gctx from bt2gctx. */
		if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
			not_reached();
		prof_leave(tsd, tdata_self);
		/* Destroy gctx. */
		malloc_mutex_unlock(gctx->lock);
		idalloctm(tsd, gctx, tcache_get(tsd, false), true, true);
	} else {
		/*
		 * Compensate for increment in prof_tctx_destroy() or
		 * prof_lookup().
		 */
		gctx->nlimbo--;
		malloc_mutex_unlock(gctx->lock);
		prof_leave(tsd, tdata_self);
	}
}

/* tctx->tdata->lock must be held. */
static bool
prof_tctx_should_destroy(prof_tctx_t *tctx)
{

	if (opt_prof_accum)
		return (false);
	if (tctx->cnts.curobjs != 0)
		return (false);
	if (tctx->prepared)
		return (false);
	return (true);
}

static bool
prof_gctx_should_destroy(prof_gctx_t *gctx)
{

	if (opt_prof_accum)
		return (false);
	if (!tctx_tree_empty(&gctx->tctxs))
		return (false);
	if (gctx->nlimbo != 0)
		return (false);
	return (true);
}

/* tctx->tdata->lock is held upon entry, and released before return. */
static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
{
	prof_tdata_t *tdata = tctx->tdata;
	prof_gctx_t *gctx = tctx->gctx;
	bool destroy_tdata, destroy_tctx, destroy_gctx;

	assert(tctx->cnts.curobjs == 0);
	assert(tctx->cnts.curbytes == 0);
	assert(!opt_prof_accum);
	assert(tctx->cnts.accumobjs == 0);
	assert(tctx->cnts.accumbytes == 0);

	ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
	destroy_tdata = prof_tdata_should_destroy(tdata, false);
	malloc_mutex_unlock(tdata->lock);

	malloc_mutex_lock(gctx->lock);
	switch (tctx->state) {
	case prof_tctx_state_nominal:
		tctx_tree_remove(&gctx->tctxs, tctx);
		destroy_tctx = true;
		if (prof_gctx_should_destroy(gctx)) {
			/*
			 * Increment gctx->nlimbo in order to keep another
			 * thread from winning the race to destroy gctx while
			 * this one has gctx->lock dropped. Without this, it
			 * would be possible for another thread to:
			 *
			 * 1) Sample an allocation associated with gctx.
			 * 2) Deallocate the sampled object.
			 * 3) Successfully prof_gctx_try_destroy(gctx).
			 *
			 * The result would be that gctx no longer exists by the
			 * time this thread accesses it in
			 * prof_gctx_try_destroy().
			 */
			gctx->nlimbo++;
			destroy_gctx = true;
		} else
			destroy_gctx = false;
		break;
	case prof_tctx_state_dumping:
		/*
		 * A dumping thread needs tctx to remain valid until dumping
		 * has finished. Change state such that the dumping thread will
		 * complete destruction during a late dump iteration phase.
		 */
		tctx->state = prof_tctx_state_purgatory;
		destroy_tctx = false;
		destroy_gctx = false;
		break;
	default:
		not_reached();
		destroy_tctx = false;
		destroy_gctx = false;
	}
	malloc_mutex_unlock(gctx->lock);
	if (destroy_gctx) {
		prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
		    tdata);
	}

	if (destroy_tdata)
		prof_tdata_destroy(tsd, tdata, false);

	if (destroy_tctx)
		idalloctm(tsd, tctx, tcache_get(tsd, false), true, true);
}

static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
{
	union {
		prof_gctx_t *p;
		void *v;
	} gctx;
	union {
		prof_bt_t *p;
		void *v;
	} btkey;
	bool new_gctx;

	prof_enter(tsd, tdata);
	if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
		/* bt has never been seen before. Insert it. */
		gctx.p = prof_gctx_create(tsd, bt);
		if (gctx.v == NULL) {
			prof_leave(tsd, tdata);
			return (true);
		}
		btkey.p = &gctx.p->bt;
		if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
			/* OOM. */
			prof_leave(tsd, tdata);
			idalloctm(tsd, gctx.v, tcache_get(tsd, false), true,
			    true);
			return (true);
		}
		new_gctx = true;
	} else {
		/*
		 * Increment nlimbo, in order to avoid a race condition with
		 * prof_tctx_destroy()/prof_gctx_try_destroy().
		 */
		malloc_mutex_lock(gctx.p->lock);
		gctx.p->nlimbo++;
		malloc_mutex_unlock(gctx.p->lock);
		new_gctx = false;
	}
	prof_leave(tsd, tdata);

	*p_btkey = btkey.v;
	*p_gctx = gctx.p;
	*p_new_gctx = new_gctx;
	return (false);
}

prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt)
{
	union {
		prof_tctx_t *p;
		void *v;
	} ret;
	prof_tdata_t *tdata;
	bool not_found;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return (NULL);

	malloc_mutex_lock(tdata->lock);
	not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
	if (!not_found) /* Note double negative! */
		ret.p->prepared = true;
	malloc_mutex_unlock(tdata->lock);
	if (not_found) {
		tcache_t *tcache;
		void *btkey;
		prof_gctx_t *gctx;
		bool new_gctx, error;

		/*
		 * This thread's cache lacks bt. Look for it in the global
		 * cache.
		 */
		if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
		    &new_gctx))
			return (NULL);

		/* Link a prof_tctx_t into gctx for this thread. */
		tcache = tcache_get(tsd, true);
		ret.v = iallocztm(tsd, sizeof(prof_tctx_t),
		    size2index(sizeof(prof_tctx_t)), false, tcache, true, NULL,
		    true);
		if (ret.p == NULL) {
			if (new_gctx)
				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
			return (NULL);
		}
		ret.p->tdata = tdata;
		ret.p->thr_uid = tdata->thr_uid;
		ret.p->thr_discrim = tdata->thr_discrim;
		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
		ret.p->gctx = gctx;
		ret.p->tctx_uid = tdata->tctx_uid_next++;
		ret.p->prepared = true;
		ret.p->state = prof_tctx_state_initializing;
		malloc_mutex_lock(tdata->lock);
		error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
		malloc_mutex_unlock(tdata->lock);
		if (error) {
			if (new_gctx)
				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
			idalloctm(tsd, ret.v, tcache, true, true);
			return (NULL);
		}
		malloc_mutex_lock(gctx->lock);
		ret.p->state = prof_tctx_state_nominal;
		tctx_tree_insert(&gctx->tctxs, ret.p);
		gctx->nlimbo--;
		malloc_mutex_unlock(gctx->lock);
	}

	return (ret.p);
}

void
prof_sample_threshold_update(prof_tdata_t *tdata)
{
	/*
	 * The body of this function is compiled out unless heap profiling is
	 * enabled, so that it is possible to compile jemalloc with floating
	 * point support completely disabled. Avoiding floating point code is
	 * important on memory-constrained systems, but it also enables a
	 * workaround for versions of glibc that don't properly save/restore
	 * floating point registers during dynamic lazy symbol loading (which
	 * internally calls into whatever malloc implementation happens to be
	 * integrated into the application). Note that some compilers (e.g.
	 * gcc 4.8) may use floating point registers for fast memory moves, so
	 * jemalloc must be compiled with such optimizations disabled (e.g.
	 * -mno-sse) in order for the workaround to be complete.
	 */
#ifdef JEMALLOC_PROF
	uint64_t r;
	double u;

	if (!config_prof)
		return;

	if (lg_prof_sample == 0) {
		tdata->bytes_until_sample = 0;
		return;
	}

	/*
	 * Compute sample interval as a geometrically distributed random
	 * variable with mean (2^lg_prof_sample).
	 *
	 *                             __        __
	 *                            |  log(u)  |                     1
	 * tdata->bytes_until_sample = | -------- |, where p = ---------------
	 *                            | log(1-p) |             lg_prof_sample
	 *                                                     2
	 *
	 * For more information on the math, see:
	 *
	 *   Non-Uniform Random Variate Generation
	 *   Luc Devroye
	 *   Springer-Verlag, New York, 1986
	 *   pp 500
	 *   (http://luc.devroye.org/rnbookindex.html)
	 */
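	/*
	 * Illustrative example (not part of the original comment, values
	 * assumed for the sake of the sketch): with lg_prof_sample = 19
	 * (a 512 KiB mean interval) and a uniform draw of u = 0.5, the
	 * formula above gives log(0.5) / log(1 - 2^-19), i.e. roughly
	 * 363409 bytes until the next sampled allocation.
	 */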
	r = prng_lg_range(&tdata->prng_state, 53);
	u = (double)r * (1.0/9007199254740992.0L);
	tdata->bytes_until_sample = (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
	    + (uint64_t)1U;
#endif
}

#ifdef JEMALLOC_JET
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
	size_t *tdata_count = (size_t *)arg;

	(*tdata_count)++;

	return (NULL);
}

size_t
prof_tdata_count(void)
{
	size_t tdata_count = 0;

	malloc_mutex_lock(&tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
	    (void *)&tdata_count);
	malloc_mutex_unlock(&tdatas_mtx);

	return (tdata_count);
}
#endif

#ifdef JEMALLOC_JET
size_t
prof_bt_count(void)
{
	size_t bt_count;
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return (0);

	malloc_mutex_lock(&bt2gctx_mtx);
	bt_count = ckh_count(&bt2gctx);
	malloc_mutex_unlock(&bt2gctx_mtx);

	return (bt_count);
}
#endif

#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
#endif
static int
prof_dump_open(bool propagate_err, const char *filename)
{
	int fd;

	fd = creat(filename, 0644);
	if (fd == -1 && !propagate_err) {
		malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
		    filename);
		if (opt_abort)
			abort();
	}

	return (fd);
}
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open)
prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
#endif

static bool
prof_dump_flush(bool propagate_err)
{
	bool ret = false;
	ssize_t err;

	cassert(config_prof);

	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
	if (err == -1) {
		if (!propagate_err) {
			malloc_write("<jemalloc>: write() failed during heap "
			    "profile flush\n");
			if (opt_abort)
				abort();
		}
		ret = true;
	}
	prof_dump_buf_end = 0;

	return (ret);
}

static bool
prof_dump_close(bool propagate_err)
{
	bool ret;

	assert(prof_dump_fd != -1);
	ret = prof_dump_flush(propagate_err);
	close(prof_dump_fd);
	prof_dump_fd = -1;

	return (ret);
}

static bool
prof_dump_write(bool propagate_err, const char *s)
{
	size_t i, slen, n;

	cassert(config_prof);

	i = 0;
	slen = strlen(s);
	while (i < slen) {
		/* Flush the buffer if it is full. */
		if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
			if (prof_dump_flush(propagate_err) && propagate_err)
				return (true);

		if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
			/* Finish writing. */
			n = slen - i;
		} else {
			/* Write as much of s as will fit. */
			n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
		}
		memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
		prof_dump_buf_end += n;
		i += n;
	}

	return (false);
}

JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
prof_dump_printf(bool propagate_err, const char *format, ...)
{
	bool ret;
	va_list ap;
	char buf[PROF_PRINTF_BUFSIZE];

	va_start(ap, format);
	malloc_vsnprintf(buf, sizeof(buf), format, ap);
	va_end(ap);
	ret = prof_dump_write(propagate_err, buf);

	return (ret);
}

/* tctx->tdata->lock is held. */
static void
prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
{

	malloc_mutex_lock(tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_initializing:
		malloc_mutex_unlock(tctx->gctx->lock);
		return;
	case prof_tctx_state_nominal:
		tctx->state = prof_tctx_state_dumping;
		malloc_mutex_unlock(tctx->gctx->lock);

		memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));

		tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
		tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
		if (opt_prof_accum) {
			tdata->cnt_summed.accumobjs +=
			    tctx->dump_cnts.accumobjs;
			tdata->cnt_summed.accumbytes +=
			    tctx->dump_cnts.accumbytes;
		}
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		not_reached();
	}
}

/* gctx->lock is held. */
static void
prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
{

	gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
	gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
	if (opt_prof_accum) {
		gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
		gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
	}
}

/* tctx->gctx->lock is held. */
static prof_tctx_t *
prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		prof_tctx_merge_gctx(tctx, tctx->gctx);
		break;
	default:
		not_reached();
	}

	return (NULL);
}

/* gctx->lock is held. */
static prof_tctx_t *
prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
	bool propagate_err = *(bool *)arg;

	switch (tctx->state) {
	case prof_tctx_state_initializing:
	case prof_tctx_state_nominal:
		/* Not captured by this dump. */
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		if (prof_dump_printf(propagate_err,
		    "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
		    "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
		    tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
		    tctx->dump_cnts.accumbytes))
			return (tctx);
		break;
	default:
		not_reached();
	}
	return (NULL);
}

/* tctx->gctx->lock is held. */
static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
	prof_tctx_t *ret;

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
		tctx->state = prof_tctx_state_nominal;
		break;
	case prof_tctx_state_purgatory:
		ret = tctx;
		goto label_return;
	default:
		not_reached();
	}

	ret = NULL;
label_return:
	return (ret);
}

static void
prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
{

	cassert(config_prof);

	malloc_mutex_lock(gctx->lock);

	/*
	 * Increment nlimbo so that gctx won't go away before dump.
	 * Additionally, link gctx into the dump list so that it is included in
	 * prof_dump()'s second pass.
	 */
	gctx->nlimbo++;
	gctx_tree_insert(gctxs, gctx);

	memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));

	malloc_mutex_unlock(gctx->lock);
}

static prof_gctx_t *
prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
	size_t *leak_ngctx = (size_t *)arg;

	malloc_mutex_lock(gctx->lock);
	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL);
	if (gctx->cnt_summed.curobjs != 0)
		(*leak_ngctx)++;
	malloc_mutex_unlock(gctx->lock);

	return (NULL);
}

static void
prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
{
	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
	prof_gctx_t *gctx;

	/*
	 * Standard tree iteration won't work here, because as soon as we
	 * decrement gctx->nlimbo and unlock gctx, another thread can
	 * concurrently destroy it, which will corrupt the tree. Therefore,
	 * tear down the tree one node at a time during iteration.
	 */
	while ((gctx = gctx_tree_first(gctxs)) != NULL) {
		gctx_tree_remove(gctxs, gctx);
		malloc_mutex_lock(gctx->lock);
		{
			prof_tctx_t *next;

			next = NULL;
			do {
				prof_tctx_t *to_destroy =
				    tctx_tree_iter(&gctx->tctxs, next,
				    prof_tctx_finish_iter, NULL);
				if (to_destroy != NULL) {
					next = tctx_tree_next(&gctx->tctxs,
					    to_destroy);
					tctx_tree_remove(&gctx->tctxs,
					    to_destroy);
					idalloctm(tsd, to_destroy,
					    tcache_get(tsd, false), true, true);
				} else
					next = NULL;
			} while (next != NULL);
		}
		gctx->nlimbo--;
		if (prof_gctx_should_destroy(gctx)) {
			gctx->nlimbo++;
			malloc_mutex_unlock(gctx->lock);
			prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
		} else
			malloc_mutex_unlock(gctx->lock);
	}
}

static prof_tdata_t *
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
	prof_cnt_t *cnt_all = (prof_cnt_t *)arg;

	malloc_mutex_lock(tdata->lock);
	if (!tdata->expired) {
		size_t tabind;
		union {
			prof_tctx_t *p;
			void *v;
		} tctx;

		tdata->dumping = true;
		memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
		for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
		    &tctx.v);)
			prof_tctx_merge_tdata(tctx.p, tdata);

		cnt_all->curobjs += tdata->cnt_summed.curobjs;
		cnt_all->curbytes += tdata->cnt_summed.curbytes;
		if (opt_prof_accum) {
			cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
			cnt_all->accumbytes += tdata->cnt_summed.accumbytes;
		}
	} else
		tdata->dumping = false;
	malloc_mutex_unlock(tdata->lock);

	return (NULL);
}

static prof_tdata_t *
prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
	bool propagate_err = *(bool *)arg;

	if (!tdata->dumping)
		return (NULL);

	if (prof_dump_printf(propagate_err,
	    "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
	    tdata->thr_uid, tdata->cnt_summed.curobjs,
	    tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
	    tdata->cnt_summed.accumbytes,
	    (tdata->thread_name != NULL) ? " " : "",
	    (tdata->thread_name != NULL) ? tdata->thread_name : ""))
		return (tdata);
	return (NULL);
}

#ifdef JEMALLOC_JET
#undef prof_dump_header
#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
#endif
static bool
prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{
	bool ret;

	if (prof_dump_printf(propagate_err,
	    "heap_v2/%"FMTu64"\n"
	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
	    ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
	    cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
		return (true);

	malloc_mutex_lock(&tdatas_mtx);
	ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
	    (void *)&propagate_err) != NULL);
	malloc_mutex_unlock(&tdatas_mtx);
	return (ret);
}
#ifdef JEMALLOC_JET
#undef prof_dump_header
#define prof_dump_header JEMALLOC_N(prof_dump_header)
prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
#endif
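
/*
 * For reference, an illustrative dump header (made-up values, assuming
 * lg_prof_sample == 19 and a single thread named "main") as produced by
 * prof_dump_header() together with prof_tdata_dump_iter() would read:
 *
 *   heap_v2/524288
 *     t*: 1574: 12582912 [0: 0]
 *     t0: 1574: 12582912 [0: 0] main
 */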

/* gctx->lock is held. */
static bool
prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
    prof_gctx_tree_t *gctxs)
{
	bool ret;
	unsigned i;

	cassert(config_prof);

	/* Avoid dumping gctx's that have no useful data. */
	if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
	    (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
		assert(gctx->cnt_summed.curobjs == 0);
		assert(gctx->cnt_summed.curbytes == 0);
		assert(gctx->cnt_summed.accumobjs == 0);
		assert(gctx->cnt_summed.accumbytes == 0);
		ret = false;
		goto label_return;
	}

	if (prof_dump_printf(propagate_err, "@")) {
		ret = true;
		goto label_return;
	}
	for (i = 0; i < bt->len; i++) {
		if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
		    (uintptr_t)bt->vec[i])) {
			ret = true;
			goto label_return;
		}
	}

	if (prof_dump_printf(propagate_err,
	    "\n"
	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
	    gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
	    gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
		ret = true;
		goto label_return;
	}

	if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
	    (void *)&propagate_err) != NULL) {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	return (ret);
}
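
/*
 * Illustrative (made-up) per-backtrace record as emitted by prof_dump_gctx()
 * and prof_tctx_dump_iter() for a two-frame backtrace sampled by thread 0:
 *
 *   @ 0x7f3a12345678 0x7f3a12345abc
 *     t*: 3: 1572864 [0: 0]
 *     t0: 3: 1572864 [0: 0]
 */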
1361
Christopher Ferrise4294032016-03-02 14:33:02 -08001362#ifndef _WIN32
Jason Evanse42c3092015-07-22 15:44:47 -07001363JEMALLOC_FORMAT_PRINTF(1, 2)
Jason Evans8e33c212015-05-01 09:03:20 -07001364static int
1365prof_open_maps(const char *format, ...)
1366{
1367 int mfd;
1368 va_list ap;
1369 char filename[PATH_MAX + 1];
1370
1371 va_start(ap, format);
1372 malloc_vsnprintf(filename, sizeof(filename), format, ap);
1373 va_end(ap);
1374 mfd = open(filename, O_RDONLY);
1375
1376 return (mfd);
1377}
Christopher Ferrise4294032016-03-02 14:33:02 -08001378#endif
1379
1380static int
1381prof_getpid(void)
1382{
1383
1384#ifdef _WIN32
1385 return (GetCurrentProcessId());
1386#else
1387 return (getpid());
1388#endif
1389}
Jason Evans8e33c212015-05-01 09:03:20 -07001390
Jason Evans22ca8552010-03-02 11:57:30 -08001391static bool
1392prof_dump_maps(bool propagate_err)
Jason Evansc7177182010-02-11 09:25:56 -08001393{
Jason Evans93f39f82013-10-21 15:07:40 -07001394 bool ret;
Jason Evansc7177182010-02-11 09:25:56 -08001395 int mfd;
Jason Evansc7177182010-02-11 09:25:56 -08001396
Jason Evans7372b152012-02-10 20:22:09 -08001397 cassert(config_prof);
Harald Weppnerc2da2592014-03-18 00:00:14 -07001398#ifdef __FreeBSD__
Jason Evans8e33c212015-05-01 09:03:20 -07001399 mfd = prof_open_maps("/proc/curproc/map");
Christopher Ferrise4294032016-03-02 14:33:02 -08001400#elif defined(_WIN32)
 1401	mfd = -1; /* Not implemented. */
Harald Weppnerc2da2592014-03-18 00:00:14 -07001402#else
Jason Evans8e33c212015-05-01 09:03:20 -07001403 {
Christopher Ferrise4294032016-03-02 14:33:02 -08001404 int pid = prof_getpid();
Jason Evans8e33c212015-05-01 09:03:20 -07001405
1406 mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
1407 if (mfd == -1)
1408 mfd = prof_open_maps("/proc/%d/maps", pid);
1409 }
Harald Weppnerc2da2592014-03-18 00:00:14 -07001410#endif
Jason Evansc7177182010-02-11 09:25:56 -08001411 if (mfd != -1) {
1412 ssize_t nread;
1413
Jason Evans4f37ef62014-01-16 13:23:56 -08001414 if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
Jason Evans93f39f82013-10-21 15:07:40 -07001415 propagate_err) {
1416 ret = true;
1417 goto label_return;
1418 }
Jason Evansc7177182010-02-11 09:25:56 -08001419 nread = 0;
1420 do {
1421 prof_dump_buf_end += nread;
Jason Evanscd9a1342012-03-21 18:33:03 -07001422 if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
Jason Evansc7177182010-02-11 09:25:56 -08001423 /* Make space in prof_dump_buf before read(). */
Jason Evans4f37ef62014-01-16 13:23:56 -08001424 if (prof_dump_flush(propagate_err) &&
Jason Evans93f39f82013-10-21 15:07:40 -07001425 propagate_err) {
1426 ret = true;
1427 goto label_return;
1428 }
Jason Evansc7177182010-02-11 09:25:56 -08001429 }
1430 nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
Jason Evanscd9a1342012-03-21 18:33:03 -07001431 PROF_DUMP_BUFSIZE - prof_dump_buf_end);
Jason Evansc7177182010-02-11 09:25:56 -08001432 } while (nread > 0);
Jason Evans93f39f82013-10-21 15:07:40 -07001433 } else {
1434 ret = true;
1435 goto label_return;
1436 }
Jason Evans22ca8552010-03-02 11:57:30 -08001437
Jason Evans93f39f82013-10-21 15:07:40 -07001438 ret = false;
1439label_return:
1440 if (mfd != -1)
1441 close(mfd);
1442 return (ret);
Jason Evansc7177182010-02-11 09:25:56 -08001443}
1444
Jason Evans4f37ef62014-01-16 13:23:56 -08001445static void
Jason Evans602c8e02014-08-18 16:22:13 -07001446prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
Jason Evans4f37ef62014-01-16 13:23:56 -08001447 const char *filename)
1448{
1449
1450 if (cnt_all->curbytes != 0) {
Jason Evans5fae7dc2015-07-23 13:56:25 -07001451 malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
1452 FMTu64" object%s, %zu context%s\n",
Jason Evans4f37ef62014-01-16 13:23:56 -08001453 cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
1454 cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
Jason Evans602c8e02014-08-18 16:22:13 -07001455 leak_ngctx, (leak_ngctx != 1) ? "s" : "");
Jason Evans4f37ef62014-01-16 13:23:56 -08001456 malloc_printf(
Jason Evans70417202015-05-01 12:31:12 -07001457 "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
Jason Evans4f37ef62014-01-16 13:23:56 -08001458 filename);
1459 }
1460}
1461
Jason Evans602c8e02014-08-18 16:22:13 -07001462static prof_gctx_t *
1463prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
Jason Evans3a81cbd2014-08-16 12:58:55 -07001464{
Jason Evans602c8e02014-08-18 16:22:13 -07001465 prof_gctx_t *ret;
Jason Evans3a81cbd2014-08-16 12:58:55 -07001466 bool propagate_err = *(bool *)arg;
1467
Jason Evans602c8e02014-08-18 16:22:13 -07001468 malloc_mutex_lock(gctx->lock);
Jason Evans3a81cbd2014-08-16 12:58:55 -07001469
Jason Evans602c8e02014-08-18 16:22:13 -07001470 if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
Jason Evans20c31de2014-10-02 23:01:10 -07001471 ret = gctx;
Jason Evans602c8e02014-08-18 16:22:13 -07001472 goto label_return;
1473 }
Jason Evans3a81cbd2014-08-16 12:58:55 -07001474
Jason Evans602c8e02014-08-18 16:22:13 -07001475 ret = NULL;
1476label_return:
1477 malloc_mutex_unlock(gctx->lock);
1478 return (ret);
Jason Evans3a81cbd2014-08-16 12:58:55 -07001479}
1480
Jason Evans22ca8552010-03-02 11:57:30 -08001481static bool
Jason Evans5460aa62014-09-22 21:09:23 -07001482prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
Jason Evans6109fe02010-02-10 10:37:56 -08001483{
Jason Evans602c8e02014-08-18 16:22:13 -07001484 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001485 prof_cnt_t cnt_all;
1486 size_t tabind;
Jason Evans075e77c2010-09-20 19:53:25 -07001487 union {
Jason Evans602c8e02014-08-18 16:22:13 -07001488 prof_gctx_t *p;
Jason Evans075e77c2010-09-20 19:53:25 -07001489 void *v;
Jason Evans602c8e02014-08-18 16:22:13 -07001490 } gctx;
1491 size_t leak_ngctx;
1492 prof_gctx_tree_t gctxs;
Jason Evans6109fe02010-02-10 10:37:56 -08001493
Jason Evans7372b152012-02-10 20:22:09 -08001494 cassert(config_prof);
1495
Jason Evans20c31de2014-10-02 23:01:10 -07001496 tdata = prof_tdata_get(tsd, true);
Jason Evans5460aa62014-09-22 21:09:23 -07001497 if (tdata == NULL)
Jason Evans52386b22012-04-22 16:00:11 -07001498 return (true);
Jason Evans4f37ef62014-01-16 13:23:56 -08001499
1500 malloc_mutex_lock(&prof_dump_mtx);
Jason Evansc93ed812014-10-30 16:50:33 -07001501 prof_enter(tsd, tdata);
Jason Evans6109fe02010-02-10 10:37:56 -08001502
Jason Evans602c8e02014-08-18 16:22:13 -07001503 /*
1504 * Put gctx's in limbo and clear their counters in preparation for
1505 * summing.
1506 */
1507 gctx_tree_new(&gctxs);
Jason Evans551ebc42014-10-03 10:16:09 -07001508 for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
Jason Evans602c8e02014-08-18 16:22:13 -07001509 prof_dump_gctx_prep(gctx.p, &gctxs);
1510
1511 /*
1512 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
1513 * stats and merge them into the associated gctx's.
1514 */
Jason Evans6109fe02010-02-10 10:37:56 -08001515 memset(&cnt_all, 0, sizeof(prof_cnt_t));
Jason Evans602c8e02014-08-18 16:22:13 -07001516 malloc_mutex_lock(&tdatas_mtx);
1517 tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all);
1518 malloc_mutex_unlock(&tdatas_mtx);
1519
1520 /* Merge tctx stats into gctx's. */
1521 leak_ngctx = 0;
1522 gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);
1523
Jason Evansc93ed812014-10-30 16:50:33 -07001524 prof_leave(tsd, tdata);
Jason Evans4f37ef62014-01-16 13:23:56 -08001525
1526 /* Create dump file. */
Jason Evans772163b2014-01-17 15:40:52 -08001527 if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
Jason Evans4f37ef62014-01-16 13:23:56 -08001528 goto label_open_close_error;
Jason Evans6109fe02010-02-10 10:37:56 -08001529
1530 /* Dump profile header. */
Jason Evans4f37ef62014-01-16 13:23:56 -08001531 if (prof_dump_header(propagate_err, &cnt_all))
1532 goto label_write_error;
Jason Evans6109fe02010-02-10 10:37:56 -08001533
Jason Evans602c8e02014-08-18 16:22:13 -07001534 /* Dump per gctx profile stats. */
1535 if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
1536 (void *)&propagate_err) != NULL)
Jason Evans3a81cbd2014-08-16 12:58:55 -07001537 goto label_write_error;
Jason Evans6109fe02010-02-10 10:37:56 -08001538
Jason Evansc7177182010-02-11 09:25:56 -08001539 /* Dump /proc/<pid>/maps if possible. */
Jason Evans22ca8552010-03-02 11:57:30 -08001540 if (prof_dump_maps(propagate_err))
Jason Evans4f37ef62014-01-16 13:23:56 -08001541 goto label_write_error;
Jason Evansc7177182010-02-11 09:25:56 -08001542
Jason Evans4f37ef62014-01-16 13:23:56 -08001543 if (prof_dump_close(propagate_err))
1544 goto label_open_close_error;
Jason Evans6109fe02010-02-10 10:37:56 -08001545
Jason Evans20c31de2014-10-02 23:01:10 -07001546 prof_gctx_finish(tsd, &gctxs);
Jason Evans4f37ef62014-01-16 13:23:56 -08001547 malloc_mutex_unlock(&prof_dump_mtx);
1548
1549 if (leakcheck)
Jason Evans602c8e02014-08-18 16:22:13 -07001550 prof_leakcheck(&cnt_all, leak_ngctx, filename);
Jason Evans22ca8552010-03-02 11:57:30 -08001551
1552 return (false);
Jason Evans4f37ef62014-01-16 13:23:56 -08001553label_write_error:
1554 prof_dump_close(propagate_err);
1555label_open_close_error:
Jason Evans20c31de2014-10-02 23:01:10 -07001556 prof_gctx_finish(tsd, &gctxs);
Jason Evans4f37ef62014-01-16 13:23:56 -08001557 malloc_mutex_unlock(&prof_dump_mtx);
Jason Evans22ca8552010-03-02 11:57:30 -08001558 return (true);
Jason Evans6109fe02010-02-10 10:37:56 -08001559}
1560
Jason Evansd81e4bd2012-03-06 14:57:45 -08001561#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
Jason Evans4f37ef62014-01-16 13:23:56 -08001562#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
Jason Evans6109fe02010-02-10 10:37:56 -08001563static void
Chris Peterson3e310b32014-05-28 19:04:06 -07001564prof_dump_filename(char *filename, char v, uint64_t vseq)
Jason Evans6109fe02010-02-10 10:37:56 -08001565{
Jason Evans6109fe02010-02-10 10:37:56 -08001566
Jason Evans7372b152012-02-10 20:22:09 -08001567 cassert(config_prof);
1568
Jason Evans4f37ef62014-01-16 13:23:56 -08001569 if (vseq != VSEQ_INVALID) {
Jason Evansd81e4bd2012-03-06 14:57:45 -08001570 /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
1571 malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001572 "%s.%d.%"FMTu64".%c%"FMTu64".heap",
Christopher Ferrise4294032016-03-02 14:33:02 -08001573 opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
Jason Evansd81e4bd2012-03-06 14:57:45 -08001574 } else {
1575 /* "<prefix>.<pid>.<seq>.<v>.heap" */
1576 malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
Jason Evans5fae7dc2015-07-23 13:56:25 -07001577 "%s.%d.%"FMTu64".%c.heap",
Christopher Ferrise4294032016-03-02 14:33:02 -08001578 opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
Jason Evans6109fe02010-02-10 10:37:56 -08001579 }
Jason Evans52386b22012-04-22 16:00:11 -07001580 prof_dump_seq++;
Jason Evans6109fe02010-02-10 10:37:56 -08001581}
1582
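
For reference, a standalone sketch (not part of prof.c) of the file names the routine above produces. The prefix shown is the usual opt_prof_prefix default ("jeprof", an assumption here); the pid and sequence numbers are hypothetical.

#include <inttypes.h>
#include <stdio.h>

static void
print_example_dump_filenames(void)
{
	const char *prefix = "jeprof";	/* assumed opt_prof_prefix default */
	int pid = 1234;			/* hypothetical */
	uint64_t seq = 3, iseq = 7;	/* hypothetical */

	/* Interval dump: "<prefix>.<pid>.<seq>.i<iseq>.heap" */
	printf("%s.%d.%" PRIu64 ".i%" PRIu64 ".heap\n", prefix, pid, seq, iseq);
	/* Final dump (vseq == VSEQ_INVALID): "<prefix>.<pid>.<seq>.f.heap" */
	printf("%s.%d.%" PRIu64 ".f.heap\n", prefix, pid, seq);
}
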
1583static void
1584prof_fdump(void)
1585{
Jason Evans5460aa62014-09-22 21:09:23 -07001586 tsd_t *tsd;
Jason Evans6109fe02010-02-10 10:37:56 -08001587 char filename[DUMP_FILENAME_BUFSIZE];
1588
Jason Evans7372b152012-02-10 20:22:09 -08001589 cassert(config_prof);
Jason Evans57efa7b2014-10-08 17:57:19 -07001590 assert(opt_prof_final);
1591 assert(opt_prof_prefix[0] != '\0');
Jason Evans7372b152012-02-10 20:22:09 -08001592
Jason Evans551ebc42014-10-03 10:16:09 -07001593 if (!prof_booted)
Jason Evans6109fe02010-02-10 10:37:56 -08001594 return;
Jason Evans029d44c2014-10-04 11:12:53 -07001595 tsd = tsd_fetch();
Jason Evans6109fe02010-02-10 10:37:56 -08001596
Jason Evans57efa7b2014-10-08 17:57:19 -07001597 malloc_mutex_lock(&prof_dump_seq_mtx);
1598 prof_dump_filename(filename, 'f', VSEQ_INVALID);
1599 malloc_mutex_unlock(&prof_dump_seq_mtx);
1600 prof_dump(tsd, false, filename, opt_prof_leak);
Jason Evans6109fe02010-02-10 10:37:56 -08001601}
1602
1603void
1604prof_idump(void)
1605{
Jason Evans5460aa62014-09-22 21:09:23 -07001606 tsd_t *tsd;
Jason Evans602c8e02014-08-18 16:22:13 -07001607 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001608
Jason Evans7372b152012-02-10 20:22:09 -08001609 cassert(config_prof);
1610
Jason Evans551ebc42014-10-03 10:16:09 -07001611 if (!prof_booted)
Jason Evans6109fe02010-02-10 10:37:56 -08001612 return;
Jason Evans029d44c2014-10-04 11:12:53 -07001613 tsd = tsd_fetch();
Jason Evans5460aa62014-09-22 21:09:23 -07001614 tdata = prof_tdata_get(tsd, false);
1615 if (tdata == NULL)
Jason Evans52386b22012-04-22 16:00:11 -07001616 return;
Jason Evans602c8e02014-08-18 16:22:13 -07001617 if (tdata->enq) {
1618 tdata->enq_idump = true;
Jason Evansd34f9e72010-02-11 13:19:21 -08001619 return;
1620 }
Jason Evans6109fe02010-02-10 10:37:56 -08001621
Jason Evanse7339702010-10-23 18:37:06 -07001622 if (opt_prof_prefix[0] != '\0') {
Dmitry-Me78ae1ac2015-09-08 15:09:20 +03001623 char filename[PATH_MAX + 1];
Jason Evanse7339702010-10-23 18:37:06 -07001624 malloc_mutex_lock(&prof_dump_seq_mtx);
1625 prof_dump_filename(filename, 'i', prof_dump_iseq);
1626 prof_dump_iseq++;
1627 malloc_mutex_unlock(&prof_dump_seq_mtx);
Jason Evans5460aa62014-09-22 21:09:23 -07001628 prof_dump(tsd, false, filename, false);
Jason Evanse7339702010-10-23 18:37:06 -07001629 }
Jason Evans6109fe02010-02-10 10:37:56 -08001630}
1631
Jason Evans22ca8552010-03-02 11:57:30 -08001632bool
1633prof_mdump(const char *filename)
Jason Evans6109fe02010-02-10 10:37:56 -08001634{
Jason Evans5460aa62014-09-22 21:09:23 -07001635 tsd_t *tsd;
Jason Evans22ca8552010-03-02 11:57:30 -08001636 char filename_buf[DUMP_FILENAME_BUFSIZE];
Jason Evans6109fe02010-02-10 10:37:56 -08001637
Jason Evans7372b152012-02-10 20:22:09 -08001638 cassert(config_prof);
1639
Jason Evans551ebc42014-10-03 10:16:09 -07001640 if (!opt_prof || !prof_booted)
Jason Evans22ca8552010-03-02 11:57:30 -08001641 return (true);
Jason Evans029d44c2014-10-04 11:12:53 -07001642 tsd = tsd_fetch();
Jason Evans6109fe02010-02-10 10:37:56 -08001643
Jason Evans22ca8552010-03-02 11:57:30 -08001644 if (filename == NULL) {
1645 /* No filename specified, so automatically generate one. */
Jason Evanse7339702010-10-23 18:37:06 -07001646 if (opt_prof_prefix[0] == '\0')
1647 return (true);
Jason Evans22ca8552010-03-02 11:57:30 -08001648 malloc_mutex_lock(&prof_dump_seq_mtx);
1649 prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
1650 prof_dump_mseq++;
1651 malloc_mutex_unlock(&prof_dump_seq_mtx);
1652 filename = filename_buf;
1653 }
Jason Evans5460aa62014-09-22 21:09:23 -07001654 return (prof_dump(tsd, true, filename, false));
Jason Evans6109fe02010-02-10 10:37:56 -08001655}
1656
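
prof_mdump() is normally reached from application code through the public "prof.dump" mallctl. A minimal usage sketch, assuming an un-prefixed jemalloc build (mallctl() exported as-is from <jemalloc/jemalloc.h>) and that profiling was enabled at startup; the helper name is hypothetical.

#include <jemalloc/jemalloc.h>

/*
 * Hypothetical helper: dump a heap profile to an explicit path, or pass NULL
 * to let jemalloc pick a "<prefix>.<pid>.<seq>.m<mseq>.heap" name.
 */
static int
dump_heap_profile(const char *path)
{
	if (path != NULL) {
		return (mallctl("prof.dump", NULL, NULL, (void *)&path,
		    sizeof(path)));
	}
	return (mallctl("prof.dump", NULL, NULL, NULL, 0));
}
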
1657void
Jason Evanse7339702010-10-23 18:37:06 -07001658prof_gdump(void)
Jason Evans6109fe02010-02-10 10:37:56 -08001659{
Jason Evans5460aa62014-09-22 21:09:23 -07001660 tsd_t *tsd;
Jason Evans602c8e02014-08-18 16:22:13 -07001661 prof_tdata_t *tdata;
Jason Evans6109fe02010-02-10 10:37:56 -08001662
Jason Evans7372b152012-02-10 20:22:09 -08001663 cassert(config_prof);
1664
Jason Evans551ebc42014-10-03 10:16:09 -07001665 if (!prof_booted)
Jason Evans6109fe02010-02-10 10:37:56 -08001666 return;
Jason Evans029d44c2014-10-04 11:12:53 -07001667 tsd = tsd_fetch();
Jason Evans5460aa62014-09-22 21:09:23 -07001668 tdata = prof_tdata_get(tsd, false);
1669 if (tdata == NULL)
Jason Evans52386b22012-04-22 16:00:11 -07001670 return;
Jason Evans602c8e02014-08-18 16:22:13 -07001671 if (tdata->enq) {
1672 tdata->enq_gdump = true;
Jason Evans6109fe02010-02-10 10:37:56 -08001673 return;
1674 }
Jason Evans6109fe02010-02-10 10:37:56 -08001675
Jason Evanse7339702010-10-23 18:37:06 -07001676 if (opt_prof_prefix[0] != '\0') {
Dmitry-Me78ae1ac2015-09-08 15:09:20 +03001677 char filename[DUMP_FILENAME_BUFSIZE];
Jason Evanse7339702010-10-23 18:37:06 -07001678 malloc_mutex_lock(&prof_dump_seq_mtx);
1679 prof_dump_filename(filename, 'u', prof_dump_useq);
1680 prof_dump_useq++;
1681 malloc_mutex_unlock(&prof_dump_seq_mtx);
Jason Evans5460aa62014-09-22 21:09:23 -07001682 prof_dump(tsd, false, filename, false);
Jason Evanse7339702010-10-23 18:37:06 -07001683 }
Jason Evans6109fe02010-02-10 10:37:56 -08001684}
1685
1686static void
Jason Evansae03bf62013-01-22 12:02:08 -08001687prof_bt_hash(const void *key, size_t r_hash[2])
Jason Evans6109fe02010-02-10 10:37:56 -08001688{
Jason Evans6109fe02010-02-10 10:37:56 -08001689 prof_bt_t *bt = (prof_bt_t *)key;
1690
Jason Evans7372b152012-02-10 20:22:09 -08001691 cassert(config_prof);
Jason Evans6109fe02010-02-10 10:37:56 -08001692
Jason Evansae03bf62013-01-22 12:02:08 -08001693 hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
Jason Evans6109fe02010-02-10 10:37:56 -08001694}
1695
1696static bool
1697prof_bt_keycomp(const void *k1, const void *k2)
1698{
1699 const prof_bt_t *bt1 = (prof_bt_t *)k1;
1700 const prof_bt_t *bt2 = (prof_bt_t *)k2;
1701
Jason Evans7372b152012-02-10 20:22:09 -08001702 cassert(config_prof);
1703
Jason Evans6109fe02010-02-10 10:37:56 -08001704 if (bt1->len != bt2->len)
1705 return (false);
1706 return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
1707}
1708
Jason Evans602c8e02014-08-18 16:22:13 -07001709JEMALLOC_INLINE_C uint64_t
1710prof_thr_uid_alloc(void)
Jason Evans6109fe02010-02-10 10:37:56 -08001711{
Jason Evans9d8f3d22014-09-11 18:06:30 -07001712 uint64_t thr_uid;
Jason Evans602c8e02014-08-18 16:22:13 -07001713
Jason Evans9d8f3d22014-09-11 18:06:30 -07001714 malloc_mutex_lock(&next_thr_uid_mtx);
1715 thr_uid = next_thr_uid;
1716 next_thr_uid++;
1717 malloc_mutex_unlock(&next_thr_uid_mtx);
1718
1719 return (thr_uid);
Jason Evans602c8e02014-08-18 16:22:13 -07001720}
1721
1722static prof_tdata_t *
Jason Evansfc12c0b2014-10-03 23:25:30 -07001723prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
1724 char *thread_name, bool active)
Jason Evans602c8e02014-08-18 16:22:13 -07001725{
1726 prof_tdata_t *tdata;
Jason Evans1cb181e2015-01-29 15:30:47 -08001727 tcache_t *tcache;
Jason Evans6109fe02010-02-10 10:37:56 -08001728
Jason Evans7372b152012-02-10 20:22:09 -08001729 cassert(config_prof);
1730
Jason Evans4d6a1342010-10-20 19:05:59 -07001731 /* Initialize an empty cache for this thread. */
Jason Evans1cb181e2015-01-29 15:30:47 -08001732 tcache = tcache_get(tsd, true);
Qi Wangf4a0f322015-10-27 15:12:10 -07001733 tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t),
1734 size2index(sizeof(prof_tdata_t)), false, tcache, true, NULL, true);
Jason Evans602c8e02014-08-18 16:22:13 -07001735 if (tdata == NULL)
Jason Evans4d6a1342010-10-20 19:05:59 -07001736 return (NULL);
1737
Jason Evans602c8e02014-08-18 16:22:13 -07001738 tdata->lock = prof_tdata_mutex_choose(thr_uid);
1739 tdata->thr_uid = thr_uid;
Jason Evans20c31de2014-10-02 23:01:10 -07001740 tdata->thr_discrim = thr_discrim;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001741 tdata->thread_name = thread_name;
Jason Evans20c31de2014-10-02 23:01:10 -07001742 tdata->attached = true;
1743 tdata->expired = false;
Jason Evans04211e22015-03-16 15:11:06 -07001744 tdata->tctx_uid_next = 0;
Jason Evans602c8e02014-08-18 16:22:13 -07001745
Jason Evans5460aa62014-09-22 21:09:23 -07001746 if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
Jason Evans4d6a1342010-10-20 19:05:59 -07001747 prof_bt_hash, prof_bt_keycomp)) {
Qi Wangf4a0f322015-10-27 15:12:10 -07001748 idalloctm(tsd, tdata, tcache, true, true);
Jason Evans4d6a1342010-10-20 19:05:59 -07001749 return (NULL);
1750 }
Jason Evans4d6a1342010-10-20 19:05:59 -07001751
Jason Evans602c8e02014-08-18 16:22:13 -07001752 tdata->prng_state = (uint64_t)(uintptr_t)tdata;
1753 prof_sample_threshold_update(tdata);
Jason Evans4d6a1342010-10-20 19:05:59 -07001754
Jason Evans602c8e02014-08-18 16:22:13 -07001755 tdata->enq = false;
1756 tdata->enq_idump = false;
1757 tdata->enq_gdump = false;
Jason Evans52386b22012-04-22 16:00:11 -07001758
Jason Evans602c8e02014-08-18 16:22:13 -07001759 tdata->dumping = false;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001760 tdata->active = active;
Jason Evans4d6a1342010-10-20 19:05:59 -07001761
Jason Evans602c8e02014-08-18 16:22:13 -07001762 malloc_mutex_lock(&tdatas_mtx);
1763 tdata_tree_insert(&tdatas, tdata);
1764 malloc_mutex_unlock(&tdatas_mtx);
1765
1766 return (tdata);
1767}
1768
1769prof_tdata_t *
Jason Evans5460aa62014-09-22 21:09:23 -07001770prof_tdata_init(tsd_t *tsd)
Jason Evans602c8e02014-08-18 16:22:13 -07001771{
1772
Jason Evansfc12c0b2014-10-03 23:25:30 -07001773 return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
1774 prof_thread_active_init_get()));
Jason Evans602c8e02014-08-18 16:22:13 -07001775}
1776
1777/* tdata->lock must be held. */
1778static bool
Jason Evansf04a0be2014-10-04 15:03:49 -07001779prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
Jason Evans602c8e02014-08-18 16:22:13 -07001780{
1781
Jason Evansf04a0be2014-10-04 15:03:49 -07001782 if (tdata->attached && !even_if_attached)
Jason Evans602c8e02014-08-18 16:22:13 -07001783 return (false);
1784 if (ckh_count(&tdata->bt2tctx) != 0)
1785 return (false);
1786 return (true);
1787}
1788
Jason Evans20c31de2014-10-02 23:01:10 -07001789/* tdatas_mtx must be held. */
Jason Evans602c8e02014-08-18 16:22:13 -07001790static void
Jason Evansf04a0be2014-10-04 15:03:49 -07001791prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
1792 bool even_if_attached)
Jason Evans602c8e02014-08-18 16:22:13 -07001793{
Jason Evans1cb181e2015-01-29 15:30:47 -08001794 tcache_t *tcache;
Jason Evans602c8e02014-08-18 16:22:13 -07001795
Jason Evansf04a0be2014-10-04 15:03:49 -07001796 assert(prof_tdata_should_destroy(tdata, even_if_attached));
Jason Evans029d44c2014-10-04 11:12:53 -07001797 assert(tsd_prof_tdata_get(tsd) != tdata);
Jason Evans602c8e02014-08-18 16:22:13 -07001798
Jason Evans602c8e02014-08-18 16:22:13 -07001799 tdata_tree_remove(&tdatas, tdata);
Jason Evans602c8e02014-08-18 16:22:13 -07001800
Jason Evans1cb181e2015-01-29 15:30:47 -08001801 tcache = tcache_get(tsd, false);
Jason Evans602c8e02014-08-18 16:22:13 -07001802 if (tdata->thread_name != NULL)
Qi Wangf4a0f322015-10-27 15:12:10 -07001803 idalloctm(tsd, tdata->thread_name, tcache, true, true);
Jason Evans5460aa62014-09-22 21:09:23 -07001804 ckh_delete(tsd, &tdata->bt2tctx);
Qi Wangf4a0f322015-10-27 15:12:10 -07001805 idalloctm(tsd, tdata, tcache, true, true);
Jason Evans602c8e02014-08-18 16:22:13 -07001806}
1807
1808static void
Jason Evansf04a0be2014-10-04 15:03:49 -07001809prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
Jason Evans20c31de2014-10-02 23:01:10 -07001810{
1811
1812 malloc_mutex_lock(&tdatas_mtx);
Jason Evansf04a0be2014-10-04 15:03:49 -07001813 prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
Jason Evans20c31de2014-10-02 23:01:10 -07001814 malloc_mutex_unlock(&tdatas_mtx);
1815}
1816
1817static void
1818prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
Jason Evans602c8e02014-08-18 16:22:13 -07001819{
1820 bool destroy_tdata;
1821
1822 malloc_mutex_lock(tdata->lock);
Jason Evans20c31de2014-10-02 23:01:10 -07001823 if (tdata->attached) {
Jason Evansf04a0be2014-10-04 15:03:49 -07001824 destroy_tdata = prof_tdata_should_destroy(tdata, true);
1825 /*
1826 * Only detach if !destroy_tdata, because detaching would allow
1827 * another thread to win the race to destroy tdata.
1828 */
1829 if (!destroy_tdata)
1830 tdata->attached = false;
Jason Evans029d44c2014-10-04 11:12:53 -07001831 tsd_prof_tdata_set(tsd, NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07001832 } else
1833 destroy_tdata = false;
1834 malloc_mutex_unlock(tdata->lock);
1835 if (destroy_tdata)
Jason Evansf04a0be2014-10-04 15:03:49 -07001836 prof_tdata_destroy(tsd, tdata, true);
Jason Evans602c8e02014-08-18 16:22:13 -07001837}
1838
Jason Evans20c31de2014-10-02 23:01:10 -07001839prof_tdata_t *
1840prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
Jason Evans602c8e02014-08-18 16:22:13 -07001841{
Jason Evans20c31de2014-10-02 23:01:10 -07001842 uint64_t thr_uid = tdata->thr_uid;
1843 uint64_t thr_discrim = tdata->thr_discrim + 1;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001844 char *thread_name = (tdata->thread_name != NULL) ?
1845 prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
1846 bool active = tdata->active;
Jason Evans602c8e02014-08-18 16:22:13 -07001847
Jason Evans20c31de2014-10-02 23:01:10 -07001848 prof_tdata_detach(tsd, tdata);
Jason Evansfc12c0b2014-10-03 23:25:30 -07001849 return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
1850 active));
Jason Evans602c8e02014-08-18 16:22:13 -07001851}
1852
Jason Evans20c31de2014-10-02 23:01:10 -07001853static bool
1854prof_tdata_expire(prof_tdata_t *tdata)
Jason Evans602c8e02014-08-18 16:22:13 -07001855{
Jason Evans20c31de2014-10-02 23:01:10 -07001856 bool destroy_tdata;
Jason Evans602c8e02014-08-18 16:22:13 -07001857
Jason Evans20c31de2014-10-02 23:01:10 -07001858 malloc_mutex_lock(tdata->lock);
1859 if (!tdata->expired) {
1860 tdata->expired = true;
1861 destroy_tdata = tdata->attached ? false :
Jason Evansf04a0be2014-10-04 15:03:49 -07001862 prof_tdata_should_destroy(tdata, false);
Jason Evans20c31de2014-10-02 23:01:10 -07001863 } else
1864 destroy_tdata = false;
1865 malloc_mutex_unlock(tdata->lock);
1866
1867 return (destroy_tdata);
Jason Evans602c8e02014-08-18 16:22:13 -07001868}
1869
1870static prof_tdata_t *
1871prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
1872{
1873
Jason Evans20c31de2014-10-02 23:01:10 -07001874 return (prof_tdata_expire(tdata) ? tdata : NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07001875}
1876
1877void
Jason Evans5460aa62014-09-22 21:09:23 -07001878prof_reset(tsd_t *tsd, size_t lg_sample)
Jason Evans602c8e02014-08-18 16:22:13 -07001879{
Jason Evans20c31de2014-10-02 23:01:10 -07001880 prof_tdata_t *next;
Jason Evans602c8e02014-08-18 16:22:13 -07001881
1882 assert(lg_sample < (sizeof(uint64_t) << 3));
1883
1884 malloc_mutex_lock(&prof_dump_mtx);
1885 malloc_mutex_lock(&tdatas_mtx);
1886
1887 lg_prof_sample = lg_sample;
Jason Evans20c31de2014-10-02 23:01:10 -07001888
1889 next = NULL;
1890 do {
1891 prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
1892 prof_tdata_reset_iter, NULL);
1893 if (to_destroy != NULL) {
1894 next = tdata_tree_next(&tdatas, to_destroy);
Jason Evansf04a0be2014-10-04 15:03:49 -07001895 prof_tdata_destroy_locked(tsd, to_destroy, false);
Jason Evans20c31de2014-10-02 23:01:10 -07001896 } else
1897 next = NULL;
1898 } while (next != NULL);
Jason Evans602c8e02014-08-18 16:22:13 -07001899
1900 malloc_mutex_unlock(&tdatas_mtx);
1901 malloc_mutex_unlock(&prof_dump_mtx);
Jason Evans4d6a1342010-10-20 19:05:59 -07001902}
1903
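
prof_reset() backs the public "prof.reset" mallctl, which discards accumulated profile state and can optionally install a new lg_prof_sample. A minimal sketch, assuming the un-prefixed public API; the helper name and the chosen sample rate are hypothetical.

#include <jemalloc/jemalloc.h>

/* Hypothetical helper: reset profile state and sample roughly every 512 KiB. */
static int
reset_profiling(void)
{
	size_t lg_sample = 19;	/* 2^19 bytes between samples, on average. */

	return (mallctl("prof.reset", NULL, NULL, &lg_sample,
	    sizeof(lg_sample)));
}
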
Jason Evanscd9a1342012-03-21 18:33:03 -07001904void
Jason Evans5460aa62014-09-22 21:09:23 -07001905prof_tdata_cleanup(tsd_t *tsd)
Jason Evans4d6a1342010-10-20 19:05:59 -07001906{
Jason Evans5460aa62014-09-22 21:09:23 -07001907 prof_tdata_t *tdata;
Jason Evans4d6a1342010-10-20 19:05:59 -07001908
Jason Evans5460aa62014-09-22 21:09:23 -07001909 if (!config_prof)
1910 return;
Jason Evans7372b152012-02-10 20:22:09 -08001911
Jason Evans5460aa62014-09-22 21:09:23 -07001912 tdata = tsd_prof_tdata_get(tsd);
1913 if (tdata != NULL)
1914 prof_tdata_detach(tsd, tdata);
Jason Evans6109fe02010-02-10 10:37:56 -08001915}
1916
Jason Evansfc12c0b2014-10-03 23:25:30 -07001917bool
1918prof_active_get(void)
1919{
1920 bool prof_active_current;
1921
1922 malloc_mutex_lock(&prof_active_mtx);
1923 prof_active_current = prof_active;
1924 malloc_mutex_unlock(&prof_active_mtx);
1925 return (prof_active_current);
1926}
1927
1928bool
1929prof_active_set(bool active)
1930{
1931 bool prof_active_old;
1932
1933 malloc_mutex_lock(&prof_active_mtx);
1934 prof_active_old = prof_active;
1935 prof_active = active;
1936 malloc_mutex_unlock(&prof_active_mtx);
1937 return (prof_active_old);
1938}
1939
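
prof_active_get()/prof_active_set() implement the "prof.active" mallctl, which lets a process pause and resume sampling at runtime. A minimal sketch, assuming the un-prefixed public API; the helper name is hypothetical and errors are ignored.

#include <jemalloc/jemalloc.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical helper: set the new sampling state, returning the old one. */
static bool
set_prof_active(bool active)
{
	bool old = false;
	size_t oldlen = sizeof(old);

	(void)mallctl("prof.active", &old, &oldlen, &active, sizeof(active));
	return (old);
}
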
Jason Evans602c8e02014-08-18 16:22:13 -07001940const char *
1941prof_thread_name_get(void)
1942{
Jason Evans5460aa62014-09-22 21:09:23 -07001943 tsd_t *tsd;
1944 prof_tdata_t *tdata;
1945
Jason Evans029d44c2014-10-04 11:12:53 -07001946 tsd = tsd_fetch();
Jason Evans5460aa62014-09-22 21:09:23 -07001947 tdata = prof_tdata_get(tsd, true);
1948 if (tdata == NULL)
Jason Evansfc12c0b2014-10-03 23:25:30 -07001949 return ("");
1950 return (tdata->thread_name != NULL ? tdata->thread_name : "");
Jason Evans602c8e02014-08-18 16:22:13 -07001951}
1952
Jason Evansfc12c0b2014-10-03 23:25:30 -07001953static char *
1954prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
1955{
1956 char *ret;
1957 size_t size;
1958
1959 if (thread_name == NULL)
1960 return (NULL);
1961
1962 size = strlen(thread_name) + 1;
1963 if (size == 1)
1964 return ("");
1965
Qi Wangf4a0f322015-10-27 15:12:10 -07001966 ret = iallocztm(tsd, size, size2index(size), false, tcache_get(tsd,
1967 true), true, NULL, true);
Jason Evansfc12c0b2014-10-03 23:25:30 -07001968 if (ret == NULL)
1969 return (NULL);
1970 memcpy(ret, thread_name, size);
1971 return (ret);
1972}
1973
1974int
Jason Evans5460aa62014-09-22 21:09:23 -07001975prof_thread_name_set(tsd_t *tsd, const char *thread_name)
Jason Evans602c8e02014-08-18 16:22:13 -07001976{
1977 prof_tdata_t *tdata;
Jason Evansfc12c0b2014-10-03 23:25:30 -07001978 unsigned i;
Jason Evans602c8e02014-08-18 16:22:13 -07001979 char *s;
1980
Jason Evans5460aa62014-09-22 21:09:23 -07001981 tdata = prof_tdata_get(tsd, true);
1982 if (tdata == NULL)
Jason Evansfc12c0b2014-10-03 23:25:30 -07001983 return (EAGAIN);
Jason Evans602c8e02014-08-18 16:22:13 -07001984
Jason Evansfc12c0b2014-10-03 23:25:30 -07001985 /* Validate input. */
1986 if (thread_name == NULL)
1987 return (EFAULT);
1988 for (i = 0; thread_name[i] != '\0'; i++) {
1989 char c = thread_name[i];
1990 if (!isgraph(c) && !isblank(c))
1991 return (EFAULT);
1992 }
1993
1994 s = prof_thread_name_alloc(tsd, thread_name);
Jason Evans602c8e02014-08-18 16:22:13 -07001995 if (s == NULL)
Jason Evansfc12c0b2014-10-03 23:25:30 -07001996 return (EAGAIN);
Jason Evans602c8e02014-08-18 16:22:13 -07001997
Jason Evansfc12c0b2014-10-03 23:25:30 -07001998 if (tdata->thread_name != NULL) {
Jason Evans1cb181e2015-01-29 15:30:47 -08001999 idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
Qi Wangf4a0f322015-10-27 15:12:10 -07002000 true, true);
Jason Evansfc12c0b2014-10-03 23:25:30 -07002001 tdata->thread_name = NULL;
2002 }
2003 if (strlen(s) > 0)
2004 tdata->thread_name = s;
2005 return (0);
Jason Evans602c8e02014-08-18 16:22:13 -07002006}
2007
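
prof_thread_name_set() is what the "thread.prof.name" mallctl calls into, so heap dumps can attribute samples to human-readable thread labels. A minimal sketch, assuming the un-prefixed public API; the helper name is hypothetical. Note that the value written is the (const char *) pointer itself.

#include <jemalloc/jemalloc.h>

/* Hypothetical helper: label the calling thread in subsequent heap profiles. */
static int
set_thread_profile_name(const char *name)
{
	/* The new value is the pointer, so newlen is sizeof(const char *). */
	return (mallctl("thread.prof.name", NULL, NULL, (void *)&name,
	    sizeof(name)));
}
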
2008bool
2009prof_thread_active_get(void)
2010{
Jason Evans5460aa62014-09-22 21:09:23 -07002011 tsd_t *tsd;
2012 prof_tdata_t *tdata;
2013
Jason Evans029d44c2014-10-04 11:12:53 -07002014 tsd = tsd_fetch();
Jason Evans5460aa62014-09-22 21:09:23 -07002015 tdata = prof_tdata_get(tsd, true);
2016 if (tdata == NULL)
Jason Evans602c8e02014-08-18 16:22:13 -07002017 return (false);
2018 return (tdata->active);
2019}
2020
2021bool
2022prof_thread_active_set(bool active)
2023{
Jason Evans5460aa62014-09-22 21:09:23 -07002024 tsd_t *tsd;
Jason Evans602c8e02014-08-18 16:22:13 -07002025 prof_tdata_t *tdata;
2026
Jason Evans029d44c2014-10-04 11:12:53 -07002027 tsd = tsd_fetch();
Jason Evans5460aa62014-09-22 21:09:23 -07002028 tdata = prof_tdata_get(tsd, true);
2029 if (tdata == NULL)
Jason Evans602c8e02014-08-18 16:22:13 -07002030 return (true);
2031 tdata->active = active;
2032 return (false);
2033}
2034
Jason Evansfc12c0b2014-10-03 23:25:30 -07002035bool
2036prof_thread_active_init_get(void)
2037{
2038 bool active_init;
2039
2040 malloc_mutex_lock(&prof_thread_active_init_mtx);
2041 active_init = prof_thread_active_init;
2042 malloc_mutex_unlock(&prof_thread_active_init_mtx);
2043 return (active_init);
2044}
2045
2046bool
2047prof_thread_active_init_set(bool active_init)
2048{
2049 bool active_init_old;
2050
2051 malloc_mutex_lock(&prof_thread_active_init_mtx);
2052 active_init_old = prof_thread_active_init;
2053 prof_thread_active_init = active_init;
2054 malloc_mutex_unlock(&prof_thread_active_init_mtx);
2055 return (active_init_old);
2056}
2057
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002058bool
2059prof_gdump_get(void)
2060{
2061 bool prof_gdump_current;
2062
2063 malloc_mutex_lock(&prof_gdump_mtx);
2064 prof_gdump_current = prof_gdump_val;
2065 malloc_mutex_unlock(&prof_gdump_mtx);
2066 return (prof_gdump_current);
2067}
2068
2069bool
2070prof_gdump_set(bool gdump)
2071{
2072 bool prof_gdump_old;
2073
2074 malloc_mutex_lock(&prof_gdump_mtx);
2075 prof_gdump_old = prof_gdump_val;
2076 prof_gdump_val = gdump;
2077 malloc_mutex_unlock(&prof_gdump_mtx);
2078 return (prof_gdump_old);
2079}
2080
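
prof_gdump_get()/prof_gdump_set() back the "prof.gdump" mallctl, which triggers a dump whenever the total virtual memory mapped by jemalloc reaches a new maximum. A minimal sketch, assuming the un-prefixed public API; the helper name is hypothetical.

#include <jemalloc/jemalloc.h>
#include <stdbool.h>

/* Hypothetical helper: enable high-water-mark triggered dumps at runtime. */
static int
enable_gdump(void)
{
	bool gdump = true;

	return (mallctl("prof.gdump", NULL, NULL, &gdump, sizeof(gdump)));
}
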
Jason Evans6109fe02010-02-10 10:37:56 -08002081void
2082prof_boot0(void)
2083{
2084
Jason Evans7372b152012-02-10 20:22:09 -08002085 cassert(config_prof);
2086
Jason Evanse7339702010-10-23 18:37:06 -07002087 memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
2088 sizeof(PROF_PREFIX_DEFAULT));
2089}
2090
2091void
2092prof_boot1(void)
2093{
2094
Jason Evans7372b152012-02-10 20:22:09 -08002095 cassert(config_prof);
2096
Jason Evans6109fe02010-02-10 10:37:56 -08002097 /*
Jason Evans9b0cbf02014-04-11 14:24:51 -07002098 * opt_prof must be in its final state before any arenas are
2099 * initialized, so this function must be executed early.
Jason Evans6109fe02010-02-10 10:37:56 -08002100 */
2101
Jason Evans551ebc42014-10-03 10:16:09 -07002102 if (opt_prof_leak && !opt_prof) {
Jason Evans6109fe02010-02-10 10:37:56 -08002103 /*
2104 * Enable opt_prof, but in such a way that profiles are never
2105 * automatically dumped.
2106 */
2107 opt_prof = true;
Jason Evanse7339702010-10-23 18:37:06 -07002108 opt_prof_gdump = false;
Jason Evansa02fc082010-03-31 17:35:51 -07002109 } else if (opt_prof) {
2110 if (opt_lg_prof_interval >= 0) {
2111 prof_interval = (((uint64_t)1U) <<
2112 opt_lg_prof_interval);
Jason Evansa3b33862012-11-13 12:56:27 -08002113 }
Jason Evansa02fc082010-03-31 17:35:51 -07002114 }
Jason Evans6109fe02010-02-10 10:37:56 -08002115}
2116
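
The options consumed by prof_boot1() are normally set before startup, e.g. MALLOC_CONF="prof:true,prof_leak:true,lg_prof_interval:30", and their resolved values are readable through the "opt.*" mallctl namespace. A minimal sketch, assuming the un-prefixed public API; the helper name is hypothetical and errors are ignored.

#include <jemalloc/jemalloc.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

/* Hypothetical helper: report how the boot-time profiling options resolved. */
static void
report_prof_options(void)
{
	bool prof_on = false, leak = false;
	ssize_t lg_interval = -1;
	size_t sz;

	sz = sizeof(prof_on);
	(void)mallctl("opt.prof", &prof_on, &sz, NULL, 0);
	sz = sizeof(leak);
	(void)mallctl("opt.prof_leak", &leak, &sz, NULL, 0);
	sz = sizeof(lg_interval);
	(void)mallctl("opt.lg_prof_interval", &lg_interval, &sz, NULL, 0);
	printf("prof:%d prof_leak:%d lg_prof_interval:%zd\n",
	    prof_on, leak, lg_interval);
}
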
2117bool
Jason Evanse7339702010-10-23 18:37:06 -07002118prof_boot2(void)
Jason Evans6109fe02010-02-10 10:37:56 -08002119{
2120
Jason Evans7372b152012-02-10 20:22:09 -08002121 cassert(config_prof);
2122
Jason Evans6109fe02010-02-10 10:37:56 -08002123 if (opt_prof) {
Jason Evans5460aa62014-09-22 21:09:23 -07002124 tsd_t *tsd;
Jason Evans6da54182012-03-23 18:05:51 -07002125 unsigned i;
2126
Jason Evans602c8e02014-08-18 16:22:13 -07002127 lg_prof_sample = opt_lg_prof_sample;
2128
Jason Evansfc12c0b2014-10-03 23:25:30 -07002129 prof_active = opt_prof_active;
2130 if (malloc_mutex_init(&prof_active_mtx))
2131 return (true);
2132
Jason Evans5b8ed5b2015-01-25 21:16:57 -08002133 prof_gdump_val = opt_prof_gdump;
2134 if (malloc_mutex_init(&prof_gdump_mtx))
2135 return (true);
2136
Jason Evansfc12c0b2014-10-03 23:25:30 -07002137 prof_thread_active_init = opt_prof_thread_active_init;
2138 if (malloc_mutex_init(&prof_thread_active_init_mtx))
2139 return (true);
2140
Jason Evans029d44c2014-10-04 11:12:53 -07002141 tsd = tsd_fetch();
Jason Evans5460aa62014-09-22 21:09:23 -07002142 if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
Jason Evans6109fe02010-02-10 10:37:56 -08002143 prof_bt_keycomp))
2144 return (true);
Jason Evans602c8e02014-08-18 16:22:13 -07002145 if (malloc_mutex_init(&bt2gctx_mtx))
Jason Evans6109fe02010-02-10 10:37:56 -08002146 return (true);
Jason Evans6109fe02010-02-10 10:37:56 -08002147
Jason Evans602c8e02014-08-18 16:22:13 -07002148 tdata_tree_new(&tdatas);
2149 if (malloc_mutex_init(&tdatas_mtx))
2150 return (true);
2151
2152 next_thr_uid = 0;
Jason Evans9d8f3d22014-09-11 18:06:30 -07002153 if (malloc_mutex_init(&next_thr_uid_mtx))
2154 return (true);
Jason Evans602c8e02014-08-18 16:22:13 -07002155
Jason Evans6109fe02010-02-10 10:37:56 -08002156 if (malloc_mutex_init(&prof_dump_seq_mtx))
2157 return (true);
Jason Evans4f37ef62014-01-16 13:23:56 -08002158 if (malloc_mutex_init(&prof_dump_mtx))
2159 return (true);
Jason Evans6109fe02010-02-10 10:37:56 -08002160
Jason Evans57efa7b2014-10-08 17:57:19 -07002161 if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
2162 atexit(prof_fdump) != 0) {
Jason Evans698805c2010-03-03 17:45:38 -08002163 malloc_write("<jemalloc>: Error in atexit()\n");
Jason Evans6109fe02010-02-10 10:37:56 -08002164 if (opt_abort)
2165 abort();
2166 }
Jason Evans6da54182012-03-23 18:05:51 -07002167
Jason Evans602c8e02014-08-18 16:22:13 -07002168 gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
Jason Evans6da54182012-03-23 18:05:51 -07002169 sizeof(malloc_mutex_t));
Jason Evans602c8e02014-08-18 16:22:13 -07002170 if (gctx_locks == NULL)
Jason Evans6da54182012-03-23 18:05:51 -07002171 return (true);
2172 for (i = 0; i < PROF_NCTX_LOCKS; i++) {
Jason Evans602c8e02014-08-18 16:22:13 -07002173 if (malloc_mutex_init(&gctx_locks[i]))
2174 return (true);
2175 }
2176
2177 tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS *
2178 sizeof(malloc_mutex_t));
2179 if (tdata_locks == NULL)
2180 return (true);
2181 for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
2182 if (malloc_mutex_init(&tdata_locks[i]))
Jason Evans6da54182012-03-23 18:05:51 -07002183 return (true);
2184 }
Jason Evans6109fe02010-02-10 10:37:56 -08002185 }
2186
Jason Evansb27805b2010-02-10 18:15:53 -08002187#ifdef JEMALLOC_PROF_LIBGCC
2188 /*
2189 * Cause the backtracing machinery to allocate its internal state
2190 * before enabling profiling.
2191 */
2192 _Unwind_Backtrace(prof_unwind_init_callback, NULL);
2193#endif
2194
Jason Evans6109fe02010-02-10 10:37:56 -08002195 prof_booted = true;
2196
2197 return (false);
2198}
2199
Jason Evans20f1fc92012-10-09 14:46:22 -07002200void
2201prof_prefork(void)
2202{
2203
2204 if (opt_prof) {
2205 unsigned i;
2206
Jason Evans9d8f3d22014-09-11 18:06:30 -07002207 malloc_mutex_prefork(&tdatas_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07002208 malloc_mutex_prefork(&bt2gctx_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07002209 malloc_mutex_prefork(&next_thr_uid_mtx);
Jason Evansf1c3da82013-10-21 14:59:10 -07002210 malloc_mutex_prefork(&prof_dump_seq_mtx);
Jason Evans20f1fc92012-10-09 14:46:22 -07002211 for (i = 0; i < PROF_NCTX_LOCKS; i++)
Jason Evans602c8e02014-08-18 16:22:13 -07002212 malloc_mutex_prefork(&gctx_locks[i]);
Jason Evans9d8f3d22014-09-11 18:06:30 -07002213 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
2214 malloc_mutex_prefork(&tdata_locks[i]);
Jason Evans20f1fc92012-10-09 14:46:22 -07002215 }
2216}
2217
2218void
2219prof_postfork_parent(void)
2220{
2221
2222 if (opt_prof) {
2223 unsigned i;
2224
Jason Evans9d8f3d22014-09-11 18:06:30 -07002225 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
2226 malloc_mutex_postfork_parent(&tdata_locks[i]);
Jason Evans20f1fc92012-10-09 14:46:22 -07002227 for (i = 0; i < PROF_NCTX_LOCKS; i++)
Jason Evans602c8e02014-08-18 16:22:13 -07002228 malloc_mutex_postfork_parent(&gctx_locks[i]);
Jason Evans20f1fc92012-10-09 14:46:22 -07002229 malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07002230 malloc_mutex_postfork_parent(&next_thr_uid_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07002231 malloc_mutex_postfork_parent(&bt2gctx_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07002232 malloc_mutex_postfork_parent(&tdatas_mtx);
Jason Evans20f1fc92012-10-09 14:46:22 -07002233 }
2234}
2235
2236void
2237prof_postfork_child(void)
2238{
2239
2240 if (opt_prof) {
2241 unsigned i;
2242
Jason Evans9d8f3d22014-09-11 18:06:30 -07002243 for (i = 0; i < PROF_NTDATA_LOCKS; i++)
2244 malloc_mutex_postfork_child(&tdata_locks[i]);
Jason Evans20f1fc92012-10-09 14:46:22 -07002245 for (i = 0; i < PROF_NCTX_LOCKS; i++)
Jason Evans602c8e02014-08-18 16:22:13 -07002246 malloc_mutex_postfork_child(&gctx_locks[i]);
Jason Evans20f1fc92012-10-09 14:46:22 -07002247 malloc_mutex_postfork_child(&prof_dump_seq_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07002248 malloc_mutex_postfork_child(&next_thr_uid_mtx);
Jason Evans602c8e02014-08-18 16:22:13 -07002249 malloc_mutex_postfork_child(&bt2gctx_mtx);
Jason Evans9d8f3d22014-09-11 18:06:30 -07002250 malloc_mutex_postfork_child(&tdatas_mtx);
Jason Evans20f1fc92012-10-09 14:46:22 -07002251 }
2252}
2253
Jason Evans6109fe02010-02-10 10:37:56 -08002254/******************************************************************************/