#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t opt_purge = PURGE_DEFAULT;
const char *purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
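/*
 * Added commentary (not part of the original source): opt_purge selects the
 * dirty page purging strategy.  Under "ratio" purging the arena tries to keep
 * the number of dirty pages below active_pages / 2^opt_lg_dirty_mult; under
 * "decay" purging, unused dirty pages are purged gradually over approximately
 * opt_decay_time seconds.  The mode is normally chosen at startup, e.g. via
 * MALLOC_CONF="purge:decay" (assuming a build in which decay purging is
 * available).
 */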
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default;
ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t decay_time_default;

arena_bin_info_t arena_bin_info[NBINS];

size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
size_t large_maxclass; /* Max large size class. */
unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk);
static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
    size_t ndirty_limit);
static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
    bool dirty, bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
    arena_bin_t *bin);

/******************************************************************************/

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_INLINE_C const extent_node_t *
arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	return (&chunk->node);
}

JEMALLOC_INLINE_C int
arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
{
	size_t a_sn, b_sn;

	assert(a != NULL);
	assert(b != NULL);

	a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
	b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));

	return ((a_sn > b_sn) - (a_sn < b_sn));
}

JEMALLOC_INLINE_C int
arena_ad_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

JEMALLOC_INLINE_C int
arena_snad_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	int ret;

	assert(a != NULL);
	assert(b != NULL);

	ret = arena_sn_comp(a, b);
	if (ret != 0)
		return (ret);

	ret = arena_ad_comp(a, b);
	return (ret);
}

/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
    ph_link, arena_snad_comp)

#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert(size <= HUGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	pind = psz2ind(size - large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow. This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return (size);
	}
	ret = pind2sz(pind - 1) + large_pad;
	assert(ret <= size);
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif
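/*
 * Illustrative example (added commentary; assumes 4 KiB pages, large_pad == 0,
 * and the usual four-size-classes-per-doubling spacing of page size classes):
 * a 20 KiB run size is itself a size class, so run_quantize_floor(20 KiB) ==
 * 20 KiB, whereas 36 KiB falls between the 32 KiB and 40 KiB classes, so
 * run_quantize_floor(36 KiB) == 32 KiB and run_quantize_ceil(36 KiB), defined
 * below, == 40 KiB.
 */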

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= HUGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in. This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
	}
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert((npages << LG_PAGE) < chunksize);
	assert(pind2sz(pind) <= chunksize);
	arena_run_heap_insert(&arena->runs_avail[pind],
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert((npages << LG_PAGE) < chunksize);
	assert(pind2sz(pind) <= chunksize);
	arena_run_heap_remove(&arena->runs_avail[pind],
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}
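/*
 * Added commentary (not part of the original source): a cached chunk (fully
 * unused but not yet unmapped) is linked into the same arena->runs_dirty ring
 * as dirty runs, so purging can traverse dirty runs and cached chunks
 * together; the separate arena->chunks_cache list exists so whole cached
 * chunks can also be found directly.  ndirty is counted in pages, so a cached
 * chunk contributes extent_node_size_get(node) >> LG_PAGE pages, as computed
 * by arena_chunk_dirty_npages() above.
 */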
278
Jason Evansaf1f5922014-10-30 16:38:08 -0700279JEMALLOC_INLINE_C void *
Jason Evans49f7e8f2011-03-15 13:59:15 -0700280arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
Jason Evanse476f8a2010-01-16 09:53:50 -0800281{
282 void *ret;
Jason Evans42ce80e2016-02-25 20:51:00 -0800283 size_t regind;
Jason Evans0c5dd032014-09-29 01:31:39 -0700284 arena_chunk_map_misc_t *miscelm;
285 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -0800286
Jason Evans1e0a6362010-03-13 13:41:58 -0800287 assert(run->nfree > 0);
Jason Evans551ebc42014-10-03 10:16:09 -0700288 assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
Jason Evanse476f8a2010-01-16 09:53:50 -0800289
Jason Evans9e1810c2016-02-24 12:42:23 -0800290 regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
Jason Evans0c5dd032014-09-29 01:31:39 -0700291 miscelm = arena_run_to_miscelm(run);
292 rpages = arena_miscelm_to_rpages(miscelm);
293 ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
Jason Evans122449b2012-04-06 00:35:09 -0700294 (uintptr_t)(bin_info->reg_interval * regind));
Jason Evans1e0a6362010-03-13 13:41:58 -0800295 run->nfree--;
Jason Evans1e0a6362010-03-13 13:41:58 -0800296 return (ret);
Jason Evans6109fe02010-02-10 10:37:56 -0800297}
298
Jason Evansaf1f5922014-10-30 16:38:08 -0700299JEMALLOC_INLINE_C void
Jason Evans1e0a6362010-03-13 13:41:58 -0800300arena_run_reg_dalloc(arena_run_t *run, void *ptr)
Jason Evans6109fe02010-02-10 10:37:56 -0800301{
Jason Evans49f7e8f2011-03-15 13:59:15 -0700302 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans203484e2012-05-02 00:30:36 -0700303 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
304 size_t mapbits = arena_mapbits_get(chunk, pageind);
Jason Evansd01fd192015-08-19 15:21:32 -0700305 szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
Jason Evans49f7e8f2011-03-15 13:59:15 -0700306 arena_bin_info_t *bin_info = &arena_bin_info[binind];
Jason Evans42ce80e2016-02-25 20:51:00 -0800307 size_t regind = arena_run_regind(run, bin_info, ptr);
Jason Evans84c8eef2011-03-16 10:30:13 -0700308
Jason Evans49f7e8f2011-03-15 13:59:15 -0700309 assert(run->nfree < bin_info->nregs);
Jason Evans1e0a6362010-03-13 13:41:58 -0800310 /* Freeing an interior pointer can cause assertion failure. */
Jason Evans0c5dd032014-09-29 01:31:39 -0700311 assert(((uintptr_t)ptr -
312 ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
Jason Evans122449b2012-04-06 00:35:09 -0700313 (uintptr_t)bin_info->reg0_offset)) %
314 (uintptr_t)bin_info->reg_interval == 0);
Jason Evans0c5dd032014-09-29 01:31:39 -0700315 assert((uintptr_t)ptr >=
316 (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
Jason Evans49f7e8f2011-03-15 13:59:15 -0700317 (uintptr_t)bin_info->reg0_offset);
Jason Evans84c8eef2011-03-16 10:30:13 -0700318 /* Freeing an unallocated pointer can cause assertion failure. */
Jason Evans0c5dd032014-09-29 01:31:39 -0700319 assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
Jason Evanse476f8a2010-01-16 09:53:50 -0800320
Jason Evans0c5dd032014-09-29 01:31:39 -0700321 bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
Jason Evans1e0a6362010-03-13 13:41:58 -0800322 run->nfree++;
Jason Evanse476f8a2010-01-16 09:53:50 -0800323}
324
Jason Evansaf1f5922014-10-30 16:38:08 -0700325JEMALLOC_INLINE_C void
Jason Evans38067482013-01-21 20:04:42 -0800326arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
327{
328
Jason Evansbd87b012014-04-15 16:35:08 -0700329 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
330 (run_ind << LG_PAGE)), (npages << LG_PAGE));
Jason Evans38067482013-01-21 20:04:42 -0800331 memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
332 (npages << LG_PAGE));
Jason Evans38067482013-01-21 20:04:42 -0800333}
334
Jason Evansaf1f5922014-10-30 16:38:08 -0700335JEMALLOC_INLINE_C void
Jason Evansdda90f52013-10-19 23:48:40 -0700336arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
337{
338
Jason Evansbd87b012014-04-15 16:35:08 -0700339 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
340 << LG_PAGE)), PAGE);
Jason Evansdda90f52013-10-19 23:48:40 -0700341}
342
Jason Evansaf1f5922014-10-30 16:38:08 -0700343JEMALLOC_INLINE_C void
Jason Evans38067482013-01-21 20:04:42 -0800344arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
Jason Evans21fb95b2010-10-18 17:45:40 -0700345{
Jason Evansd4bab212010-10-24 20:08:37 -0700346 size_t i;
Jason Evansae4c7b42012-04-02 07:04:34 -0700347 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
Jason Evansd4bab212010-10-24 20:08:37 -0700348
Jason Evansdda90f52013-10-19 23:48:40 -0700349 arena_run_page_mark_zeroed(chunk, run_ind);
Jason Evansae4c7b42012-04-02 07:04:34 -0700350 for (i = 0; i < PAGE / sizeof(size_t); i++)
Jason Evans21fb95b2010-10-18 17:45:40 -0700351 assert(p[i] == 0);
352}
Jason Evans21fb95b2010-10-18 17:45:40 -0700353
Jason Evanse476f8a2010-01-16 09:53:50 -0800354static void
Jason Evans40ee9aa2016-02-27 12:34:50 -0800355arena_nactive_add(arena_t *arena, size_t add_pages)
Jason Evansaa5113b2014-01-14 16:23:03 -0800356{
357
358 if (config_stats) {
Jason Evans3763d3b2016-02-26 17:29:35 -0800359 size_t cactive_add = CHUNK_CEILING((arena->nactive +
360 add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
Jason Evans15229372014-08-06 23:38:39 -0700361 LG_PAGE);
Jason Evans3763d3b2016-02-26 17:29:35 -0800362 if (cactive_add != 0)
363 stats_cactive_add(cactive_add);
364 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800365 arena->nactive += add_pages;
Jason Evans3763d3b2016-02-26 17:29:35 -0800366}
367
368static void
Jason Evans40ee9aa2016-02-27 12:34:50 -0800369arena_nactive_sub(arena_t *arena, size_t sub_pages)
Jason Evans3763d3b2016-02-26 17:29:35 -0800370{
371
372 if (config_stats) {
373 size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
374 CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
375 if (cactive_sub != 0)
376 stats_cactive_sub(cactive_sub);
Jason Evansaa5113b2014-01-14 16:23:03 -0800377 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800378 arena->nactive -= sub_pages;
Jason Evansaa5113b2014-01-14 16:23:03 -0800379}
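/*
 * Illustrative example (added commentary, not part of the original source):
 * the cactive statistic is maintained at chunk granularity, so it only moves
 * when nactive crosses a chunk boundary.  Assuming 4 KiB pages and 2 MiB
 * chunks (512 pages per chunk), growing nactive from 510 to 520 pages adds
 * one chunk to cactive (CHUNK_CEILING rounds 520 pages up to 4 MiB and 510
 * pages up to 2 MiB), while growing it from 100 to 200 pages changes nothing.
 */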

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}

static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    size_t sn, bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks. Arbitrarily mark them as committed. The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(tsdn, chunk, &chunk->node));
}

static arena_chunk_t *
arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;
	size_t sn;

	malloc_mutex_unlock(tsdn, &arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
	    NULL, chunksize, chunksize, &sn, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
			    (void *)chunk, chunksize, sn, *zero, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
	    *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
		    chunksize, sn, *zero, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(tsdn, &arena->lock);
	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
    bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t sn;

	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
	    chunksize, &sn, zero, commit, true);
	if (chunk != NULL) {
		if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
			    chunksize, sn, true);
			return (NULL);
		}
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
		    &chunk_hooks, zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	chunk->hugepage = true;

	/*
	 * Initialize the map to contain one maximal free untouched run. Mark
	 * the pages as zeroed if arena_chunk_alloc_internal() returned a
	 * zeroed or decommitted chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(tsdn, arena);
		if (chunk == NULL)
			return (NULL);
	}

	ql_elm_new(&chunk->node, ql_link);
	ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

static void
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	size_t sn, hugepage;
	bool committed;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk_deregister(chunk, &chunk->node);

	sn = extent_node_sn_get(&chunk->node);
	hugepage = chunk->hugepage;
	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
	if (!committed) {
		/*
		 * Decommit the header. Mark the chunk as decommitted even if
		 * header decommit fails, since treating a partially committed
		 * chunk as committed has a high potential for causing later
		 * access of decommitted memory.
		 */
		chunk_hooks = chunk_hooks_get(tsdn, arena);
		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
		    arena->ind);
	}
	if (!hugepage) {
		/*
		 * Convert chunk back to the default state, so that all
		 * subsequent chunk allocations start out with chunks that can
		 * be backed by transparent huge pages.
		 */
		pages_huge(chunk, chunksize);
	}

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
	    sn, committed);

	if (config_stats) {
		arena->stats.mapped -= chunksize;
		arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
	}
}

static void
arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
{

	assert(arena->spare != spare);

	if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
		arena_run_dirty_remove(arena, spare, map_bias,
		    chunk_npages-map_bias);
	}

	arena_chunk_discard(tsdn, arena, spare);
}

static void
arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	arena_chunk_t *spare;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	ql_remove(&arena->achunks, &chunk->node, ql_link);
	spare = arena->spare;
	arena->spare = chunk;
	if (spare != NULL)
		arena_spare_discard(tsdn, arena, spare);
}
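/*
 * Added commentary (not part of the original source): the arena keeps at most
 * one fully unallocated chunk in arena->spare instead of releasing it right
 * away.  This avoids repeatedly discarding and re-creating a chunk when the
 * working set hovers near a chunk boundary; only when a second chunk becomes
 * empty is the previous spare passed to arena_spare_discard() above.
 */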
809
Jason Evans9b41ac92014-10-14 22:20:00 -0700810static void
811arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
812{
Jason Evansd01fd192015-08-19 15:21:32 -0700813 szind_t index = size2index(usize) - nlclasses - NBINS;
Jason Evans9b41ac92014-10-14 22:20:00 -0700814
815 cassert(config_stats);
816
817 arena->stats.nmalloc_huge++;
818 arena->stats.allocated_huge += usize;
819 arena->stats.hstats[index].nmalloc++;
820 arena->stats.hstats[index].curhchunks++;
821}
822
823static void
824arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
825{
Jason Evansd01fd192015-08-19 15:21:32 -0700826 szind_t index = size2index(usize) - nlclasses - NBINS;
Jason Evans9b41ac92014-10-14 22:20:00 -0700827
828 cassert(config_stats);
829
830 arena->stats.nmalloc_huge--;
831 arena->stats.allocated_huge -= usize;
832 arena->stats.hstats[index].nmalloc--;
833 arena->stats.hstats[index].curhchunks--;
834}
835
836static void
837arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
838{
Jason Evansd01fd192015-08-19 15:21:32 -0700839 szind_t index = size2index(usize) - nlclasses - NBINS;
Jason Evans9b41ac92014-10-14 22:20:00 -0700840
841 cassert(config_stats);
842
843 arena->stats.ndalloc_huge++;
844 arena->stats.allocated_huge -= usize;
845 arena->stats.hstats[index].ndalloc++;
846 arena->stats.hstats[index].curhchunks--;
847}
848
849static void
Jason Evans7e674952016-04-25 13:26:54 -0700850arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
851{
852 szind_t index = size2index(usize) - nlclasses - NBINS;
853
854 cassert(config_stats);
855
856 arena->stats.ndalloc_huge++;
857 arena->stats.hstats[index].ndalloc--;
858}
859
860static void
Jason Evans9b41ac92014-10-14 22:20:00 -0700861arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
862{
Jason Evansd01fd192015-08-19 15:21:32 -0700863 szind_t index = size2index(usize) - nlclasses - NBINS;
Jason Evans9b41ac92014-10-14 22:20:00 -0700864
865 cassert(config_stats);
866
867 arena->stats.ndalloc_huge--;
868 arena->stats.allocated_huge += usize;
869 arena->stats.hstats[index].ndalloc--;
870 arena->stats.hstats[index].curhchunks++;
871}
872
873static void
874arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
875{
876
877 arena_huge_dalloc_stats_update(arena, oldsize);
878 arena_huge_malloc_stats_update(arena, usize);
879}
880
881static void
882arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
883 size_t usize)
884{
885
886 arena_huge_dalloc_stats_update_undo(arena, oldsize);
887 arena_huge_malloc_stats_update_undo(arena, usize);
888}
889
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800890extent_node_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -0700891arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800892{
893 extent_node_t *node;
894
Jason Evansc1e00ef2016-05-10 22:21:10 -0700895 malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
Jason Evans2195ba42015-02-15 16:43:52 -0800896 node = ql_last(&arena->node_cache, ql_link);
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800897 if (node == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -0700898 malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
899 return (base_alloc(tsdn, sizeof(extent_node_t)));
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800900 }
Jason Evans2195ba42015-02-15 16:43:52 -0800901 ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700902 malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800903 return (node);
904}
905
906void
Jason Evansc1e00ef2016-05-10 22:21:10 -0700907arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800908{
909
Jason Evansc1e00ef2016-05-10 22:21:10 -0700910 malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
Jason Evans2195ba42015-02-15 16:43:52 -0800911 ql_elm_new(node, ql_link);
912 ql_tail_insert(&arena->node_cache, node, ql_link);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700913 malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800914}
915
Jason Evans99bd94f2015-02-18 16:40:53 -0800916static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -0700917arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
Jason Evans5c77af92016-11-14 18:27:23 -0800918 chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
919 bool *zero, size_t csize)
Jason Evans9b41ac92014-10-14 22:20:00 -0700920{
921 void *ret;
Jason Evans8fadb1a2015-08-04 10:49:46 -0700922 bool commit = true;
Jason Evans9b41ac92014-10-14 22:20:00 -0700923
Jason Evansc1e00ef2016-05-10 22:21:10 -0700924 ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
Jason Evans5c77af92016-11-14 18:27:23 -0800925 alignment, sn, zero, &commit);
Jason Evans9b41ac92014-10-14 22:20:00 -0700926 if (ret == NULL) {
927 /* Revert optimistic stats updates. */
Jason Evansc1e00ef2016-05-10 22:21:10 -0700928 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700929 if (config_stats) {
930 arena_huge_malloc_stats_update_undo(arena, usize);
931 arena->stats.mapped -= usize;
932 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800933 arena_nactive_sub(arena, usize >> LG_PAGE);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700934 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700935 }
936
Jason Evans99bd94f2015-02-18 16:40:53 -0800937 return (ret);
938}
Jason Evans9b41ac92014-10-14 22:20:00 -0700939
Jason Evans99bd94f2015-02-18 16:40:53 -0800940void *
Jason Evansc1e00ef2016-05-10 22:21:10 -0700941arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
Jason Evans5c77af92016-11-14 18:27:23 -0800942 size_t alignment, size_t *sn, bool *zero)
Jason Evans99bd94f2015-02-18 16:40:53 -0800943{
944 void *ret;
Jason Evansb49a3342015-07-28 11:28:19 -0400945 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
Jason Evans99bd94f2015-02-18 16:40:53 -0800946 size_t csize = CHUNK_CEILING(usize);
Jason Evanse9012632016-11-03 17:11:01 -0700947 bool commit = true;
Jason Evans99bd94f2015-02-18 16:40:53 -0800948
Jason Evansc1e00ef2016-05-10 22:21:10 -0700949 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -0800950
951 /* Optimistically update stats. */
952 if (config_stats) {
953 arena_huge_malloc_stats_update(arena, usize);
954 arena->stats.mapped += usize;
955 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800956 arena_nactive_add(arena, usize >> LG_PAGE);
Jason Evans99bd94f2015-02-18 16:40:53 -0800957
Jason Evansc1e00ef2016-05-10 22:21:10 -0700958 ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
Jason Evans5c77af92016-11-14 18:27:23 -0800959 alignment, sn, zero, &commit, true);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700960 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -0800961 if (ret == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -0700962 ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
Jason Evans5c77af92016-11-14 18:27:23 -0800963 usize, alignment, sn, zero, csize);
Jason Evans99bd94f2015-02-18 16:40:53 -0800964 }
965
Jason Evans9b41ac92014-10-14 22:20:00 -0700966 return (ret);
967}
968
969void
Jason Evans5c77af92016-11-14 18:27:23 -0800970arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
971 size_t sn)
Jason Evans9b41ac92014-10-14 22:20:00 -0700972{
Jason Evansb49a3342015-07-28 11:28:19 -0400973 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
Jason Evans99bd94f2015-02-18 16:40:53 -0800974 size_t csize;
Jason Evans9b41ac92014-10-14 22:20:00 -0700975
Jason Evans99bd94f2015-02-18 16:40:53 -0800976 csize = CHUNK_CEILING(usize);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700977 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700978 if (config_stats) {
979 arena_huge_dalloc_stats_update(arena, usize);
980 arena->stats.mapped -= usize;
Jason Evans9b41ac92014-10-14 22:20:00 -0700981 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800982 arena_nactive_sub(arena, usize >> LG_PAGE);
Jason Evans99bd94f2015-02-18 16:40:53 -0800983
Jason Evans5c77af92016-11-14 18:27:23 -0800984 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700985 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700986}
987
988void
Jason Evansc1e00ef2016-05-10 22:21:10 -0700989arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -0700990 size_t oldsize, size_t usize)
Jason Evans9b41ac92014-10-14 22:20:00 -0700991{
992
993 assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
994 assert(oldsize != usize);
995
Jason Evansc1e00ef2016-05-10 22:21:10 -0700996 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700997 if (config_stats)
998 arena_huge_ralloc_stats_update(arena, oldsize, usize);
Jason Evans40ee9aa2016-02-27 12:34:50 -0800999 if (oldsize < usize)
1000 arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
1001 else
1002 arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001003 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -07001004}
1005
1006void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001007arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
Jason Evans5c77af92016-11-14 18:27:23 -08001008 size_t oldsize, size_t usize, size_t sn)
Jason Evans9b41ac92014-10-14 22:20:00 -07001009{
Jason Evans9b41ac92014-10-14 22:20:00 -07001010 size_t udiff = oldsize - usize;
1011 size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
1012
Jason Evansc1e00ef2016-05-10 22:21:10 -07001013 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -07001014 if (config_stats) {
1015 arena_huge_ralloc_stats_update(arena, oldsize, usize);
Jason Evans40ee9aa2016-02-27 12:34:50 -08001016 if (cdiff != 0)
Jason Evans9b41ac92014-10-14 22:20:00 -07001017 arena->stats.mapped -= cdiff;
Jason Evans9b41ac92014-10-14 22:20:00 -07001018 }
Jason Evans40ee9aa2016-02-27 12:34:50 -08001019 arena_nactive_sub(arena, udiff >> LG_PAGE);
Jason Evans99bd94f2015-02-18 16:40:53 -08001020
Jason Evans2012d5a2014-11-17 09:54:49 -08001021 if (cdiff != 0) {
Jason Evansb49a3342015-07-28 11:28:19 -04001022 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
Jason Evans99bd94f2015-02-18 16:40:53 -08001023 void *nchunk = (void *)((uintptr_t)chunk +
1024 CHUNK_CEILING(usize));
1025
Jason Evansc1e00ef2016-05-10 22:21:10 -07001026 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
Jason Evans5c77af92016-11-14 18:27:23 -08001027 sn, true);
Jason Evansb49a3342015-07-28 11:28:19 -04001028 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001029 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -08001030}
1031
Jason Evansb49a3342015-07-28 11:28:19 -04001032static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001033arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
Jason Evansb2c0d632016-04-13 23:36:15 -07001034 chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
Jason Evans5c77af92016-11-14 18:27:23 -08001035 size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
Jason Evans99bd94f2015-02-18 16:40:53 -08001036{
1037 bool err;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001038 bool commit = true;
Jason Evans99bd94f2015-02-18 16:40:53 -08001039
Jason Evansc1e00ef2016-05-10 22:21:10 -07001040 err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
Jason Evans5c77af92016-11-14 18:27:23 -08001041 chunksize, sn, zero, &commit) == NULL);
Jason Evans99bd94f2015-02-18 16:40:53 -08001042 if (err) {
1043 /* Revert optimistic stats updates. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001044 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -08001045 if (config_stats) {
1046 arena_huge_ralloc_stats_update_undo(arena, oldsize,
1047 usize);
1048 arena->stats.mapped -= cdiff;
1049 }
Jason Evans40ee9aa2016-02-27 12:34:50 -08001050 arena_nactive_sub(arena, udiff >> LG_PAGE);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001051 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evansb49a3342015-07-28 11:28:19 -04001052 } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1053 cdiff, true, arena->ind)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001054 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
Jason Evans5c77af92016-11-14 18:27:23 -08001055 *sn, *zero, true);
Jason Evansb49a3342015-07-28 11:28:19 -04001056 err = true;
Jason Evans2012d5a2014-11-17 09:54:49 -08001057 }
Jason Evans99bd94f2015-02-18 16:40:53 -08001058 return (err);
Jason Evans9b41ac92014-10-14 22:20:00 -07001059}
1060
1061bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001062arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07001063 size_t oldsize, size_t usize, bool *zero)
Jason Evans9b41ac92014-10-14 22:20:00 -07001064{
Jason Evans99bd94f2015-02-18 16:40:53 -08001065 bool err;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001066 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
Jason Evans99bd94f2015-02-18 16:40:53 -08001067 void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
Jason Evans9b41ac92014-10-14 22:20:00 -07001068 size_t udiff = usize - oldsize;
1069 size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
Jason Evans5c77af92016-11-14 18:27:23 -08001070 size_t sn;
Jason Evanse9012632016-11-03 17:11:01 -07001071 bool commit = true;
Jason Evans9b41ac92014-10-14 22:20:00 -07001072
Jason Evansc1e00ef2016-05-10 22:21:10 -07001073 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -08001074
1075 /* Optimistically update stats. */
Jason Evans9b41ac92014-10-14 22:20:00 -07001076 if (config_stats) {
Jason Evans9b41ac92014-10-14 22:20:00 -07001077 arena_huge_ralloc_stats_update(arena, oldsize, usize);
1078 arena->stats.mapped += cdiff;
1079 }
Jason Evans40ee9aa2016-02-27 12:34:50 -08001080 arena_nactive_add(arena, udiff >> LG_PAGE);
Jason Evans9b41ac92014-10-14 22:20:00 -07001081
Jason Evansc1e00ef2016-05-10 22:21:10 -07001082 err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
Jason Evans5c77af92016-11-14 18:27:23 -08001083 chunksize, &sn, zero, &commit, true) == NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001084 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -08001085 if (err) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001086 err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
Jason Evans5c77af92016-11-14 18:27:23 -08001087 &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
1088 udiff, cdiff);
Jason Evansb49a3342015-07-28 11:28:19 -04001089 } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1090 cdiff, true, arena->ind)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001091 chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
Jason Evans5c77af92016-11-14 18:27:23 -08001092 sn, *zero, true);
Jason Evansb49a3342015-07-28 11:28:19 -04001093 err = true;
Jason Evans9b41ac92014-10-14 22:20:00 -07001094 }
1095
Jason Evans99bd94f2015-02-18 16:40:53 -08001096 return (err);
Jason Evans9b41ac92014-10-14 22:20:00 -07001097}
1098
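/*
 * A minimal sketch of the optimistic-update pattern used in the expand path
 * above: stats are bumped before the chunk allocation is attempted and
 * reverted if the allocation (or the subsequent merge) fails.  All toy_*
 * names are hypothetical; this is not jemalloc's API.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct {
	size_t mapped;		/* Bytes mapped. */
	size_t nactive;		/* Active pages. */
} toy_stats_t;

/* Returns true on error, mirroring the convention above. */
static bool
toy_expand(toy_stats_t *stats, size_t add_bytes, size_t add_pages,
    bool (*try_alloc)(size_t))
{
	/* Optimistically account for the new mapping. */
	stats->mapped += add_bytes;
	stats->nactive += add_pages;

	if (!try_alloc(add_bytes)) {
		/* Revert the optimistic updates on failure. */
		stats->mapped -= add_bytes;
		stats->nactive -= add_pages;
		return (true);
	}
	return (false);
}
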
Jason Evansaa282662015-07-15 16:02:21 -07001099/*
1100 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
Dave Watson3417a302016-02-23 12:06:21 -08001101 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
1102 * same size.
Jason Evansaa282662015-07-15 16:02:21 -07001103 */
Jason Evans97c04a92015-03-06 19:57:36 -08001104static arena_run_t *
Jason Evansaa282662015-07-15 16:02:21 -07001105arena_run_first_best_fit(arena_t *arena, size_t size)
Jason Evans97c04a92015-03-06 19:57:36 -08001106{
Jason Evansf193fd82016-04-08 14:17:57 -07001107 pszind_t pind, i;
Dave Watson3417a302016-02-23 12:06:21 -08001108
Jason Evansf193fd82016-04-08 14:17:57 -07001109 pind = psz2ind(run_quantize_ceil(size));
1110
Jason Evansa4e83e82016-11-07 09:37:12 -08001111 for (i = pind; pind2sz(i) <= chunksize; i++) {
Jason Evansc6a2c392016-03-26 17:30:37 -07001112 arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
Jason Evansf193fd82016-04-08 14:17:57 -07001113 &arena->runs_avail[i]);
Jason Evansc6a2c392016-03-26 17:30:37 -07001114 if (miscelm != NULL)
Dave Watson3417a302016-02-23 12:06:21 -08001115 return (&miscelm->run);
1116 }
1117
1118 return (NULL);
Jason Evans97c04a92015-03-06 19:57:36 -08001119}
1120
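/*
 * A minimal sketch of first-best-fit selection, assuming free runs are kept
 * in containers indexed by size class: scan upward from the smallest class
 * that can hold the request and take the first non-empty one, so the chosen
 * run is the lowest that fits but may be larger than the request.  All toy_*
 * names are hypothetical; this is not jemalloc's API.
 */
#include <stddef.h>

#define TOY_NCLASSES 8

typedef struct {
	size_t class_size;	/* Run size this class holds. */
	size_t nfree;		/* Number of free runs of this size. */
} toy_size_class_t;

/* Returns the index of the chosen class, or TOY_NCLASSES if none fits. */
static size_t
toy_first_best_fit(const toy_size_class_t classes[TOY_NCLASSES], size_t size)
{
	size_t i;

	for (i = 0; i < TOY_NCLASSES; i++) {
		if (classes[i].class_size >= size && classes[i].nfree > 0)
			return (i);
	}
	return (TOY_NCLASSES);
}
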
Jason Evanse476f8a2010-01-16 09:53:50 -08001121static arena_run_t *
Jason Evansaa5113b2014-01-14 16:23:03 -08001122arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001123{
Jason Evans32896a92016-11-03 22:21:34 -07001124 arena_run_t *run = arena_run_first_best_fit(arena, size);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001125 if (run != NULL) {
1126 if (arena_run_split_large(arena, run, size, zero))
1127 run = NULL;
1128 }
Jason Evans97c04a92015-03-06 19:57:36 -08001129 return (run);
Jason Evans5b0c9962012-05-10 15:47:24 -07001130}
1131
1132static arena_run_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07001133arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
Jason Evans5b0c9962012-05-10 15:47:24 -07001134{
1135 arena_chunk_t *chunk;
1136 arena_run_t *run;
1137
Jason Evansfc0b3b72014-10-09 17:54:06 -07001138 assert(size <= arena_maxrun);
Jason Evans8a03cf02015-05-04 09:58:36 -07001139 assert(size == PAGE_CEILING(size));
Jason Evans5b0c9962012-05-10 15:47:24 -07001140
1141 /* Search the arena's chunks for the lowest best fit. */
Jason Evansaa5113b2014-01-14 16:23:03 -08001142 run = arena_run_alloc_large_helper(arena, size, zero);
Jason Evans5b0c9962012-05-10 15:47:24 -07001143 if (run != NULL)
1144 return (run);
1145
Jason Evanse476f8a2010-01-16 09:53:50 -08001146 /*
1147 * No usable runs. Create a new chunk from which to allocate the run.
1148 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001149 chunk = arena_chunk_alloc(tsdn, arena);
Jason Evanse00572b2010-03-14 19:43:56 -07001150 if (chunk != NULL) {
Jason Evans61a6dfc2016-03-23 16:04:38 -07001151 run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001152 if (arena_run_split_large(arena, run, size, zero))
1153 run = NULL;
Jason Evanse00572b2010-03-14 19:43:56 -07001154 return (run);
1155 }
1156
1157 /*
1158 * arena_chunk_alloc() failed, but another thread may have made
1159 * sufficient memory available while this one dropped arena->lock in
1160 * arena_chunk_alloc(), so search one more time.
1161 */
Jason Evansaa5113b2014-01-14 16:23:03 -08001162 return (arena_run_alloc_large_helper(arena, size, zero));
1163}
1164
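/*
 * A minimal sketch of the allocate/grow/retry flow above: try the existing
 * free runs, grow by a chunk on failure, and if even growth fails retry the
 * free runs once, since the lock was dropped during growth and another
 * thread may have freed memory in the meantime.  The fit/grow callbacks are
 * hypothetical stand-ins, not jemalloc's API.
 */
#include <stddef.h>

static void *
toy_alloc_run(size_t size, void *(*fit)(size_t), void *(*grow)(size_t))
{
	void *run = fit(size);

	if (run != NULL)
		return (run);
	if ((run = grow(size)) != NULL)
		return (run);
	/* Growth failed; another thread may have freed runs meanwhile. */
	return (fit(size));
}
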
1165static arena_run_t *
Jason Evansd01fd192015-08-19 15:21:32 -07001166arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
Jason Evansaa5113b2014-01-14 16:23:03 -08001167{
Jason Evansaa282662015-07-15 16:02:21 -07001168 arena_run_t *run = arena_run_first_best_fit(arena, size);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001169 if (run != NULL) {
1170 if (arena_run_split_small(arena, run, size, binind))
1171 run = NULL;
1172 }
Jason Evans97c04a92015-03-06 19:57:36 -08001173 return (run);
Jason Evansaa5113b2014-01-14 16:23:03 -08001174}
1175
1176static arena_run_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07001177arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
Jason Evansaa5113b2014-01-14 16:23:03 -08001178{
1179 arena_chunk_t *chunk;
1180 arena_run_t *run;
1181
Jason Evansfc0b3b72014-10-09 17:54:06 -07001182 assert(size <= arena_maxrun);
Jason Evans8a03cf02015-05-04 09:58:36 -07001183 assert(size == PAGE_CEILING(size));
Jason Evansaa5113b2014-01-14 16:23:03 -08001184 assert(binind != BININD_INVALID);
1185
1186 /* Search the arena's chunks for the lowest best fit. */
1187 run = arena_run_alloc_small_helper(arena, size, binind);
1188 if (run != NULL)
1189 return (run);
1190
1191 /*
1192 * No usable runs. Create a new chunk from which to allocate the run.
1193 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001194 chunk = arena_chunk_alloc(tsdn, arena);
Jason Evansaa5113b2014-01-14 16:23:03 -08001195 if (chunk != NULL) {
Jason Evans61a6dfc2016-03-23 16:04:38 -07001196 run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001197 if (arena_run_split_small(arena, run, size, binind))
1198 run = NULL;
Jason Evansaa5113b2014-01-14 16:23:03 -08001199 return (run);
1200 }
1201
1202 /*
1203 * arena_chunk_alloc() failed, but another thread may have made
1204 * sufficient memory available while this one dropped arena->lock in
1205 * arena_chunk_alloc(), so search one more time.
1206 */
1207 return (arena_run_alloc_small_helper(arena, size, binind));
Jason Evanse476f8a2010-01-16 09:53:50 -08001208}
1209
Jason Evans8d6a3e82015-03-18 18:55:33 -07001210static bool
1211arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
1212{
1213
Jason Evansbd16ea42015-03-24 15:59:28 -07001214 return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
1215 << 3));
Jason Evans8d6a3e82015-03-18 18:55:33 -07001216}
1217
1218ssize_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001219arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
Jason Evans8d6a3e82015-03-18 18:55:33 -07001220{
1221 ssize_t lg_dirty_mult;
1222
Jason Evansc1e00ef2016-05-10 22:21:10 -07001223 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001224 lg_dirty_mult = arena->lg_dirty_mult;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001225 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001226
1227 return (lg_dirty_mult);
1228}
1229
1230bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001231arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
Jason Evans8d6a3e82015-03-18 18:55:33 -07001232{
1233
1234 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
1235 return (true);
1236
Jason Evansc1e00ef2016-05-10 22:21:10 -07001237 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001238 arena->lg_dirty_mult = lg_dirty_mult;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001239 arena_maybe_purge(tsdn, arena);
1240 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001241
1242 return (false);
1243}
1244
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001245static void
Jason Evans243f7a02016-02-19 20:09:31 -08001246arena_decay_deadline_init(arena_t *arena)
1247{
1248
1249 assert(opt_purge == purge_mode_decay);
1250
1251 /*
1252 * Generate a new deadline that is uniformly random within the next
1253 * epoch after the current one.
1254 */
Jason Evans94e7ffa2016-10-10 20:32:19 -07001255 nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
1256 nstime_add(&arena->decay.deadline, &arena->decay.interval);
1257 if (arena->decay.time > 0) {
Jason Evans9bad0792016-02-21 11:25:02 -08001258 nstime_t jitter;
Jason Evans243f7a02016-02-19 20:09:31 -08001259
Jason Evans5d6cb6e2016-11-07 10:52:44 -08001260 nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
Jason Evans94e7ffa2016-10-10 20:32:19 -07001261 nstime_ns(&arena->decay.interval)));
1262 nstime_add(&arena->decay.deadline, &jitter);
Jason Evans243f7a02016-02-19 20:09:31 -08001263 }
1264}
1265
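/*
 * A minimal sketch of the deadline computation above: the deadline is the end
 * of the next epoch plus a uniformly random jitter in [0, interval), which
 * keeps arenas initialized at the same time from purging in lockstep.
 * rand() stands in for the per-arena PRNG; toy_* names are hypothetical.
 */
#include <stdint.h>
#include <stdlib.h>

static uint64_t
toy_decay_deadline_ns(uint64_t epoch_ns, uint64_t interval_ns)
{
	uint64_t jitter_ns = 0;

	if (interval_ns > 0)
		jitter_ns = (uint64_t)rand() % interval_ns;
	return (epoch_ns + interval_ns + jitter_ns);
}
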
1266static bool
Jason Evans9bad0792016-02-21 11:25:02 -08001267arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
Jason Evans243f7a02016-02-19 20:09:31 -08001268{
1269
1270 assert(opt_purge == purge_mode_decay);
1271
Jason Evans94e7ffa2016-10-10 20:32:19 -07001272 return (nstime_compare(&arena->decay.deadline, time) <= 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001273}
1274
1275static size_t
1276arena_decay_backlog_npages_limit(const arena_t *arena)
1277{
1278 static const uint64_t h_steps[] = {
1279#define STEP(step, h, x, y) \
1280 h,
1281 SMOOTHSTEP
1282#undef STEP
1283 };
1284 uint64_t sum;
1285 size_t npages_limit_backlog;
1286 unsigned i;
1287
1288 assert(opt_purge == purge_mode_decay);
1289
1290 /*
1291 * For each element of decay_backlog, multiply by the corresponding
1292 * fixed-point smoothstep decay factor. Sum the products, then divide
1293 * to round down to the nearest whole number of pages.
1294 */
1295 sum = 0;
1296 for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
Jason Evans94e7ffa2016-10-10 20:32:19 -07001297 sum += arena->decay.backlog[i] * h_steps[i];
rustyx00432332016-04-12 09:50:54 +02001298 npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
Jason Evans243f7a02016-02-19 20:09:31 -08001299
1300 return (npages_limit_backlog);
1301}
1302
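/*
 * A minimal sketch of the limit computation above: a fixed-point dot product
 * of the per-epoch dirty-page backlog with smoothstep decay coefficients
 * carrying TOY_BFP fractional bits, shifted right at the end to drop the
 * fractional part.  The coefficient values are the caller's; all toy_* names
 * are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

#define TOY_NSTEPS 4
#define TOY_BFP 24	/* Fractional bits in the fixed-point coefficients. */

static size_t
toy_backlog_npages_limit(const size_t backlog[TOY_NSTEPS],
    const uint64_t h_steps[TOY_NSTEPS])
{
	uint64_t sum = 0;
	unsigned i;

	for (i = 0; i < TOY_NSTEPS; i++)
		sum += (uint64_t)backlog[i] * h_steps[i];

	/* Round down to a whole number of pages. */
	return ((size_t)(sum >> TOY_BFP));
}
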
1303static void
Jason Evansd419bb02016-10-11 15:30:01 -07001304arena_decay_backlog_update_last(arena_t *arena)
1305{
1306 size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
1307 arena->ndirty - arena->decay.ndirty : 0;
1308 arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
1309}
1310
1311static void
1312arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
1313{
1314
1315 if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
1316 memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
1317 sizeof(size_t));
1318 } else {
1319 size_t nadvance_z = (size_t)nadvance_u64;
1320
1321 assert((uint64_t)nadvance_z == nadvance_u64);
1322
1323 memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
1324 (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
1325 if (nadvance_z > 1) {
1326 memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
1327 nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
1328 }
1329 }
1330
1331 arena_decay_backlog_update_last(arena);
1332}
1333
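/*
 * A minimal sketch of the backlog update above: advancing the epoch by
 * nadvance steps slides the fixed-length window left by nadvance slots,
 * zero-fills the vacated slots, and records the pages dirtied during the
 * just-completed epoch in the newest slot.  All toy_* names are hypothetical.
 */
#include <stddef.h>
#include <string.h>

#define TOY_NSTEPS 8

static void
toy_backlog_advance(size_t backlog[TOY_NSTEPS], size_t nadvance,
    size_t ndirty_delta)
{
	if (nadvance >= TOY_NSTEPS)
		memset(backlog, 0, TOY_NSTEPS * sizeof(size_t));
	else {
		memmove(backlog, &backlog[nadvance],
		    (TOY_NSTEPS - nadvance) * sizeof(size_t));
		memset(&backlog[TOY_NSTEPS - nadvance], 0,
		    nadvance * sizeof(size_t));
	}
	/* The newest slot holds the pages dirtied during the last epoch. */
	backlog[TOY_NSTEPS - 1] = ndirty_delta;
}
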
1334static void
1335arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
Jason Evans243f7a02016-02-19 20:09:31 -08001336{
rustyx00432332016-04-12 09:50:54 +02001337 uint64_t nadvance_u64;
Jason Evans9bad0792016-02-21 11:25:02 -08001338 nstime_t delta;
Jason Evans243f7a02016-02-19 20:09:31 -08001339
1340 assert(opt_purge == purge_mode_decay);
1341 assert(arena_decay_deadline_reached(arena, time));
1342
Jason Evans9bad0792016-02-21 11:25:02 -08001343 nstime_copy(&delta, time);
Jason Evans94e7ffa2016-10-10 20:32:19 -07001344 nstime_subtract(&delta, &arena->decay.epoch);
1345 nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
rustyx00432332016-04-12 09:50:54 +02001346 assert(nadvance_u64 > 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001347
rustyx00432332016-04-12 09:50:54 +02001348 /* Add nadvance_u64 decay intervals to epoch. */
Jason Evans94e7ffa2016-10-10 20:32:19 -07001349 nstime_copy(&delta, &arena->decay.interval);
rustyx00432332016-04-12 09:50:54 +02001350 nstime_imultiply(&delta, nadvance_u64);
Jason Evans94e7ffa2016-10-10 20:32:19 -07001351 nstime_add(&arena->decay.epoch, &delta);
Jason Evans243f7a02016-02-19 20:09:31 -08001352
1353 /* Set a new deadline. */
1354 arena_decay_deadline_init(arena);
1355
1356 /* Update the backlog. */
Jason Evansd419bb02016-10-11 15:30:01 -07001357 arena_decay_backlog_update(arena, nadvance_u64);
Jason Evans243f7a02016-02-19 20:09:31 -08001358}
1359
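/*
 * A minimal sketch of the epoch arithmetic above: the epoch advances by the
 * whole number of decay intervals that fit in the elapsed time rather than
 * jumping to "now", so epoch boundaries stay on a fixed grid regardless of
 * when the advance is triggered.  All toy_* names are hypothetical.
 */
#include <stdint.h>

/* Returns the number of epochs advanced; requires now_ns >= *epoch_ns. */
static uint64_t
toy_epoch_advance(uint64_t *epoch_ns, uint64_t interval_ns, uint64_t now_ns)
{
	uint64_t nadvance = (now_ns - *epoch_ns) / interval_ns;

	*epoch_ns += nadvance * interval_ns;
	return (nadvance);
}
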
Jason Evansd419bb02016-10-11 15:30:01 -07001360static void
1361arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
Jason Evans243f7a02016-02-19 20:09:31 -08001362{
Jason Evansd419bb02016-10-11 15:30:01 -07001363 size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
Jason Evans243f7a02016-02-19 20:09:31 -08001364
Jason Evansd419bb02016-10-11 15:30:01 -07001365 if (arena->ndirty > ndirty_limit)
1366 arena_purge_to_limit(tsdn, arena, ndirty_limit);
1367 arena->decay.ndirty = arena->ndirty;
1368}
Jason Evans243f7a02016-02-19 20:09:31 -08001369
Jason Evansd419bb02016-10-11 15:30:01 -07001370static void
1371arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
1372{
Jason Evans243f7a02016-02-19 20:09:31 -08001373
Jason Evansd419bb02016-10-11 15:30:01 -07001374 arena_decay_epoch_advance_helper(arena, time);
1375 arena_decay_epoch_advance_purge(tsdn, arena);
Jason Evans243f7a02016-02-19 20:09:31 -08001376}
1377
1378static void
1379arena_decay_init(arena_t *arena, ssize_t decay_time)
1380{
1381
Jason Evans94e7ffa2016-10-10 20:32:19 -07001382 arena->decay.time = decay_time;
Jason Evans243f7a02016-02-19 20:09:31 -08001383 if (decay_time > 0) {
Jason Evans94e7ffa2016-10-10 20:32:19 -07001384 nstime_init2(&arena->decay.interval, decay_time, 0);
1385 nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
Jason Evans243f7a02016-02-19 20:09:31 -08001386 }
1387
Jason Evans94e7ffa2016-10-10 20:32:19 -07001388 nstime_init(&arena->decay.epoch, 0);
1389 nstime_update(&arena->decay.epoch);
1390 arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
Jason Evans243f7a02016-02-19 20:09:31 -08001391 arena_decay_deadline_init(arena);
Jason Evans94e7ffa2016-10-10 20:32:19 -07001392 arena->decay.ndirty = arena->ndirty;
Jason Evans94e7ffa2016-10-10 20:32:19 -07001393 memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
Jason Evans243f7a02016-02-19 20:09:31 -08001394}
1395
1396static bool
1397arena_decay_time_valid(ssize_t decay_time)
1398{
1399
Jason Evans022f6892016-03-02 22:41:32 -08001400 if (decay_time < -1)
1401 return (false);
1402 if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
1403 return (true);
1404 return (false);
Jason Evans243f7a02016-02-19 20:09:31 -08001405}
1406
1407ssize_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001408arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
Jason Evans243f7a02016-02-19 20:09:31 -08001409{
1410 ssize_t decay_time;
1411
Jason Evansc1e00ef2016-05-10 22:21:10 -07001412 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans94e7ffa2016-10-10 20:32:19 -07001413 decay_time = arena->decay.time;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001414 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001415
1416 return (decay_time);
1417}
1418
1419bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001420arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
Jason Evans243f7a02016-02-19 20:09:31 -08001421{
1422
1423 if (!arena_decay_time_valid(decay_time))
1424 return (true);
1425
Jason Evansc1e00ef2016-05-10 22:21:10 -07001426 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001427 /*
1428 * Restart decay backlog from scratch, which may cause many dirty pages
1429 * to be immediately purged. It would conceptually be possible to map
1430 * the old backlog onto the new backlog, but there is no justification
1431 * for such complexity since decay_time changes are intended to be
1432 * infrequent, either between the {-1, 0, >0} states, or a one-time
1433 * arbitrary change during initial arena configuration.
1434 */
1435 arena_decay_init(arena, decay_time);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001436 arena_maybe_purge(tsdn, arena);
1437 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001438
1439 return (false);
1440}
1441
1442static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001443arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
Jason Evans05b21be2010-03-14 17:36:10 -07001444{
1445
Jason Evans243f7a02016-02-19 20:09:31 -08001446 assert(opt_purge == purge_mode_ratio);
1447
Jason Evanse3d13062012-10-30 15:42:37 -07001448 /* Don't purge if the option is disabled. */
Jason Evans8d6a3e82015-03-18 18:55:33 -07001449 if (arena->lg_dirty_mult < 0)
Jason Evanse3d13062012-10-30 15:42:37 -07001450 return;
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001451
Jason Evans0a9f9a42015-06-22 18:50:32 -07001452 /*
1453 * Iterate, since preventing recursive purging could otherwise leave too
1454 * many dirty pages.
1455 */
1456 while (true) {
1457 size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
1458 if (threshold < chunk_npages)
1459 threshold = chunk_npages;
1460 /*
1461 * Don't purge unless the number of purgeable pages exceeds the
1462 * threshold.
1463 */
1464 if (arena->ndirty <= threshold)
1465 return;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001466 arena_purge_to_limit(tsdn, arena, threshold);
Jason Evans0a9f9a42015-06-22 18:50:32 -07001467 }
Jason Evans05b21be2010-03-14 17:36:10 -07001468}
1469
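/*
 * A minimal sketch of the ratio test above: the dirty-page threshold is the
 * active page count divided by 2^lg_dirty_mult, clamped below by one chunk's
 * worth of pages, and purging triggers only while ndirty exceeds it.  All
 * toy_* names are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

static bool
toy_should_purge_ratio(size_t nactive, size_t ndirty, int lg_dirty_mult,
    size_t chunk_npages)
{
	size_t threshold;

	if (lg_dirty_mult < 0)
		return (false);		/* Ratio purging disabled. */
	threshold = nactive >> lg_dirty_mult;
	if (threshold < chunk_npages)
		threshold = chunk_npages;
	return (ndirty > threshold);
}
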
Jason Evans243f7a02016-02-19 20:09:31 -08001470static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001471arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
Jason Evans243f7a02016-02-19 20:09:31 -08001472{
Jason Evans9bad0792016-02-21 11:25:02 -08001473 nstime_t time;
Jason Evans243f7a02016-02-19 20:09:31 -08001474
1475 assert(opt_purge == purge_mode_decay);
1476
1477 /* Purge all or nothing if the option is disabled. */
Jason Evans94e7ffa2016-10-10 20:32:19 -07001478 if (arena->decay.time <= 0) {
1479 if (arena->decay.time == 0)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001480 arena_purge_to_limit(tsdn, arena, 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001481 return;
1482 }
1483
Jason Evans45a5bf62016-10-10 22:15:10 -07001484 nstime_init(&time, 0);
1485 nstime_update(&time);
1486 if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
1487 &time) > 0)) {
1488 /*
Jason Evansd419bb02016-10-11 15:30:01 -07001489 * Time went backwards. Move the epoch back in time and
1490 * generate a new deadline, with the expectation that time
1491 * typically flows forward for long enough periods of time that
1492 * epochs complete. Unfortunately, this strategy is susceptible
1493 * to clock jitter triggering premature epoch advances, but
1494 * clock jitter estimation and compensation isn't feasible here
1495 * because calls into this code are event-driven.
Jason Evans45a5bf62016-10-10 22:15:10 -07001496 */
1497 nstime_copy(&arena->decay.epoch, &time);
Jason Evansd419bb02016-10-11 15:30:01 -07001498 arena_decay_deadline_init(arena);
Jason Evans45a5bf62016-10-10 22:15:10 -07001499 } else {
1500 /* Verify that time does not go backwards. */
1501 assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001502 }
1503
Jason Evans243f7a02016-02-19 20:09:31 -08001504 /*
Jason Evansd419bb02016-10-11 15:30:01 -07001505 * If the deadline has been reached, advance to the current epoch and
1506 * purge to the new limit if necessary. Note that dirty pages created
1507 * during the current epoch are not subject to purge until a future
1508 * epoch, so as a result purging only happens during epoch advances.
Jason Evans243f7a02016-02-19 20:09:31 -08001509 */
Jason Evansd419bb02016-10-11 15:30:01 -07001510 if (arena_decay_deadline_reached(arena, &time))
1511 arena_decay_epoch_advance(tsdn, arena, &time);
Jason Evans243f7a02016-02-19 20:09:31 -08001512}
1513
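/*
 * A minimal sketch of the decay-mode control flow above: zero decay time
 * means "purge everything now", negative means "never purge", a clock that
 * stepped backwards rebases the epoch and waits for a fresh deadline, and
 * otherwise purging happens only once the current epoch's deadline has
 * passed.  All toy_* names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

typedef struct {
	int64_t decay_time_s;	/* < 0: never purge; 0: purge immediately. */
	uint64_t epoch_ns;
	uint64_t deadline_ns;
	uint64_t interval_ns;
} toy_decay_t;

/* Returns true if the caller should purge toward the decay limit now. */
static bool
toy_maybe_purge_decay(toy_decay_t *d, uint64_t now_ns)
{
	if (d->decay_time_s <= 0)
		return (d->decay_time_s == 0);
	if (now_ns < d->epoch_ns) {
		/* Clock stepped backwards; rebase and start a new epoch. */
		d->epoch_ns = now_ns;
		d->deadline_ns = now_ns + d->interval_ns;
		return (false);
	}
	return (now_ns >= d->deadline_ns);
}
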
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001514void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001515arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001516{
1517
1518 /* Don't recursively purge. */
1519 if (arena->purging)
1520 return;
1521
Jason Evans243f7a02016-02-19 20:09:31 -08001522 if (opt_purge == purge_mode_ratio)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001523 arena_maybe_purge_ratio(tsdn, arena);
Jason Evans243f7a02016-02-19 20:09:31 -08001524 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07001525 arena_maybe_purge_decay(tsdn, arena);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001526}
1527
Qinfan Wua244e502014-07-21 10:23:36 -07001528static size_t
1529arena_dirty_count(arena_t *arena)
1530{
1531 size_t ndirty = 0;
Jason Evans38e42d32015-03-10 18:15:40 -07001532 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001533 extent_node_t *chunkselm;
Qinfan Wua244e502014-07-21 10:23:36 -07001534
Jason Evans38e42d32015-03-10 18:15:40 -07001535 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001536 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001537 rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001538 size_t npages;
1539
Jason Evansf5c8f372015-03-10 18:29:49 -07001540 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001541 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001542 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001543 } else {
Jason Evans38e42d32015-03-10 18:15:40 -07001544 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
1545 rdelm);
1546 arena_chunk_map_misc_t *miscelm =
1547 arena_rd_to_miscelm(rdelm);
1548 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001549 assert(arena_mapbits_allocated_get(chunk, pageind) ==
1550 0);
1551 assert(arena_mapbits_large_get(chunk, pageind) == 0);
1552 assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
1553 npages = arena_mapbits_unallocated_size_get(chunk,
1554 pageind) >> LG_PAGE;
1555 }
Qinfan Wua244e502014-07-21 10:23:36 -07001556 ndirty += npages;
1557 }
1558
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001559 return (ndirty);
Jason Evansaa5113b2014-01-14 16:23:03 -08001560}
1561
1562static size_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001563arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001564 size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001565 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001566{
Jason Evans38e42d32015-03-10 18:15:40 -07001567 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001568 extent_node_t *chunkselm;
Qinfan Wue9708002014-07-21 18:09:04 -07001569 size_t nstashed = 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08001570
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001571 /* Stash runs/chunks according to ndirty_limit. */
Jason Evans38e42d32015-03-10 18:15:40 -07001572 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001573 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001574 rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
Jason Evansee41ad42015-02-15 18:04:46 -08001575 size_t npages;
Jason Evans38e42d32015-03-10 18:15:40 -07001576 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansaa5113b2014-01-14 16:23:03 -08001577
Jason Evansf5c8f372015-03-10 18:29:49 -07001578 if (rdelm == &chunkselm->rd) {
Jason Evans99bd94f2015-02-18 16:40:53 -08001579 extent_node_t *chunkselm_next;
Jason Evans5c77af92016-11-14 18:27:23 -08001580 size_t sn;
Jason Evanse9012632016-11-03 17:11:01 -07001581 bool zero, commit;
Jason Evansee41ad42015-02-15 18:04:46 -08001582 UNUSED void *chunk;
Jason Evansaa5113b2014-01-14 16:23:03 -08001583
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001584 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evans243f7a02016-02-19 20:09:31 -08001585 if (opt_purge == purge_mode_decay && arena->ndirty -
1586 (nstashed + npages) < ndirty_limit)
1587 break;
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001588
Jason Evans738e0892015-02-18 01:15:50 -08001589 chunkselm_next = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001590 /*
Jason Evans99bd94f2015-02-18 16:40:53 -08001591 * Allocate. chunkselm remains valid due to the
1592 * dalloc_node=false argument to chunk_alloc_cache().
Jason Evansee41ad42015-02-15 18:04:46 -08001593 */
Jason Evansee41ad42015-02-15 18:04:46 -08001594 zero = false;
Jason Evanse9012632016-11-03 17:11:01 -07001595 commit = false;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001596 chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
Jason Evans99bd94f2015-02-18 16:40:53 -08001597 extent_node_addr_get(chunkselm),
Jason Evans5c77af92016-11-14 18:27:23 -08001598 extent_node_size_get(chunkselm), chunksize, &sn,
1599 &zero, &commit, false);
Jason Evans99bd94f2015-02-18 16:40:53 -08001600 assert(chunk == extent_node_addr_get(chunkselm));
1601 assert(zero == extent_node_zeroed_get(chunkselm));
1602 extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
Jason Evans738e0892015-02-18 01:15:50 -08001603 purge_chunks_sentinel);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001604 assert(npages == (extent_node_size_get(chunkselm) >>
1605 LG_PAGE));
Jason Evansee41ad42015-02-15 18:04:46 -08001606 chunkselm = chunkselm_next;
1607 } else {
1608 arena_chunk_t *chunk =
Jason Evans38e42d32015-03-10 18:15:40 -07001609 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1610 arena_chunk_map_misc_t *miscelm =
1611 arena_rd_to_miscelm(rdelm);
1612 size_t pageind = arena_miscelm_to_pageind(miscelm);
1613 arena_run_t *run = &miscelm->run;
Jason Evansee41ad42015-02-15 18:04:46 -08001614 size_t run_size =
1615 arena_mapbits_unallocated_size_get(chunk, pageind);
Jason Evans070b3c32014-08-14 14:45:58 -07001616
Jason Evansee41ad42015-02-15 18:04:46 -08001617 npages = run_size >> LG_PAGE;
Jason Evans243f7a02016-02-19 20:09:31 -08001618 if (opt_purge == purge_mode_decay && arena->ndirty -
1619 (nstashed + npages) < ndirty_limit)
1620 break;
Jason Evansee41ad42015-02-15 18:04:46 -08001621
1622 assert(pageind + npages <= chunk_npages);
1623 assert(arena_mapbits_dirty_get(chunk, pageind) ==
1624 arena_mapbits_dirty_get(chunk, pageind+npages-1));
1625
1626 /*
1627 * If purging the spare chunk's run, make it available
1628 * prior to allocation.
1629 */
1630 if (chunk == arena->spare)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001631 arena_chunk_alloc(tsdn, arena);
Jason Evansee41ad42015-02-15 18:04:46 -08001632
1633 /* Temporarily allocate the free dirty run. */
1634 arena_run_split_large(arena, run, run_size, false);
Jason Evans339c2b22015-02-17 22:25:56 -08001635 /* Stash. */
Jason Evansee41ad42015-02-15 18:04:46 -08001636 if (false)
Jason Evans38e42d32015-03-10 18:15:40 -07001637 qr_new(rdelm, rd_link); /* Redundant. */
Jason Evansee41ad42015-02-15 18:04:46 -08001638 else {
Jason Evans38e42d32015-03-10 18:15:40 -07001639 assert(qr_next(rdelm, rd_link) == rdelm);
1640 assert(qr_prev(rdelm, rd_link) == rdelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001641 }
Jason Evans38e42d32015-03-10 18:15:40 -07001642 qr_meld(purge_runs_sentinel, rdelm, rd_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001643 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001644
Qinfan Wue9708002014-07-21 18:09:04 -07001645 nstashed += npages;
Jason Evans243f7a02016-02-19 20:09:31 -08001646 if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
1647 ndirty_limit)
Qinfan Wue9708002014-07-21 18:09:04 -07001648 break;
Jason Evansaa5113b2014-01-14 16:23:03 -08001649 }
Qinfan Wue9708002014-07-21 18:09:04 -07001650
1651 return (nstashed);
Jason Evansaa5113b2014-01-14 16:23:03 -08001652}
1653
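/*
 * A minimal sketch of the three-phase purge that the stash step above begins:
 * stash dirty runs while holding the arena lock, drop the lock around the
 * slow purge calls (as arena_purge_stashed() does), then reacquire it and
 * release the stashed runs.  The toy_* callbacks are hypothetical stand-ins,
 * not jemalloc's API.
 */
#include <stddef.h>

typedef struct toy_stash toy_stash_t;

typedef struct {
	void (*lock)(void);
	void (*unlock)(void);
	size_t (*stash)(toy_stash_t *, size_t limit);	/* Lock held. */
	void (*purge)(toy_stash_t *);			/* Lock dropped. */
	void (*unstash)(toy_stash_t *);			/* Lock held. */
} toy_purge_ops_t;

/* Caller holds the lock on entry and exit. */
static void
toy_purge_to_limit(const toy_purge_ops_t *ops, toy_stash_t *stash,
    size_t ndirty_limit)
{
	if (ops->stash(stash, ndirty_limit) == 0)
		return;
	ops->unlock();	/* Don't hold the arena lock across madvise(). */
	ops->purge(stash);
	ops->lock();
	ops->unstash(stash);
}
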
1654static size_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001655arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001656 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001657 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001658{
Qinfan Wue9708002014-07-21 18:09:04 -07001659 size_t npurged, nmadvise;
Jason Evans38e42d32015-03-10 18:15:40 -07001660 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001661 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001662
Jason Evansaa5113b2014-01-14 16:23:03 -08001663 if (config_stats)
1664 nmadvise = 0;
1665 npurged = 0;
Qinfan Wue9708002014-07-21 18:09:04 -07001666
Jason Evansc1e00ef2016-05-10 22:21:10 -07001667 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans38e42d32015-03-10 18:15:40 -07001668 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001669 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001670 rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001671 size_t npages;
Qinfan Wue9708002014-07-21 18:09:04 -07001672
Jason Evansf5c8f372015-03-10 18:29:49 -07001673 if (rdelm == &chunkselm->rd) {
Jason Evansb49a3342015-07-28 11:28:19 -04001674 /*
1675 * Don't actually purge the chunk here because 1)
1676 * chunkselm is embedded in the chunk and must remain
1677 * valid, and 2) we deallocate the chunk in
1678 * arena_unstash_purged(), where it is destroyed,
1679 * decommitted, or purged, depending on chunk
1680 * deallocation policy.
1681 */
Jason Evansee41ad42015-02-15 18:04:46 -08001682 size_t size = extent_node_size_get(chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001683 npages = size >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001684 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001685 } else {
Jason Evans45186f02015-08-10 23:03:34 -07001686 size_t pageind, run_size, flag_unzeroed, flags, i;
1687 bool decommitted;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001688 arena_chunk_t *chunk =
1689 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001690 arena_chunk_map_misc_t *miscelm =
1691 arena_rd_to_miscelm(rdelm);
1692 pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001693 run_size = arena_mapbits_large_size_get(chunk, pageind);
1694 npages = run_size >> LG_PAGE;
Qinfan Wue9708002014-07-21 18:09:04 -07001695
Jason Evanse98a6202016-11-17 13:36:17 -08001696 /*
1697 * If this is the first run purged within chunk, mark
1698 * the chunk as non-huge. This will prevent all use of
1699 * transparent huge pages for this chunk until the chunk
1700 * as a whole is deallocated.
1701 */
1702 if (chunk->hugepage) {
1703 pages_nohuge(chunk, chunksize);
1704 chunk->hugepage = false;
1705 }
1706
Jason Evansee41ad42015-02-15 18:04:46 -08001707 assert(pageind + npages <= chunk_npages);
Jason Evansde249c82015-08-09 16:47:27 -07001708 assert(!arena_mapbits_decommitted_get(chunk, pageind));
1709 assert(!arena_mapbits_decommitted_get(chunk,
1710 pageind+npages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001711 decommitted = !chunk_hooks->decommit(chunk, chunksize,
1712 pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1713 if (decommitted) {
Jason Evans45186f02015-08-10 23:03:34 -07001714 flag_unzeroed = 0;
1715 flags = CHUNK_MAP_DECOMMITTED;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001716 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001717 flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001718 chunk_hooks, chunk, chunksize, pageind <<
Jason Evans45186f02015-08-10 23:03:34 -07001719 LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1720 flags = flag_unzeroed;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001721 }
Jason Evans45186f02015-08-10 23:03:34 -07001722 arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1723 flags);
1724 arena_mapbits_large_set(chunk, pageind, run_size,
1725 flags);
Jason Evansee41ad42015-02-15 18:04:46 -08001726
1727 /*
Jason Evans45186f02015-08-10 23:03:34 -07001728 * Set the unzeroed flag for internal pages, now that
Jason Evans8d6a3e82015-03-18 18:55:33 -07001729 * chunk_purge_wrapper() has returned whether the pages
1730 * were zeroed as a side effect of purging. This chunk
1731 * map modification is safe even though the arena mutex
Jason Evansee41ad42015-02-15 18:04:46 -08001732 * isn't currently owned by this thread, because the run
1733 * is marked as allocated, thus protecting it from being
1734 * modified by any other thread. As long as these
1735 * writes don't perturb the first and last elements'
1736 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1737 */
Jason Evans45186f02015-08-10 23:03:34 -07001738 for (i = 1; i < npages-1; i++) {
1739 arena_mapbits_internal_set(chunk, pageind+i,
Jason Evansee41ad42015-02-15 18:04:46 -08001740 flag_unzeroed);
1741 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001742 }
Qinfan Wue9708002014-07-21 18:09:04 -07001743
Jason Evansaa5113b2014-01-14 16:23:03 -08001744 npurged += npages;
1745 if (config_stats)
1746 nmadvise++;
1747 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001748 malloc_mutex_lock(tsdn, &arena->lock);
Qinfan Wue9708002014-07-21 18:09:04 -07001749
1750 if (config_stats) {
Jason Evansaa5113b2014-01-14 16:23:03 -08001751 arena->stats.nmadvise += nmadvise;
Qinfan Wue9708002014-07-21 18:09:04 -07001752 arena->stats.purged += npurged;
1753 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001754
1755 return (npurged);
1756}
1757
1758static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001759arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001760 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001761 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001762{
Jason Evans38e42d32015-03-10 18:15:40 -07001763 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001764 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001765
Jason Evansb49a3342015-07-28 11:28:19 -04001766 /* Deallocate chunks/runs. */
Jason Evans38e42d32015-03-10 18:15:40 -07001767 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001768 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001769 rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1770 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansf5c8f372015-03-10 18:29:49 -07001771 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001772 extent_node_t *chunkselm_next = qr_next(chunkselm,
Jason Evans738e0892015-02-18 01:15:50 -08001773 cc_link);
Jason Evans339c2b22015-02-17 22:25:56 -08001774 void *addr = extent_node_addr_get(chunkselm);
1775 size_t size = extent_node_size_get(chunkselm);
Jason Evans5c77af92016-11-14 18:27:23 -08001776 size_t sn = extent_node_sn_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001777 bool zeroed = extent_node_zeroed_get(chunkselm);
Jason Evansde249c82015-08-09 16:47:27 -07001778 bool committed = extent_node_committed_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001779 extent_node_dirty_remove(chunkselm);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001780 arena_node_dalloc(tsdn, arena, chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001781 chunkselm = chunkselm_next;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001782 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
Jason Evans5c77af92016-11-14 18:27:23 -08001783 size, sn, zeroed, committed);
Jason Evansee41ad42015-02-15 18:04:46 -08001784 } else {
Jason Evans8fadb1a2015-08-04 10:49:46 -07001785 arena_chunk_t *chunk =
1786 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001787 arena_chunk_map_misc_t *miscelm =
1788 arena_rd_to_miscelm(rdelm);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001789 size_t pageind = arena_miscelm_to_pageind(miscelm);
1790 bool decommitted = (arena_mapbits_decommitted_get(chunk,
1791 pageind) != 0);
Jason Evans38e42d32015-03-10 18:15:40 -07001792 arena_run_t *run = &miscelm->run;
1793 qr_remove(rdelm, rd_link);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001794 arena_run_dalloc(tsdn, arena, run, false, true,
Jason Evansb2c0d632016-04-13 23:36:15 -07001795 decommitted);
Jason Evansee41ad42015-02-15 18:04:46 -08001796 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001797 }
1798}
1799
Jason Evans243f7a02016-02-19 20:09:31 -08001800/*
1801 * NB: ndirty_limit is interpreted differently depending on opt_purge:
1802 * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
1803 * desired state:
1804 * (arena->ndirty <= ndirty_limit)
1805 * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
1806 * violating the invariant:
1807 * (arena->ndirty >= ndirty_limit)
1808 */
Jason Evans8d6a3e82015-03-18 18:55:33 -07001809static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001810arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
Jason Evanse476f8a2010-01-16 09:53:50 -08001811{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001812 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001813 size_t npurge, npurged;
Jason Evans38e42d32015-03-10 18:15:40 -07001814 arena_runs_dirty_link_t purge_runs_sentinel;
Jason Evansee41ad42015-02-15 18:04:46 -08001815 extent_node_t purge_chunks_sentinel;
Qinfan Wue9708002014-07-21 18:09:04 -07001816
Jason Evans0a9f9a42015-06-22 18:50:32 -07001817 arena->purging = true;
1818
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001819 /*
1820 * Calls to arena_dirty_count() are disabled even for debug builds
1821 * because overhead grows nonlinearly as memory usage increases.
1822 */
1823 if (false && config_debug) {
Qinfan Wu90737fc2014-07-21 19:39:20 -07001824 size_t ndirty = arena_dirty_count(arena);
Qinfan Wua244e502014-07-21 10:23:36 -07001825 assert(ndirty == arena->ndirty);
Jason Evans2caa4712010-03-04 21:35:07 -08001826 }
Jason Evans243f7a02016-02-19 20:09:31 -08001827 assert(opt_purge != purge_mode_ratio || (arena->nactive >>
1828 arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001829
1830 qr_new(&purge_runs_sentinel, rd_link);
1831 extent_node_dirty_linkage_init(&purge_chunks_sentinel);
1832
Jason Evansc1e00ef2016-05-10 22:21:10 -07001833 npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001834 &purge_runs_sentinel, &purge_chunks_sentinel);
1835 if (npurge == 0)
1836 goto label_return;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001837 npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
Jason Evansb2c0d632016-04-13 23:36:15 -07001838 &purge_runs_sentinel, &purge_chunks_sentinel);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001839 assert(npurged == npurge);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001840 arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001841 &purge_chunks_sentinel);
Jason Evanse476f8a2010-01-16 09:53:50 -08001842
Jason Evans7372b152012-02-10 20:22:09 -08001843 if (config_stats)
1844 arena->stats.npurge++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001845
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001846label_return:
Jason Evans0a9f9a42015-06-22 18:50:32 -07001847 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08001848}
1849
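/*
 * A minimal sketch of how the two purge modes read ndirty_limit (see the NB
 * comment above arena_purge_to_limit()): ratio mode keeps purging while the
 * arena is still above the limit, while decay mode refuses to purge a run
 * that would drop the arena below the limit.  All toy_* names are
 * hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

static bool
toy_may_purge_next(bool decay_mode, size_t ndirty, size_t run_npages,
    size_t ndirty_limit)
{
	if (decay_mode) {
		/* Stop before dropping below the limit. */
		return (ndirty >= run_npages &&
		    ndirty - run_npages >= ndirty_limit);
	}
	/* Ratio: keep going while still above the limit. */
	return (ndirty > ndirty_limit);
}
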
Jason Evans6005f072010-09-30 16:55:08 -07001850void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001851arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
Jason Evans6005f072010-09-30 16:55:08 -07001852{
1853
Jason Evansc1e00ef2016-05-10 22:21:10 -07001854 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001855 if (all)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001856 arena_purge_to_limit(tsdn, arena, 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001857 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07001858 arena_maybe_purge(tsdn, arena);
1859 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans6005f072010-09-30 16:55:08 -07001860}
1861
Jason Evanse476f8a2010-01-16 09:53:50 -08001862static void
Jason Evans19ff2ce2016-04-22 14:37:17 -07001863arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
1864{
1865 size_t pageind, npages;
1866
1867 cassert(config_prof);
1868 assert(opt_prof);
1869
1870 /*
1871 * Iterate over the allocated runs and remove profiled allocations from
1872 * the sample set.
1873 */
1874 for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
1875 if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
1876 if (arena_mapbits_large_get(chunk, pageind) != 0) {
1877 void *ptr = (void *)((uintptr_t)chunk + (pageind
1878 << LG_PAGE));
Jason Evansc1e00ef2016-05-10 22:21:10 -07001879 size_t usize = isalloc(tsd_tsdn(tsd), ptr,
1880 config_prof);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001881
1882 prof_free(tsd, ptr, usize);
1883 npages = arena_mapbits_large_size_get(chunk,
1884 pageind) >> LG_PAGE;
1885 } else {
1886 /* Skip small run. */
1887 size_t binind = arena_mapbits_binind_get(chunk,
1888 pageind);
1889 arena_bin_info_t *bin_info =
1890 &arena_bin_info[binind];
1891 npages = bin_info->run_size >> LG_PAGE;
1892 }
1893 } else {
1894 /* Skip unallocated run. */
1895 npages = arena_mapbits_unallocated_size_get(chunk,
1896 pageind) >> LG_PAGE;
1897 }
1898 assert(pageind + npages <= chunk_npages);
1899 }
1900}
1901
1902void
1903arena_reset(tsd_t *tsd, arena_t *arena)
1904{
1905 unsigned i;
1906 extent_node_t *node;
1907
1908 /*
1909 * Locking in this function is unintuitive. The caller guarantees that
1910 * no concurrent operations are happening in this arena, but there are
1911 * still reasons that some locking is necessary:
1912 *
1913 * - Some of the functions in the transitive closure of calls assume
1914 * appropriate locks are held, and in some cases these locks are
1915 * temporarily dropped to avoid lock order reversal or deadlock due to
1916 * reentry.
1917 * - mallctl("epoch", ...) may concurrently refresh stats. While
1918 * strictly speaking this is a "concurrent operation", disallowing
1919 * stats refreshes would impose an inconvenient burden.
1920 */
1921
1922 /* Remove large allocations from prof sample set. */
1923 if (config_prof && opt_prof) {
1924 ql_foreach(node, &arena->achunks, ql_link) {
1925 arena_achunk_prof_reset(tsd, arena,
1926 extent_node_addr_get(node));
1927 }
1928 }
1929
Jason Evans7e674952016-04-25 13:26:54 -07001930 /* Reset curruns for large size classes. */
1931 if (config_stats) {
1932 for (i = 0; i < nlclasses; i++)
1933 arena->stats.lstats[i].curruns = 0;
1934 }
1935
Jason Evans19ff2ce2016-04-22 14:37:17 -07001936 /* Huge allocations. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001937 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001938 for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
1939 ql_last(&arena->huge, ql_link)) {
1940 void *ptr = extent_node_addr_get(node);
Jason Evans7e674952016-04-25 13:26:54 -07001941 size_t usize;
Jason Evans19ff2ce2016-04-22 14:37:17 -07001942
Jason Evansc1e00ef2016-05-10 22:21:10 -07001943 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans7e674952016-04-25 13:26:54 -07001944 if (config_stats || (config_prof && opt_prof))
Jason Evansc1e00ef2016-05-10 22:21:10 -07001945 usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
Jason Evans7e674952016-04-25 13:26:54 -07001946 /* Remove huge allocation from prof sample set. */
1947 if (config_prof && opt_prof)
Jason Evans19ff2ce2016-04-22 14:37:17 -07001948 prof_free(tsd, ptr, usize);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001949 huge_dalloc(tsd_tsdn(tsd), ptr);
1950 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans7e674952016-04-25 13:26:54 -07001951 /* Cancel out unwanted effects on stats. */
1952 if (config_stats)
1953 arena_huge_reset_stats_cancel(arena, usize);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001954 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001955 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001956
Jason Evansc1e00ef2016-05-10 22:21:10 -07001957 malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001958
1959 /* Bins. */
1960 for (i = 0; i < NBINS; i++) {
1961 arena_bin_t *bin = &arena->bins[i];
Jason Evansc1e00ef2016-05-10 22:21:10 -07001962 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001963 bin->runcur = NULL;
1964 arena_run_heap_new(&bin->runs);
1965 if (config_stats) {
1966 bin->stats.curregs = 0;
1967 bin->stats.curruns = 0;
1968 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001969 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001970 }
1971
1972 /*
1973 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
1974 * chains directly correspond.
1975 */
1976 qr_new(&arena->runs_dirty, rd_link);
1977 for (node = qr_next(&arena->chunks_cache, cc_link);
1978 node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
1979 qr_new(&node->rd, rd_link);
1980 qr_meld(&arena->runs_dirty, &node->rd, rd_link);
1981 }
1982
1983 /* Arena chunks. */
1984 for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
1985 ql_last(&arena->achunks, ql_link)) {
1986 ql_remove(&arena->achunks, node, ql_link);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001987 arena_chunk_discard(tsd_tsdn(tsd), arena,
1988 extent_node_addr_get(node));
Jason Evans19ff2ce2016-04-22 14:37:17 -07001989 }
1990
1991 /* Spare. */
1992 if (arena->spare != NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001993 arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001994 arena->spare = NULL;
1995 }
1996
1997 assert(!arena->purging);
1998 arena->nactive = 0;
1999
Jason Evansa4e83e82016-11-07 09:37:12 -08002000 for (i = 0; i < NPSIZES; i++)
Jason Evans19ff2ce2016-04-22 14:37:17 -07002001 arena_run_heap_new(&arena->runs_avail[i]);
2002
Jason Evansc1e00ef2016-05-10 22:21:10 -07002003 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07002004}
2005
2006static void
Jason Evansaa5113b2014-01-14 16:23:03 -08002007arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07002008 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
2009 size_t flag_decommitted)
Jason Evanse476f8a2010-01-16 09:53:50 -08002010{
Jason Evansaa5113b2014-01-14 16:23:03 -08002011 size_t size = *p_size;
2012 size_t run_ind = *p_run_ind;
2013 size_t run_pages = *p_run_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002014
2015 /* Try to coalesce forward. */
2016 if (run_ind + run_pages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07002017 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
Jason Evans8fadb1a2015-08-04 10:49:46 -07002018 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
2019 arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
2020 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07002021 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
2022 run_ind+run_pages);
Jason Evansae4c7b42012-04-02 07:04:34 -07002023 size_t nrun_pages = nrun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08002024
2025 /*
2026 * Remove successor from runs_avail; the coalesced run is
2027 * inserted later.
2028 */
Jason Evans203484e2012-05-02 00:30:36 -07002029 assert(arena_mapbits_unallocated_size_get(chunk,
2030 run_ind+run_pages+nrun_pages-1) == nrun_size);
2031 assert(arena_mapbits_dirty_get(chunk,
2032 run_ind+run_pages+nrun_pages-1) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002033 assert(arena_mapbits_decommitted_get(chunk,
2034 run_ind+run_pages+nrun_pages-1) == flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07002035 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08002036
Jason Evansee41ad42015-02-15 18:04:46 -08002037 /*
2038 * If the successor is dirty, remove it from the set of dirty
2039 * pages.
2040 */
Qinfan Wu04d60a12014-07-18 14:21:17 -07002041 if (flag_dirty != 0) {
Jason Evansee41ad42015-02-15 18:04:46 -08002042 arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
Jason Evans070b3c32014-08-14 14:45:58 -07002043 nrun_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07002044 }
2045
Jason Evanse476f8a2010-01-16 09:53:50 -08002046 size += nrun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07002047 run_pages += nrun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002048
Jason Evans203484e2012-05-02 00:30:36 -07002049 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2050 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2051 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002052 }
2053
2054 /* Try to coalesce backward. */
Jason Evansaa5113b2014-01-14 16:23:03 -08002055 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
2056 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
Jason Evans8fadb1a2015-08-04 10:49:46 -07002057 flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
2058 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07002059 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
2060 run_ind-1);
Jason Evansae4c7b42012-04-02 07:04:34 -07002061 size_t prun_pages = prun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08002062
Jason Evans12ca9142010-10-17 19:56:09 -07002063 run_ind -= prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002064
2065 /*
2066 * Remove predecessor from runs_avail; the coalesced run is
2067 * inserted later.
2068 */
Jason Evans203484e2012-05-02 00:30:36 -07002069 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2070 prun_size);
2071 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002072 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2073 flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07002074 arena_avail_remove(arena, chunk, run_ind, prun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08002075
Jason Evansee41ad42015-02-15 18:04:46 -08002076 /*
2077 * If the predecessor is dirty, remove it from the set of dirty
2078 * pages.
2079 */
2080 if (flag_dirty != 0) {
2081 arena_run_dirty_remove(arena, chunk, run_ind,
2082 prun_pages);
2083 }
Qinfan Wu04d60a12014-07-18 14:21:17 -07002084
Jason Evanse476f8a2010-01-16 09:53:50 -08002085 size += prun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07002086 run_pages += prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002087
Jason Evans203484e2012-05-02 00:30:36 -07002088 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2089 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2090 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002091 }
2092
Jason Evansaa5113b2014-01-14 16:23:03 -08002093 *p_size = size;
2094 *p_run_ind = run_ind;
2095 *p_run_pages = run_pages;
2096}
2097
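/*
 * A minimal sketch of the coalescing rule above: a freed run merges with an
 * adjacent run only if the neighbour is also free and shares the same dirty
 * state (the real code additionally matches the decommitted state and works
 * on the chunk's page map).  All toy_* names are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct {
	size_t pageind;		/* First page of the run. */
	size_t npages;
	bool free;
	bool dirty;
} toy_run_t;

/* Merge b into a if b immediately follows a; returns true if merged. */
static bool
toy_coalesce_forward(toy_run_t *a, const toy_run_t *b)
{
	if (!a->free || !b->free || a->dirty != b->dirty)
		return (false);
	if (a->pageind + a->npages != b->pageind)
		return (false);
	a->npages += b->npages;
	return (true);
}
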
Jason Evans8fadb1a2015-08-04 10:49:46 -07002098static size_t
2099arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2100 size_t run_ind)
2101{
2102 size_t size;
2103
2104 assert(run_ind >= map_bias);
2105 assert(run_ind < chunk_npages);
2106
2107 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
2108 size = arena_mapbits_large_size_get(chunk, run_ind);
2109 assert(size == PAGE || arena_mapbits_large_size_get(chunk,
2110 run_ind+(size>>LG_PAGE)-1) == 0);
2111 } else {
2112 arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
2113 size = bin_info->run_size;
2114 }
2115
2116 return (size);
2117}
2118
Jason Evansaa5113b2014-01-14 16:23:03 -08002119static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002120arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
Jason Evansb2c0d632016-04-13 23:36:15 -07002121 bool cleaned, bool decommitted)
Jason Evansaa5113b2014-01-14 16:23:03 -08002122{
2123 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07002124 arena_chunk_map_misc_t *miscelm;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002125 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
Jason Evansaa5113b2014-01-14 16:23:03 -08002126
2127 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07002128 miscelm = arena_run_to_miscelm(run);
2129 run_ind = arena_miscelm_to_pageind(miscelm);
Jason Evansaa5113b2014-01-14 16:23:03 -08002130 assert(run_ind >= map_bias);
2131 assert(run_ind < chunk_npages);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002132 size = arena_run_size_get(arena, chunk, run, run_ind);
Jason Evansaa5113b2014-01-14 16:23:03 -08002133 run_pages = (size >> LG_PAGE);
Jason Evans40ee9aa2016-02-27 12:34:50 -08002134 arena_nactive_sub(arena, run_pages);
Jason Evansaa5113b2014-01-14 16:23:03 -08002135
2136 /*
2137 * The run is dirty if the caller claims to have dirtied it, as well as
2138 * if it was already dirty before being allocated and the caller
2139 * doesn't claim to have cleaned it.
2140 */
2141 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2142 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07002143 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
2144 != 0)
Jason Evansaa5113b2014-01-14 16:23:03 -08002145 dirty = true;
2146 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002147 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08002148
2149 /* Mark pages as unallocated in the chunk map. */
Jason Evans8fadb1a2015-08-04 10:49:46 -07002150 if (dirty || decommitted) {
2151 size_t flags = flag_dirty | flag_decommitted;
2152 arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08002153 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07002154 flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08002155 } else {
2156 arena_mapbits_unallocated_set(chunk, run_ind, size,
2157 arena_mapbits_unzeroed_get(chunk, run_ind));
2158 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
2159 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
2160 }
2161
Jason Evans8fadb1a2015-08-04 10:49:46 -07002162 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
2163 flag_dirty, flag_decommitted);
Jason Evansaa5113b2014-01-14 16:23:03 -08002164
Jason Evanse476f8a2010-01-16 09:53:50 -08002165 /* Insert into runs_avail, now that coalescing is complete. */
Jason Evans203484e2012-05-02 00:30:36 -07002166 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2167 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
2168 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2169 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07002170 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2171 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
Qinfan Wu90737fc2014-07-21 19:39:20 -07002172 arena_avail_insert(arena, chunk, run_ind, run_pages);
Jason Evans8d4203c2010-04-13 20:53:21 -07002173
Jason Evans070b3c32014-08-14 14:45:58 -07002174 if (dirty)
Jason Evansee41ad42015-02-15 18:04:46 -08002175 arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07002176
Jason Evans203484e2012-05-02 00:30:36 -07002177 /* Deallocate chunk if it is now completely unused. */
Jason Evans155bfa72014-10-05 17:54:10 -07002178 if (size == arena_maxrun) {
Jason Evans203484e2012-05-02 00:30:36 -07002179 assert(run_ind == map_bias);
Jason Evans155bfa72014-10-05 17:54:10 -07002180 assert(run_pages == (arena_maxrun >> LG_PAGE));
Jason Evansc1e00ef2016-05-10 22:21:10 -07002181 arena_chunk_dalloc(tsdn, arena, chunk);
Jason Evans203484e2012-05-02 00:30:36 -07002182 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002183
Jason Evans4fb7f512010-01-27 18:27:09 -08002184 /*
Jason Evans8d4203c2010-04-13 20:53:21 -07002185 * It is okay to do dirty page processing here even if the chunk was
Jason Evans4fb7f512010-01-27 18:27:09 -08002186 * deallocated above, since in that case it is the spare. Waiting
2187 * until after possible chunk deallocation to do dirty processing
2188 * allows for an old spare to be fully deallocated, thus decreasing the
2189 * chances of spuriously crossing the dirty page purging threshold.
2190 */
Jason Evans8d4203c2010-04-13 20:53:21 -07002191 if (dirty)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002192 arena_maybe_purge(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002193}
2194
2195static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002196arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002197 arena_run_t *run, size_t oldsize, size_t newsize)
Jason Evanse476f8a2010-01-16 09:53:50 -08002198{
Jason Evans0c5dd032014-09-29 01:31:39 -07002199 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2200 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07002201 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002202 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans1f27abc2015-08-11 12:42:33 -07002203 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2204 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2205 CHUNK_MAP_UNZEROED : 0;
Jason Evanse476f8a2010-01-16 09:53:50 -08002206
2207 assert(oldsize > newsize);
2208
2209 /*
2210 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07002211 * leading run as separately allocated. Set the last element of each
2212 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08002213 */
Jason Evans203484e2012-05-02 00:30:36 -07002214 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evans1f27abc2015-08-11 12:42:33 -07002215 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2216 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2217 pageind+head_npages-1)));
2218 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
2219 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
Jason Evans940a2e02010-10-17 17:51:37 -07002220
Jason Evans7372b152012-02-10 20:22:09 -08002221 if (config_debug) {
Jason Evansae4c7b42012-04-02 07:04:34 -07002222 UNUSED size_t tail_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002223 assert(arena_mapbits_large_size_get(chunk,
2224 pageind+head_npages+tail_npages-1) == 0);
2225 assert(arena_mapbits_dirty_get(chunk,
2226 pageind+head_npages+tail_npages-1) == flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07002227 }
Jason Evansd8ceef62012-05-10 20:59:39 -07002228 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
Jason Evans1f27abc2015-08-11 12:42:33 -07002229 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2230 pageind+head_npages)));
Jason Evanse476f8a2010-01-16 09:53:50 -08002231
Jason Evansc1e00ef2016-05-10 22:21:10 -07002232 arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
Jason Evansb2c0d632016-04-13 23:36:15 -07002233 0));
Jason Evanse476f8a2010-01-16 09:53:50 -08002234}
2235
2236static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002237arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002238 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
Jason Evanse476f8a2010-01-16 09:53:50 -08002239{
Jason Evans0c5dd032014-09-29 01:31:39 -07002240 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2241 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07002242 size_t head_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002243 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans1f27abc2015-08-11 12:42:33 -07002244 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2245 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2246 CHUNK_MAP_UNZEROED : 0;
Jason Evans0c5dd032014-09-29 01:31:39 -07002247 arena_chunk_map_misc_t *tail_miscelm;
2248 arena_run_t *tail_run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002249
2250 assert(oldsize > newsize);
2251
2252 /*
2253 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07002254 * trailing run as separately allocated. Set the last element of each
2255 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08002256 */
Jason Evans203484e2012-05-02 00:30:36 -07002257 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evans1f27abc2015-08-11 12:42:33 -07002258 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2259 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2260 pageind+head_npages-1)));
2261 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
2262 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
Jason Evans940a2e02010-10-17 17:51:37 -07002263
Jason Evans203484e2012-05-02 00:30:36 -07002264 if (config_debug) {
2265 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
2266 assert(arena_mapbits_large_size_get(chunk,
2267 pageind+head_npages+tail_npages-1) == 0);
2268 assert(arena_mapbits_dirty_get(chunk,
2269 pageind+head_npages+tail_npages-1) == flag_dirty);
2270 }
2271 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
Jason Evans1f27abc2015-08-11 12:42:33 -07002272 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2273 pageind+head_npages)));
Jason Evanse476f8a2010-01-16 09:53:50 -08002274
Jason Evans61a6dfc2016-03-23 16:04:38 -07002275 tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
Jason Evans0c5dd032014-09-29 01:31:39 -07002276 tail_run = &tail_miscelm->run;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002277 arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
Jason Evansb2c0d632016-04-13 23:36:15 -07002278 != 0));
Jason Evanse476f8a2010-01-16 09:53:50 -08002279}
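
/*
 * Illustrative sketch, not part of jemalloc: a worked example of the page
 * arithmetic performed by arena_run_trim_head() and arena_run_trim_tail()
 * above, assuming 4 KiB pages (LG_PAGE == 12).  The helper is hypothetical
 * and is not referenced anywhere in this file.
 */
static void
example_run_trim_arithmetic(void)
{
	size_t oldsize = 8 << 12;	/* An 8-page run... */
	size_t newsize = 3 << 12;	/* ...trimmed so that 3 pages remain. */
	size_t pageind = 10;		/* The run starts at page index 10. */
	size_t head_npages, tail_pageind;

	/*
	 * arena_run_trim_head(): the leading (oldsize - newsize) bytes are
	 * split off and deallocated, so pages [10 .. 15) are released and
	 * the caller keeps the trailing pages [15 .. 18).
	 */
	head_npages = (oldsize - newsize) >> 12;
	assert(head_npages == 5);
	assert(pageind + head_npages == 15);

	/*
	 * arena_run_trim_tail(): the leading newsize bytes are kept, so the
	 * caller retains pages [10 .. 13) and the tail run spanning pages
	 * [13 .. 18) is deallocated.
	 */
	tail_pageind = pageind + (newsize >> 12);
	assert(tail_pageind == 13);
}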
2280
Jason Evanse7a10582012-02-13 17:36:52 -08002281static void
2282arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
2283{
Jason Evans0c5dd032014-09-29 01:31:39 -07002284 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
Jason Evanse7a10582012-02-13 17:36:52 -08002285
Jason Evansc6a2c392016-03-26 17:30:37 -07002286 arena_run_heap_insert(&bin->runs, miscelm);
Jason Evanse7a10582012-02-13 17:36:52 -08002287}
2288
2289static arena_run_t *
2290arena_bin_nonfull_run_tryget(arena_bin_t *bin)
2291{
Jason Evansc6a2c392016-03-26 17:30:37 -07002292 arena_chunk_map_misc_t *miscelm;
2293
2294 miscelm = arena_run_heap_remove_first(&bin->runs);
2295 if (miscelm == NULL)
2296 return (NULL);
2297 if (config_stats)
2298 bin->stats.reruns++;
2299
2300 return (&miscelm->run);
Jason Evanse7a10582012-02-13 17:36:52 -08002301}
2302
2303static arena_run_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002304arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002305{
Jason Evanse476f8a2010-01-16 09:53:50 -08002306 arena_run_t *run;
Jason Evansd01fd192015-08-19 15:21:32 -07002307 szind_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002308 arena_bin_info_t *bin_info;
Jason Evanse476f8a2010-01-16 09:53:50 -08002309
2310 /* Look for a usable run. */
Jason Evanse7a10582012-02-13 17:36:52 -08002311 run = arena_bin_nonfull_run_tryget(bin);
2312 if (run != NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08002313 return (run);
Jason Evanse476f8a2010-01-16 09:53:50 -08002314 /* No existing runs have any space available. */
2315
Jason Evans49f7e8f2011-03-15 13:59:15 -07002316 binind = arena_bin_index(arena, bin);
2317 bin_info = &arena_bin_info[binind];
2318
Jason Evanse476f8a2010-01-16 09:53:50 -08002319 /* Allocate a new run. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002320 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002321 /******************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002322 malloc_mutex_lock(tsdn, &arena->lock);
2323 run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
Jason Evanse00572b2010-03-14 19:43:56 -07002324 if (run != NULL) {
2325 /* Initialize run internals. */
Jason Evans381c23d2014-10-10 23:01:03 -07002326 run->binind = binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002327 run->nfree = bin_info->nregs;
Jason Evans0c5dd032014-09-29 01:31:39 -07002328 bitmap_init(run->bitmap, &bin_info->bitmap_info);
Jason Evanse69bee02010-03-15 22:25:23 -07002329 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002330 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002331 /********************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002332 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002333 if (run != NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08002334 if (config_stats) {
2335 bin->stats.nruns++;
2336 bin->stats.curruns++;
Jason Evans7372b152012-02-10 20:22:09 -08002337 }
Jason Evanse00572b2010-03-14 19:43:56 -07002338 return (run);
2339 }
2340
2341 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08002342 * arena_run_alloc_small() failed, but another thread may have made
Jason Evans940a2e02010-10-17 17:51:37 -07002343 * sufficient memory available while this one dropped bin->lock above,
Jason Evanse00572b2010-03-14 19:43:56 -07002344 * so search one more time.
2345 */
Jason Evanse7a10582012-02-13 17:36:52 -08002346 run = arena_bin_nonfull_run_tryget(bin);
2347 if (run != NULL)
Jason Evanse00572b2010-03-14 19:43:56 -07002348 return (run);
Jason Evanse00572b2010-03-14 19:43:56 -07002349
2350 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08002351}
2352
Jason Evans1e0a6362010-03-13 13:41:58 -08002353/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
Jason Evanse476f8a2010-01-16 09:53:50 -08002354static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002355arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002356{
Jason Evansd01fd192015-08-19 15:21:32 -07002357 szind_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002358 arena_bin_info_t *bin_info;
Jason Evanse00572b2010-03-14 19:43:56 -07002359 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002360
Jason Evans49f7e8f2011-03-15 13:59:15 -07002361 binind = arena_bin_index(arena, bin);
2362 bin_info = &arena_bin_info[binind];
Jason Evanse00572b2010-03-14 19:43:56 -07002363 bin->runcur = NULL;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002364 run = arena_bin_nonfull_run_get(tsdn, arena, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07002365 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
2366 /*
2367 * Another thread updated runcur while this one ran without the
2368 * bin lock in arena_bin_nonfull_run_get().
2369 */
Dmitry-Mea306a602015-09-04 13:15:28 +03002370 void *ret;
Jason Evanse00572b2010-03-14 19:43:56 -07002371 assert(bin->runcur->nfree > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002372 ret = arena_run_reg_alloc(bin->runcur, bin_info);
Jason Evanse00572b2010-03-14 19:43:56 -07002373 if (run != NULL) {
Jason Evans940a2e02010-10-17 17:51:37 -07002374 arena_chunk_t *chunk;
2375
2376 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08002377 * arena_run_alloc_small() may have allocated run, or
2378			 * it may have pulled run from the bin's run heap.
2379 * Therefore it is unsafe to make any assumptions about
2380 * how run has previously been used, and
2381 * arena_bin_lower_run() must be called, as if a region
2382 * were just deallocated from the run.
Jason Evans940a2e02010-10-17 17:51:37 -07002383 */
2384 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evansb2c0d632016-04-13 23:36:15 -07002385 if (run->nfree == bin_info->nregs) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002386 arena_dalloc_bin_run(tsdn, arena, chunk, run,
Jason Evansb2c0d632016-04-13 23:36:15 -07002387 bin);
2388 } else
Jason Evans5c77af92016-11-14 18:27:23 -08002389 arena_bin_lower_run(arena, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07002390 }
2391 return (ret);
2392 }
2393
2394 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08002395 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07002396
2397 bin->runcur = run;
2398
Jason Evanse476f8a2010-01-16 09:53:50 -08002399 assert(bin->runcur->nfree > 0);
2400
Jason Evans49f7e8f2011-03-15 13:59:15 -07002401 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08002402}
2403
Jason Evans86815df2010-03-13 20:32:56 -08002404void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002405arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
Jason Evans243f7a02016-02-19 20:09:31 -08002406 szind_t binind, uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08002407{
2408 unsigned i, nfill;
2409 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002410
2411 assert(tbin->ncached == 0);
2412
Jason Evansc1e00ef2016-05-10 22:21:10 -07002413 if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
2414 prof_idump(tsdn);
Jason Evanse69bee02010-03-15 22:25:23 -07002415 bin = &arena->bins[binind];
Jason Evansc1e00ef2016-05-10 22:21:10 -07002416 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07002417 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2418 tbin->lg_fill_div); i < nfill; i++) {
Dmitry-Mea306a602015-09-04 13:15:28 +03002419 arena_run_t *run;
2420 void *ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08002421 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002422 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002423 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07002424 ptr = arena_bin_malloc_hard(tsdn, arena, bin);
Jason Evansf11a6772014-10-05 13:05:10 -07002425 if (ptr == NULL) {
2426 /*
2427 * OOM. tbin->avail isn't yet filled down to its first
2428 * element, so the successful allocations (if any) must
Qi Wangf4a0f322015-10-27 15:12:10 -07002429			 * be moved to just below tbin->avail before bailing out.
Jason Evansf11a6772014-10-05 13:05:10 -07002430 */
2431 if (i > 0) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002432 memmove(tbin->avail - i, tbin->avail - nfill,
Jason Evansf11a6772014-10-05 13:05:10 -07002433 i * sizeof(void *));
2434 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002435 break;
Jason Evansf11a6772014-10-05 13:05:10 -07002436 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002437 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002438 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2439 true);
2440 }
Jason Evans9c43c132011-03-18 10:53:15 -07002441 /* Insert such that low regions get used first. */
Qi Wangf4a0f322015-10-27 15:12:10 -07002442 *(tbin->avail - nfill + i) = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08002443 }
Jason Evans7372b152012-02-10 20:22:09 -08002444 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002445 bin->stats.nmalloc += i;
2446 bin->stats.nrequests += tbin->tstats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002447 bin->stats.curregs += i;
Jason Evans7372b152012-02-10 20:22:09 -08002448 bin->stats.nfills++;
2449 tbin->tstats.nrequests = 0;
2450 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002451 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002452 tbin->ncached = i;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002453 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002454}
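
/*
 * Illustrative sketch, not part of jemalloc: the fill loop in
 * arena_tcache_fill_small() above writes new pointers into the region
 * immediately below tbin->avail, at avail[-nfill .. -1].  On a partial
 * fill (OOM after i successes) the filled prefix is slid up so that it
 * ends flush against tbin->avail, which is the layout the tcache expects.
 * The standalone function below mimics that bookkeeping with a plain
 * array; all names and values are hypothetical.
 */
static void
example_tbin_partial_fill(void)
{
	void *slots[8];			/* Backing storage for the stack. */
	void **avail = &slots[8];	/* Points one past the top slot. */
	size_t nfill = 8;		/* Intended number of fills. */
	size_t i;

	for (i = 0; i < 5; i++)		/* Only 5 allocations succeed. */
		*(avail - nfill + i) = (void *)(uintptr_t)(i + 1);
	/* Slide the 5 filled slots up so that they end at avail[-1]. */
	memmove(avail - i, avail - nfill, i * sizeof(void *));
	assert(*(avail - 1) == (void *)(uintptr_t)5);
	assert(*(avail - i) == (void *)(uintptr_t)1);
}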
Jason Evanse476f8a2010-01-16 09:53:50 -08002455
Jason Evans122449b2012-04-06 00:35:09 -07002456void
2457arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2458{
2459
Chris Petersona82070e2016-03-27 23:28:39 -07002460 size_t redzone_size = bin_info->redzone_size;
2461
Jason Evans122449b2012-04-06 00:35:09 -07002462 if (zero) {
Chris Petersona82070e2016-03-27 23:28:39 -07002463 memset((void *)((uintptr_t)ptr - redzone_size),
2464 JEMALLOC_ALLOC_JUNK, redzone_size);
2465 memset((void *)((uintptr_t)ptr + bin_info->reg_size),
2466 JEMALLOC_ALLOC_JUNK, redzone_size);
Jason Evans122449b2012-04-06 00:35:09 -07002467 } else {
Chris Petersona82070e2016-03-27 23:28:39 -07002468 memset((void *)((uintptr_t)ptr - redzone_size),
2469 JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
Jason Evans122449b2012-04-06 00:35:09 -07002470 }
2471}
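
/*
 * Illustrative sketch, not part of jemalloc: the small-region layout that
 * arena_alloc_junk_small() above (and arena_redzones_validate() below)
 * assume.  Each region of reg_size usable bytes is bracketed by
 * redzone_size guard bytes on both sides; the returned pointer refers to
 * the usable bytes, so the leading redzone lives at ptr - redzone_size and
 * the trailing redzone at ptr + reg_size.  The buffer and sizes below are
 * hypothetical.
 */
static void
example_redzone_layout(void)
{
	unsigned char backing[8 + 32 + 8];	/* redzone | region | redzone */
	size_t redzone_size = 8, reg_size = 32, i;
	unsigned char *ptr = backing + redzone_size;	/* Usable bytes. */

	/* Junk-fill both redzones, as the zero == true path above does. */
	memset(ptr - redzone_size, JEMALLOC_ALLOC_JUNK, redzone_size);
	memset(ptr + reg_size, JEMALLOC_ALLOC_JUNK, redzone_size);
	/* A write one byte past the region corrupts the trailing redzone... */
	ptr[reg_size] = 0;
	/* ...which a validation pass detects by rescanning the guard bytes. */
	for (i = 0; i < redzone_size; i++) {
		if (ptr[reg_size + i] != JEMALLOC_ALLOC_JUNK)
			break;
	}
	assert(i == 0);		/* The first trailing guard byte was clobbered. */
}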
2472
Jason Evans0d6c5d82013-12-17 15:14:36 -08002473#ifdef JEMALLOC_JET
2474#undef arena_redzone_corruption
Jason Evansab0cfe02016-04-18 15:11:20 -07002475#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
Jason Evans0d6c5d82013-12-17 15:14:36 -08002476#endif
2477static void
2478arena_redzone_corruption(void *ptr, size_t usize, bool after,
2479 size_t offset, uint8_t byte)
2480{
2481
Jason Evans5fae7dc2015-07-23 13:56:25 -07002482 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2483 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
Jason Evans0d6c5d82013-12-17 15:14:36 -08002484 after ? "after" : "before", ptr, usize, byte);
2485}
2486#ifdef JEMALLOC_JET
Jason Evans0d6c5d82013-12-17 15:14:36 -08002487#undef arena_redzone_corruption
Jason Evans6b694c42014-01-07 16:47:56 -08002488#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2489arena_redzone_corruption_t *arena_redzone_corruption =
Jason Evansab0cfe02016-04-18 15:11:20 -07002490 JEMALLOC_N(n_arena_redzone_corruption);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002491#endif
2492
2493static void
2494arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
Jason Evans122449b2012-04-06 00:35:09 -07002495{
Jason Evans122449b2012-04-06 00:35:09 -07002496 bool error = false;
2497
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002498 if (opt_junk_alloc) {
Dmitry-Mea306a602015-09-04 13:15:28 +03002499 size_t size = bin_info->reg_size;
2500 size_t redzone_size = bin_info->redzone_size;
2501 size_t i;
2502
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002503 for (i = 1; i <= redzone_size; i++) {
2504 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
Chris Petersona82070e2016-03-27 23:28:39 -07002505 if (*byte != JEMALLOC_ALLOC_JUNK) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002506 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002507 arena_redzone_corruption(ptr, size, false, i,
2508 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002509 if (reset)
Chris Petersona82070e2016-03-27 23:28:39 -07002510 *byte = JEMALLOC_ALLOC_JUNK;
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002511 }
2512 }
2513 for (i = 0; i < redzone_size; i++) {
2514 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
Chris Petersona82070e2016-03-27 23:28:39 -07002515 if (*byte != JEMALLOC_ALLOC_JUNK) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002516 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002517 arena_redzone_corruption(ptr, size, true, i,
2518 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002519 if (reset)
Chris Petersona82070e2016-03-27 23:28:39 -07002520 *byte = JEMALLOC_ALLOC_JUNK;
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002521 }
Jason Evans122449b2012-04-06 00:35:09 -07002522 }
2523 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002524
Jason Evans122449b2012-04-06 00:35:09 -07002525 if (opt_abort && error)
2526 abort();
Jason Evans0d6c5d82013-12-17 15:14:36 -08002527}
Jason Evans122449b2012-04-06 00:35:09 -07002528
Jason Evans6b694c42014-01-07 16:47:56 -08002529#ifdef JEMALLOC_JET
2530#undef arena_dalloc_junk_small
Jason Evansab0cfe02016-04-18 15:11:20 -07002531#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
Jason Evans6b694c42014-01-07 16:47:56 -08002532#endif
Jason Evans0d6c5d82013-12-17 15:14:36 -08002533void
2534arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2535{
2536 size_t redzone_size = bin_info->redzone_size;
2537
2538 arena_redzones_validate(ptr, bin_info, false);
Chris Petersona82070e2016-03-27 23:28:39 -07002539 memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
Jason Evans122449b2012-04-06 00:35:09 -07002540 bin_info->reg_interval);
2541}
Jason Evans6b694c42014-01-07 16:47:56 -08002542#ifdef JEMALLOC_JET
2543#undef arena_dalloc_junk_small
2544#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2545arena_dalloc_junk_small_t *arena_dalloc_junk_small =
Jason Evansab0cfe02016-04-18 15:11:20 -07002546 JEMALLOC_N(n_arena_dalloc_junk_small);
Jason Evans6b694c42014-01-07 16:47:56 -08002547#endif
Jason Evans122449b2012-04-06 00:35:09 -07002548
Jason Evans0d6c5d82013-12-17 15:14:36 -08002549void
2550arena_quarantine_junk_small(void *ptr, size_t usize)
2551{
Jason Evansd01fd192015-08-19 15:21:32 -07002552 szind_t binind;
Jason Evans0d6c5d82013-12-17 15:14:36 -08002553 arena_bin_info_t *bin_info;
2554 cassert(config_fill);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002555 assert(opt_junk_free);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002556 assert(opt_quarantine);
2557 assert(usize <= SMALL_MAXCLASS);
2558
Jason Evans155bfa72014-10-05 17:54:10 -07002559 binind = size2index(usize);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002560 bin_info = &arena_bin_info[binind];
2561 arena_redzones_validate(ptr, bin_info, true);
2562}
2563
Jason Evans578cd162016-02-19 18:40:03 -08002564static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002565arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002566{
2567 void *ret;
2568 arena_bin_t *bin;
Jason Evans0c516a02016-02-25 15:29:49 -08002569 size_t usize;
Jason Evanse476f8a2010-01-16 09:53:50 -08002570 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002571
Jason Evansb1726102012-02-28 16:50:47 -08002572 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08002573 bin = &arena->bins[binind];
Jason Evans0c516a02016-02-25 15:29:49 -08002574 usize = index2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08002575
Jason Evansc1e00ef2016-05-10 22:21:10 -07002576 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002577 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002578 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002579 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07002580 ret = arena_bin_malloc_hard(tsdn, arena, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002581
2582 if (ret == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002583 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002584 return (NULL);
2585 }
2586
Jason Evans7372b152012-02-10 20:22:09 -08002587 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002588 bin->stats.nmalloc++;
2589 bin->stats.nrequests++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002590 bin->stats.curregs++;
Jason Evans7372b152012-02-10 20:22:09 -08002591 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002592 malloc_mutex_unlock(tsdn, &bin->lock);
2593 if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
2594 prof_idump(tsdn);
Jason Evanse476f8a2010-01-16 09:53:50 -08002595
Jason Evans551ebc42014-10-03 10:16:09 -07002596 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002597 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002598 if (unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002599 arena_alloc_junk_small(ret,
2600 &arena_bin_info[binind], false);
Jason Evans9c640bf2014-09-11 16:20:44 -07002601 } else if (unlikely(opt_zero))
Jason Evans0c516a02016-02-25 15:29:49 -08002602 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002603 }
Jason Evans0c516a02016-02-25 15:29:49 -08002604 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
Jason Evans122449b2012-04-06 00:35:09 -07002605 } else {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002606 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002607 arena_alloc_junk_small(ret, &arena_bin_info[binind],
2608 true);
2609 }
Jason Evans0c516a02016-02-25 15:29:49 -08002610 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2611 memset(ret, 0, usize);
Jason Evans122449b2012-04-06 00:35:09 -07002612 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002613
Jason Evansc1e00ef2016-05-10 22:21:10 -07002614 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002615 return (ret);
2616}
2617
2618void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002619arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002620{
2621 void *ret;
Jason Evans155bfa72014-10-05 17:54:10 -07002622 size_t usize;
Jason Evans8a03cf02015-05-04 09:58:36 -07002623 uintptr_t random_offset;
Jason Evans0c5dd032014-09-29 01:31:39 -07002624 arena_run_t *run;
2625 arena_chunk_map_misc_t *miscelm;
Dmitri Smirnov33184bf2016-02-29 14:30:19 -08002626 UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002627
2628 /* Large allocation. */
Qi Wangf4a0f322015-10-27 15:12:10 -07002629 usize = index2size(binind);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002630 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans8a03cf02015-05-04 09:58:36 -07002631 if (config_cache_oblivious) {
Jason Evansbce61d62015-07-07 09:32:05 -07002632 uint64_t r;
2633
Jason Evans8a03cf02015-05-04 09:58:36 -07002634 /*
2635 * Compute a uniformly distributed offset within the first page
2636 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
2637 * for 4 KiB pages and 64-byte cachelines.
2638 */
Jason Evans5d6cb6e2016-11-07 10:52:44 -08002639 r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
2640 LG_CACHELINE, false);
Jason Evans8a03cf02015-05-04 09:58:36 -07002641 random_offset = ((uintptr_t)r) << LG_CACHELINE;
2642 } else
2643 random_offset = 0;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002644 run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
Jason Evans0c5dd032014-09-29 01:31:39 -07002645 if (run == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002646 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002647 return (NULL);
2648 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002649 miscelm = arena_run_to_miscelm(run);
Jason Evans8a03cf02015-05-04 09:58:36 -07002650 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2651 random_offset);
Jason Evans7372b152012-02-10 20:22:09 -08002652 if (config_stats) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002653 szind_t index = binind - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002654
Jason Evans7372b152012-02-10 20:22:09 -08002655 arena->stats.nmalloc_large++;
2656 arena->stats.nrequests_large++;
Jason Evans155bfa72014-10-05 17:54:10 -07002657 arena->stats.allocated_large += usize;
2658 arena->stats.lstats[index].nmalloc++;
2659 arena->stats.lstats[index].nrequests++;
2660 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002661 }
Jason Evans7372b152012-02-10 20:22:09 -08002662 if (config_prof)
Jason Evans155bfa72014-10-05 17:54:10 -07002663 idump = arena_prof_accum_locked(arena, usize);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002664 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08002665 if (config_prof && idump)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002666 prof_idump(tsdn);
Jason Evanse476f8a2010-01-16 09:53:50 -08002667
Jason Evans551ebc42014-10-03 10:16:09 -07002668 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002669 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002670 if (unlikely(opt_junk_alloc))
Chris Petersona82070e2016-03-27 23:28:39 -07002671 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002672 else if (unlikely(opt_zero))
Jason Evans155bfa72014-10-05 17:54:10 -07002673 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002674 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002675 }
2676
Jason Evansc1e00ef2016-05-10 22:21:10 -07002677 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002678 return (ret);
2679}
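
/*
 * Illustrative sketch, not part of jemalloc: the cache-oblivious offset
 * computed in arena_malloc_large() above, spelled out for 4 KiB pages
 * (LG_PAGE == 12) and 64-byte cachelines (LG_CACHELINE == 6).  A PRNG draw
 * r in [0 .. 64) is scaled by the cacheline size, so the returned pointer
 * is displaced by one of {0, 64, 128, ..., 4032} bytes and never leaves
 * the run's first page.  The value of r below is arbitrary.
 */
static void
example_random_offset(void)
{
	uintptr_t r = 19;			/* Pretend PRNG draw in [0 .. 64). */
	uintptr_t random_offset = r << 6;	/* Scale to a cacheline multiple. */

	assert(random_offset == 1216);
	assert(random_offset < 4096);		/* Stays within the first page. */
	assert((random_offset & 63) == 0);	/* Cacheline-aligned. */
}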
2680
Jason Evans578cd162016-02-19 18:40:03 -08002681void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002682arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
Jason Evans66cd9532016-04-22 14:34:14 -07002683 bool zero)
Jason Evans578cd162016-02-19 18:40:03 -08002684{
2685
Jason Evansc1e00ef2016-05-10 22:21:10 -07002686 assert(!tsdn_null(tsdn) || arena != NULL);
2687
2688 if (likely(!tsdn_null(tsdn)))
2689 arena = arena_choose(tsdn_tsd(tsdn), arena);
Jason Evans578cd162016-02-19 18:40:03 -08002690 if (unlikely(arena == NULL))
2691 return (NULL);
2692
2693 if (likely(size <= SMALL_MAXCLASS))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002694 return (arena_malloc_small(tsdn, arena, ind, zero));
Jason Evans578cd162016-02-19 18:40:03 -08002695 if (likely(size <= large_maxclass))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002696 return (arena_malloc_large(tsdn, arena, ind, zero));
2697 return (huge_malloc(tsdn, arena, index2size(ind), zero));
Jason Evans578cd162016-02-19 18:40:03 -08002698}
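
/*
 * Illustrative sketch, not part of jemalloc: the small/large/huge dispatch
 * that arena_malloc_hard() above performs, restated as a standalone
 * classifier.  The thresholds are hypothetical placeholders; the real
 * SMALL_MAXCLASS and large_maxclass depend on the configured page and
 * chunk sizes.
 */
static const char *
example_size_class_kind(size_t size)
{
	size_t small_max = 14 * 1024;			/* Stand-in for SMALL_MAXCLASS. */
	size_t large_max = 2 * 1024 * 1024 - 4096;	/* Stand-in for large_maxclass. */

	if (size <= small_max)
		return ("small");	/* arena_malloc_small() path. */
	if (size <= large_max)
		return ("large");	/* arena_malloc_large() path. */
	return ("huge");		/* huge_malloc() path. */
}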
2699
Jason Evanse476f8a2010-01-16 09:53:50 -08002700/* Only handles large allocations that require more than page alignment. */
Jason Evans88fef7c2015-02-12 14:06:37 -08002701static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002702arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002703 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002704{
2705 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07002706 size_t alloc_size, leadsize, trailsize;
2707 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002708 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07002709 arena_chunk_map_misc_t *miscelm;
2710 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002711
Jason Evansc1e00ef2016-05-10 22:21:10 -07002712 assert(!tsdn_null(tsdn) || arena != NULL);
Jason Evans50883de2015-07-23 17:13:18 -07002713 assert(usize == PAGE_CEILING(usize));
Jason Evans93443682010-10-20 17:39:18 -07002714
Jason Evansc1e00ef2016-05-10 22:21:10 -07002715 if (likely(!tsdn_null(tsdn)))
2716 arena = arena_choose(tsdn_tsd(tsdn), arena);
Jason Evans88fef7c2015-02-12 14:06:37 -08002717 if (unlikely(arena == NULL))
2718 return (NULL);
2719
Jason Evans93443682010-10-20 17:39:18 -07002720 alignment = PAGE_CEILING(alignment);
Jason Evans05a9e4a2016-06-07 14:19:50 -07002721 alloc_size = usize + large_pad + alignment - PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08002722
Jason Evansc1e00ef2016-05-10 22:21:10 -07002723 malloc_mutex_lock(tsdn, &arena->lock);
2724 run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07002725 if (run == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002726 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002727 return (NULL);
2728 }
Jason Evans5ff709c2012-04-11 18:13:45 -07002729 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07002730 miscelm = arena_run_to_miscelm(run);
2731 rpages = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002732
Jason Evans0c5dd032014-09-29 01:31:39 -07002733 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
2734 (uintptr_t)rpages;
Jason Evans50883de2015-07-23 17:13:18 -07002735 assert(alloc_size >= leadsize + usize);
2736 trailsize = alloc_size - leadsize - usize - large_pad;
Jason Evans5ff709c2012-04-11 18:13:45 -07002737 if (leadsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07002738 arena_chunk_map_misc_t *head_miscelm = miscelm;
2739 arena_run_t *head_run = run;
2740
Jason Evans61a6dfc2016-03-23 16:04:38 -07002741 miscelm = arena_miscelm_get_mutable(chunk,
Jason Evans0c5dd032014-09-29 01:31:39 -07002742 arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
2743 LG_PAGE));
2744 run = &miscelm->run;
2745
Jason Evansc1e00ef2016-05-10 22:21:10 -07002746 arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
Jason Evans0c5dd032014-09-29 01:31:39 -07002747 alloc_size - leadsize);
Jason Evans5ff709c2012-04-11 18:13:45 -07002748 }
2749 if (trailsize != 0) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002750 arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
Jason Evans50883de2015-07-23 17:13:18 -07002751 trailsize, usize + large_pad, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002752 }
Jason Evans8fadb1a2015-08-04 10:49:46 -07002753 if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2754 size_t run_ind =
2755 arena_miscelm_to_pageind(arena_run_to_miscelm(run));
Jason Evansde249c82015-08-09 16:47:27 -07002756 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2757 bool decommitted = (arena_mapbits_decommitted_get(chunk,
2758 run_ind) != 0);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002759
Jason Evansde249c82015-08-09 16:47:27 -07002760 assert(decommitted); /* Cause of OOM. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002761 arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
2762 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002763 return (NULL);
2764 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002765 ret = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002766
Jason Evans7372b152012-02-10 20:22:09 -08002767 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002768 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002769
Jason Evans7372b152012-02-10 20:22:09 -08002770 arena->stats.nmalloc_large++;
2771 arena->stats.nrequests_large++;
Jason Evans50883de2015-07-23 17:13:18 -07002772 arena->stats.allocated_large += usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002773 arena->stats.lstats[index].nmalloc++;
2774 arena->stats.lstats[index].nrequests++;
2775 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002776 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002777 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002778
Jason Evans551ebc42014-10-03 10:16:09 -07002779 if (config_fill && !zero) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002780 if (unlikely(opt_junk_alloc))
Chris Petersona82070e2016-03-27 23:28:39 -07002781 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002782 else if (unlikely(opt_zero))
Jason Evans50883de2015-07-23 17:13:18 -07002783 memset(ret, 0, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002784 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002785 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002786 return (ret);
2787}
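
/*
 * Illustrative sketch, not part of jemalloc: a worked example of the
 * over-allocation arithmetic in arena_palloc_large() above, assuming
 * 4 KiB pages, large_pad == 0 (cache-oblivious layout disabled), and a
 * 16 KiB alignment request.  The run address is hypothetical.
 */
static void
example_palloc_large_arithmetic(void)
{
	size_t page = 4096, lpad = 0;		/* PAGE and large_pad stand-ins. */
	size_t usize = 8 * 4096;		/* 32 KiB request. */
	size_t alignment = 4 * 4096;		/* 16 KiB alignment. */
	size_t alloc_size, leadsize, trailsize;
	uintptr_t rpages = 0x200000 + 2 * 4096;	/* 8 KiB past an aligned boundary. */

	/* Worst case: enough slack to slide forward to any aligned address. */
	alloc_size = usize + lpad + alignment - page;
	assert(alloc_size == 11 * 4096);
	/* Bytes skipped to reach the next alignment-multiple address. */
	leadsize = ((rpages + alignment - 1) & ~((uintptr_t)alignment - 1)) -
	    rpages;
	assert(leadsize == 2 * 4096);
	/* Whatever remains beyond the aligned usize is trimmed off the tail. */
	trailsize = alloc_size - leadsize - usize - lpad;
	assert(trailsize == 4096);
}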
2788
Jason Evans88fef7c2015-02-12 14:06:37 -08002789void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002790arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002791 bool zero, tcache_t *tcache)
2792{
2793 void *ret;
2794
Jason Evans8a03cf02015-05-04 09:58:36 -07002795 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
Jason Evans51541752015-05-19 17:42:31 -07002796 && (usize & PAGE_MASK) == 0))) {
2797 /* Small; alignment doesn't require special run placement. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002798 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
Qi Wangf4a0f322015-10-27 15:12:10 -07002799 tcache, true);
Jason Evans676df882015-09-11 20:50:20 -07002800 } else if (usize <= large_maxclass && alignment <= PAGE) {
Jason Evans51541752015-05-19 17:42:31 -07002801 /*
2802 * Large; alignment doesn't require special run placement.
2803 * However, the cached pointer may be at a random offset from
2804 * the base of the run, so do some bit manipulation to retrieve
2805 * the base.
2806 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002807 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
Qi Wangf4a0f322015-10-27 15:12:10 -07002808 tcache, true);
Jason Evans51541752015-05-19 17:42:31 -07002809 if (config_cache_oblivious)
2810 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2811 } else {
Jason Evans676df882015-09-11 20:50:20 -07002812 if (likely(usize <= large_maxclass)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002813 ret = arena_palloc_large(tsdn, arena, usize, alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002814 zero);
2815 } else if (likely(alignment <= chunksize))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002816 ret = huge_malloc(tsdn, arena, usize, zero);
Jason Evans88fef7c2015-02-12 14:06:37 -08002817 else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002818 ret = huge_palloc(tsdn, arena, usize, alignment, zero);
Jason Evans88fef7c2015-02-12 14:06:37 -08002819 }
2820 }
2821 return (ret);
2822}
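
/*
 * Illustrative sketch, not part of jemalloc: the bit manipulation that
 * arena_palloc() above uses to strip the cache-oblivious random offset
 * from a cached large allocation.  Because the offset is always smaller
 * than one page, masking off the low page bits recovers the page-aligned
 * run base.  Assumes 4 KiB pages; the address is hypothetical.
 */
static void
example_strip_random_offset(void)
{
	uintptr_t page_mask = 4096 - 1;
	uintptr_t ret = 0x40017000 + 1216;	/* Base plus a 19-cacheline offset. */

	assert((ret & ~page_mask) == 0x40017000);
}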
2823
Jason Evans0b270a92010-03-31 16:45:04 -07002824void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002825arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
Jason Evans0b270a92010-03-31 16:45:04 -07002826{
2827 arena_chunk_t *chunk;
Jason Evans155bfa72014-10-05 17:54:10 -07002828 size_t pageind;
Jason Evansd01fd192015-08-19 15:21:32 -07002829 szind_t binind;
Jason Evans0b270a92010-03-31 16:45:04 -07002830
Jason Evans78f73522012-04-18 13:38:40 -07002831 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07002832 assert(ptr != NULL);
2833 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002834 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2835 assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
Jason Evansb1726102012-02-28 16:50:47 -08002836 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07002837
2838 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07002839 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans155bfa72014-10-05 17:54:10 -07002840 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08002841 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07002842 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07002843
Jason Evansc1e00ef2016-05-10 22:21:10 -07002844 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2845 assert(isalloc(tsdn, ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07002846}
Jason Evans6109fe02010-02-10 10:37:56 -08002847
Jason Evanse476f8a2010-01-16 09:53:50 -08002848static void
Jason Evans088e6a02010-10-18 00:04:44 -07002849arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08002850 arena_bin_t *bin)
2851{
Jason Evanse476f8a2010-01-16 09:53:50 -08002852
Jason Evans19b3d612010-03-18 20:36:40 -07002853 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002854 if (run == bin->runcur)
2855 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002856 else {
Jason Evansd01fd192015-08-19 15:21:32 -07002857 szind_t binind = arena_bin_index(extent_node_arena_get(
Jason Evansee41ad42015-02-15 18:04:46 -08002858 &chunk->node), bin);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002859 arena_bin_info_t *bin_info = &arena_bin_info[binind];
2860
Jason Evansc6a2c392016-03-26 17:30:37 -07002861 /*
2862 * The following block's conditional is necessary because if the
2863 * run only contains one region, then it never gets inserted
2864		 * into the non-full runs heap.
2865 */
Jason Evans49f7e8f2011-03-15 13:59:15 -07002866 if (bin_info->nregs != 1) {
Jason Evansc6a2c392016-03-26 17:30:37 -07002867 arena_chunk_map_misc_t *miscelm =
2868 arena_run_to_miscelm(run);
2869
2870 arena_run_heap_remove(&bin->runs, miscelm);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002871 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002872 }
Jason Evans088e6a02010-10-18 00:04:44 -07002873}
2874
2875static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002876arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002877 arena_run_t *run, arena_bin_t *bin)
Jason Evans088e6a02010-10-18 00:04:44 -07002878{
Jason Evans088e6a02010-10-18 00:04:44 -07002879
2880 assert(run != bin->runcur);
Jason Evans86815df2010-03-13 20:32:56 -08002881
Jason Evansc1e00ef2016-05-10 22:21:10 -07002882 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002883 /******************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002884 malloc_mutex_lock(tsdn, &arena->lock);
2885 arena_run_dalloc(tsdn, arena, run, true, false, false);
2886 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002887 /****************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002888 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08002889 if (config_stats)
2890 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08002891}
2892
Jason Evans940a2e02010-10-17 17:51:37 -07002893static void
Jason Evans5c77af92016-11-14 18:27:23 -08002894arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002895{
Jason Evanse476f8a2010-01-16 09:53:50 -08002896
Jason Evans8de6a022010-10-17 20:57:30 -07002897 /*
Jason Evans5c77af92016-11-14 18:27:23 -08002898 * Make sure that if bin->runcur is non-NULL, it refers to the
2899 * oldest/lowest non-full run. It is okay to NULL runcur out rather
2900 * than proactively keeping it pointing at the oldest/lowest non-full
2901 * run.
Jason Evans8de6a022010-10-17 20:57:30 -07002902 */
Jason Evans5c77af92016-11-14 18:27:23 -08002903 if (bin->runcur != NULL &&
2904 arena_snad_comp(arena_run_to_miscelm(bin->runcur),
2905 arena_run_to_miscelm(run)) > 0) {
Jason Evans8de6a022010-10-17 20:57:30 -07002906 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08002907 if (bin->runcur->nfree > 0)
2908 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07002909 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08002910 if (config_stats)
2911 bin->stats.reruns++;
2912 } else
2913 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07002914}
2915
Jason Evansfc0b3b72014-10-09 17:54:06 -07002916static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002917arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002918 void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
Jason Evans940a2e02010-10-17 17:51:37 -07002919{
Jason Evans0c5dd032014-09-29 01:31:39 -07002920 size_t pageind, rpages_ind;
Jason Evans940a2e02010-10-17 17:51:37 -07002921 arena_run_t *run;
2922 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02002923 arena_bin_info_t *bin_info;
Jason Evansd01fd192015-08-19 15:21:32 -07002924 szind_t binind;
Jason Evans940a2e02010-10-17 17:51:37 -07002925
Jason Evansae4c7b42012-04-02 07:04:34 -07002926 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07002927 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
Jason Evans61a6dfc2016-03-23 16:04:38 -07002928 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002929 binind = run->binind;
2930 bin = &arena->bins[binind];
Mike Hommey8b499712012-04-24 23:22:02 +02002931 bin_info = &arena_bin_info[binind];
Jason Evans940a2e02010-10-17 17:51:37 -07002932
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002933 if (!junked && config_fill && unlikely(opt_junk_free))
Jason Evans122449b2012-04-06 00:35:09 -07002934 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07002935
2936 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002937 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07002938 arena_dissociate_bin_run(chunk, run, bin);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002939 arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07002940 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans5c77af92016-11-14 18:27:23 -08002941 arena_bin_lower_run(arena, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002942
Jason Evans7372b152012-02-10 20:22:09 -08002943 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002944 bin->stats.ndalloc++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002945 bin->stats.curregs--;
Jason Evans7372b152012-02-10 20:22:09 -08002946 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002947}
2948
Jason Evanse476f8a2010-01-16 09:53:50 -08002949void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002950arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
2951 arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
Jason Evansfc0b3b72014-10-09 17:54:06 -07002952{
2953
Jason Evansc1e00ef2016-05-10 22:21:10 -07002954 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002955}
2956
2957void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002958arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002959 size_t pageind, arena_chunk_map_bits_t *bitselm)
Jason Evans203484e2012-05-02 00:30:36 -07002960{
2961 arena_run_t *run;
2962 arena_bin_t *bin;
Jason Evans0c5dd032014-09-29 01:31:39 -07002963 size_t rpages_ind;
Jason Evans203484e2012-05-02 00:30:36 -07002964
Jason Evans0c5dd032014-09-29 01:31:39 -07002965 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
Jason Evans61a6dfc2016-03-23 16:04:38 -07002966 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002967 bin = &arena->bins[run->binind];
Jason Evansc1e00ef2016-05-10 22:21:10 -07002968 malloc_mutex_lock(tsdn, &bin->lock);
2969 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
2970 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evans203484e2012-05-02 00:30:36 -07002971}
2972
2973void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002974arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2975 void *ptr, size_t pageind)
Jason Evans203484e2012-05-02 00:30:36 -07002976{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002977 arena_chunk_map_bits_t *bitselm;
Jason Evans203484e2012-05-02 00:30:36 -07002978
2979 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07002980 /* arena_ptr_small_binind_get() does extra sanity checking. */
2981 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2982 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07002983 }
Jason Evans61a6dfc2016-03-23 16:04:38 -07002984 bitselm = arena_bitselm_get_mutable(chunk, pageind);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002985 arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
2986 arena_decay_tick(tsdn, arena);
Jason Evans203484e2012-05-02 00:30:36 -07002987}
Jason Evanse476f8a2010-01-16 09:53:50 -08002988
Jason Evans6b694c42014-01-07 16:47:56 -08002989#ifdef JEMALLOC_JET
2990#undef arena_dalloc_junk_large
Jason Evansab0cfe02016-04-18 15:11:20 -07002991#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
Jason Evans6b694c42014-01-07 16:47:56 -08002992#endif
Jason Evansfc0b3b72014-10-09 17:54:06 -07002993void
Jason Evans6b694c42014-01-07 16:47:56 -08002994arena_dalloc_junk_large(void *ptr, size_t usize)
2995{
2996
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002997 if (config_fill && unlikely(opt_junk_free))
Chris Petersona82070e2016-03-27 23:28:39 -07002998 memset(ptr, JEMALLOC_FREE_JUNK, usize);
Jason Evans6b694c42014-01-07 16:47:56 -08002999}
3000#ifdef JEMALLOC_JET
3001#undef arena_dalloc_junk_large
3002#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
3003arena_dalloc_junk_large_t *arena_dalloc_junk_large =
Jason Evansab0cfe02016-04-18 15:11:20 -07003004 JEMALLOC_N(n_arena_dalloc_junk_large);
Jason Evans6b694c42014-01-07 16:47:56 -08003005#endif
3006
Jason Evanse56b24e2015-09-20 09:58:10 -07003007static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003008arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
3009 arena_chunk_t *chunk, void *ptr, bool junked)
Jason Evanse476f8a2010-01-16 09:53:50 -08003010{
Jason Evans0c5dd032014-09-29 01:31:39 -07003011 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans61a6dfc2016-03-23 16:04:38 -07003012 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
3013 pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07003014 arena_run_t *run = &miscelm->run;
Jason Evans13668262010-01-31 03:57:29 -08003015
Jason Evans7372b152012-02-10 20:22:09 -08003016 if (config_fill || config_stats) {
Jason Evans8a03cf02015-05-04 09:58:36 -07003017 size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
3018 large_pad;
Jason Evanse476f8a2010-01-16 09:53:50 -08003019
Jason Evansfc0b3b72014-10-09 17:54:06 -07003020 if (!junked)
3021 arena_dalloc_junk_large(ptr, usize);
Jason Evans7372b152012-02-10 20:22:09 -08003022 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07003023 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07003024
Jason Evans7372b152012-02-10 20:22:09 -08003025 arena->stats.ndalloc_large++;
Jason Evans6b694c42014-01-07 16:47:56 -08003026 arena->stats.allocated_large -= usize;
Jason Evans155bfa72014-10-05 17:54:10 -07003027 arena->stats.lstats[index].ndalloc++;
3028 arena->stats.lstats[index].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08003029 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003030 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003031
Jason Evansc1e00ef2016-05-10 22:21:10 -07003032 arena_run_dalloc(tsdn, arena, run, true, false, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003033}
3034
Jason Evans203484e2012-05-02 00:30:36 -07003035void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003036arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
Jason Evansb2c0d632016-04-13 23:36:15 -07003037 arena_chunk_t *chunk, void *ptr)
Jason Evansfc0b3b72014-10-09 17:54:06 -07003038{
3039
Jason Evansc1e00ef2016-05-10 22:21:10 -07003040 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
Jason Evansfc0b3b72014-10-09 17:54:06 -07003041}
3042
3043void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003044arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3045 void *ptr)
Jason Evans203484e2012-05-02 00:30:36 -07003046{
3047
Jason Evansc1e00ef2016-05-10 22:21:10 -07003048 malloc_mutex_lock(tsdn, &arena->lock);
3049 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
3050 malloc_mutex_unlock(tsdn, &arena->lock);
3051 arena_decay_tick(tsdn, arena);
Jason Evans203484e2012-05-02 00:30:36 -07003052}
3053
Jason Evanse476f8a2010-01-16 09:53:50 -08003054static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003055arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07003056 void *ptr, size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08003057{
Jason Evans0c5dd032014-09-29 01:31:39 -07003058 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans61a6dfc2016-03-23 16:04:38 -07003059 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
3060 pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07003061 arena_run_t *run = &miscelm->run;
Jason Evanse476f8a2010-01-16 09:53:50 -08003062
3063 assert(size < oldsize);
3064
3065 /*
3066 * Shrink the run, and make trailing pages available for other
3067 * allocations.
3068 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07003069 malloc_mutex_lock(tsdn, &arena->lock);
3070 arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
Jason Evans8a03cf02015-05-04 09:58:36 -07003071 large_pad, true);
Jason Evans7372b152012-02-10 20:22:09 -08003072 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07003073 szind_t oldindex = size2index(oldsize) - NBINS;
3074 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07003075
Jason Evans7372b152012-02-10 20:22:09 -08003076 arena->stats.ndalloc_large++;
3077 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07003078 arena->stats.lstats[oldindex].ndalloc++;
3079 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08003080
Jason Evans7372b152012-02-10 20:22:09 -08003081 arena->stats.nmalloc_large++;
3082 arena->stats.nrequests_large++;
3083 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07003084 arena->stats.lstats[index].nmalloc++;
3085 arena->stats.lstats[index].nrequests++;
3086 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08003087 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07003088 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003089}
3090
3091static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003092arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07003093 void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003094{
Jason Evansae4c7b42012-04-02 07:04:34 -07003095 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans5716d972015-08-06 23:34:12 -07003096 size_t npages = (oldsize + large_pad) >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003097 size_t followsize;
Jason Evanse476f8a2010-01-16 09:53:50 -08003098
Jason Evans8a03cf02015-05-04 09:58:36 -07003099 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
3100 large_pad);
Jason Evanse476f8a2010-01-16 09:53:50 -08003101
3102 /* Try to extend the run. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07003103 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans560a4e12015-09-11 16:18:53 -07003104 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
3105 pageind+npages) != 0)
3106 goto label_fail;
3107 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
3108 if (oldsize + followsize >= usize_min) {
Jason Evanse476f8a2010-01-16 09:53:50 -08003109 /*
3110 * The next run is available and sufficiently large. Split the
3111 * following run, then merge the first part with the existing
3112 * allocation.
3113 */
Guilherme Goncalves9c6a8d32014-12-17 14:46:35 -02003114 arena_run_t *run;
Jason Evans560a4e12015-09-11 16:18:53 -07003115 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
Jason Evans155bfa72014-10-05 17:54:10 -07003116
Jason Evans560a4e12015-09-11 16:18:53 -07003117 usize = usize_max;
Jason Evans155bfa72014-10-05 17:54:10 -07003118 while (oldsize + followsize < usize)
3119 usize = index2size(size2index(usize)-1);
3120 assert(usize >= usize_min);
Jason Evans560a4e12015-09-11 16:18:53 -07003121 assert(usize >= oldsize);
Jason Evans5716d972015-08-06 23:34:12 -07003122 splitsize = usize - oldsize;
Jason Evans560a4e12015-09-11 16:18:53 -07003123 if (splitsize == 0)
3124 goto label_fail;
Jason Evans155bfa72014-10-05 17:54:10 -07003125
Jason Evans61a6dfc2016-03-23 16:04:38 -07003126 run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
Jason Evans560a4e12015-09-11 16:18:53 -07003127 if (arena_run_split_large(arena, run, splitsize, zero))
3128 goto label_fail;
Jason Evanse476f8a2010-01-16 09:53:50 -08003129
Jason Evansd260f442015-09-24 16:38:45 -07003130 if (config_cache_oblivious && zero) {
3131 /*
3132 * Zero the trailing bytes of the original allocation's
3133 * last page, since they are in an indeterminate state.
Jason Evansa784e412015-09-24 22:21:55 -07003134 * There will always be trailing bytes, because ptr's
3135 * offset from the beginning of the run is a multiple of
3136 * CACHELINE in [0 .. PAGE).
Jason Evansd260f442015-09-24 16:38:45 -07003137 */
Jason Evansa784e412015-09-24 22:21:55 -07003138 void *zbase = (void *)((uintptr_t)ptr + oldsize);
3139 void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
3140 PAGE));
3141 size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
3142 assert(nzero > 0);
3143 memset(zbase, 0, nzero);
Jason Evansd260f442015-09-24 16:38:45 -07003144 }
3145
Jason Evans088e6a02010-10-18 00:04:44 -07003146 size = oldsize + splitsize;
Jason Evans5716d972015-08-06 23:34:12 -07003147 npages = (size + large_pad) >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07003148
3149 /*
3150 * Mark the extended run as dirty if either portion of the run
3151 * was dirty before allocation. This is rather pedantic,
3152 * because there's not actually any sequence of events that
3153 * could cause the resulting run to be passed to
3154 * arena_run_dalloc() with the dirty argument set to false
3155 * (which is when dirty flag consistency would really matter).
3156 */
Jason Evans203484e2012-05-02 00:30:36 -07003157 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
3158 arena_mapbits_dirty_get(chunk, pageind+npages-1);
Jason Evans1f27abc2015-08-11 12:42:33 -07003159 flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
Jason Evans5716d972015-08-06 23:34:12 -07003160 arena_mapbits_large_set(chunk, pageind, size + large_pad,
Jason Evans1f27abc2015-08-11 12:42:33 -07003161 flag_dirty | (flag_unzeroed_mask &
3162 arena_mapbits_unzeroed_get(chunk, pageind)));
3163 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
3164 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
3165 pageind+npages-1)));
Jason Evanse476f8a2010-01-16 09:53:50 -08003166
Jason Evans7372b152012-02-10 20:22:09 -08003167 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07003168 szind_t oldindex = size2index(oldsize) - NBINS;
3169 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07003170
Jason Evans7372b152012-02-10 20:22:09 -08003171 arena->stats.ndalloc_large++;
3172 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07003173 arena->stats.lstats[oldindex].ndalloc++;
3174 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08003175
Jason Evans7372b152012-02-10 20:22:09 -08003176 arena->stats.nmalloc_large++;
3177 arena->stats.nrequests_large++;
3178 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07003179 arena->stats.lstats[index].nmalloc++;
3180 arena->stats.lstats[index].nrequests++;
3181 arena->stats.lstats[index].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07003182 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07003183 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003184 return (false);
3185 }
Jason Evans560a4e12015-09-11 16:18:53 -07003186label_fail:
Jason Evansc1e00ef2016-05-10 22:21:10 -07003187 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003188 return (true);
3189}
3190
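/*
 * Under JEMALLOC_JET, the definition below is compiled under the name
 * n_arena_ralloc_junk_large and exposed through a function pointer bearing
 * the public name, so tests can interpose a replacement junk hook.
 */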
Jason Evans6b694c42014-01-07 16:47:56 -08003191#ifdef JEMALLOC_JET
3192#undef arena_ralloc_junk_large
Jason Evansab0cfe02016-04-18 15:11:20 -07003193#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
Jason Evans6b694c42014-01-07 16:47:56 -08003194#endif
3195static void
3196arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
3197{
3198
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02003199 if (config_fill && unlikely(opt_junk_free)) {
Chris Petersona82070e2016-03-27 23:28:39 -07003200 memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
Jason Evans6b694c42014-01-07 16:47:56 -08003201 old_usize - usize);
3202 }
3203}
3204#ifdef JEMALLOC_JET
3205#undef arena_ralloc_junk_large
3206#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
3207arena_ralloc_junk_large_t *arena_ralloc_junk_large =
Jason Evansab0cfe02016-04-18 15:11:20 -07003208 JEMALLOC_N(n_arena_ralloc_junk_large);
Jason Evans6b694c42014-01-07 16:47:56 -08003209#endif
3210
Jason Evanse476f8a2010-01-16 09:53:50 -08003211/*
3212 * Try to resize a large allocation, in order to avoid copying. Growing
3213 * always fails if the run following the allocation is already in use.
3214 */
3215static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003216arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
Jason Evans560a4e12015-09-11 16:18:53 -07003217 size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003218{
Jason Evans560a4e12015-09-11 16:18:53 -07003219 arena_chunk_t *chunk;
3220 arena_t *arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08003221
Jason Evans560a4e12015-09-11 16:18:53 -07003222 if (oldsize == usize_max) {
3223 /* Current size class is compatible and maximal. */
Jason Evanse476f8a2010-01-16 09:53:50 -08003224 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003225 }
Jason Evans560a4e12015-09-11 16:18:53 -07003226
3227 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3228 arena = extent_node_arena_get(&chunk->node);
3229
3230 if (oldsize < usize_max) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07003231 bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
Jason Evansb2c0d632016-04-13 23:36:15 -07003232 oldsize, usize_min, usize_max, zero);
Jason Evans560a4e12015-09-11 16:18:53 -07003233 if (config_fill && !ret && !zero) {
3234 if (unlikely(opt_junk_alloc)) {
Chris Petersona82070e2016-03-27 23:28:39 -07003235 memset((void *)((uintptr_t)ptr + oldsize),
3236 JEMALLOC_ALLOC_JUNK,
Jason Evansc1e00ef2016-05-10 22:21:10 -07003237 isalloc(tsdn, ptr, config_prof) - oldsize);
Jason Evans560a4e12015-09-11 16:18:53 -07003238 } else if (unlikely(opt_zero)) {
3239 memset((void *)((uintptr_t)ptr + oldsize), 0,
Jason Evansc1e00ef2016-05-10 22:21:10 -07003240 isalloc(tsdn, ptr, config_prof) - oldsize);
Jason Evans560a4e12015-09-11 16:18:53 -07003241 }
3242 }
3243 return (ret);
3244 }
3245
3246 assert(oldsize > usize_max);
3247	/* Fill before shrinking in order to avoid a race. */
3248 arena_ralloc_junk_large(ptr, oldsize, usize_max);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003249 arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
Jason Evans560a4e12015-09-11 16:18:53 -07003250 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003251}
3252
Jason Evansb2c31662014-01-12 15:05:44 -08003253bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003254arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
Jason Evans243f7a02016-02-19 20:09:31 -08003255 size_t extra, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003256{
Jason Evans560a4e12015-09-11 16:18:53 -07003257 size_t usize_min, usize_max;
Jason Evanse476f8a2010-01-16 09:53:50 -08003258
Jason Evans0c516a02016-02-25 15:29:49 -08003259	/* Callers passing non-zero extra must have already clamped it. */
3260 assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
3261
Jason Evans0c516a02016-02-25 15:29:49 -08003262 if (unlikely(size > HUGE_MAXCLASS))
3263 return (true);
3264
Jason Evans560a4e12015-09-11 16:18:53 -07003265 usize_min = s2u(size);
Jason Evans560a4e12015-09-11 16:18:53 -07003266 usize_max = s2u(size + extra);
Jason Evans676df882015-09-11 20:50:20 -07003267 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
Jason Evans243f7a02016-02-19 20:09:31 -08003268 arena_chunk_t *chunk;
3269
Jason Evans88fef7c2015-02-12 14:06:37 -08003270 /*
3271 * Avoid moving the allocation if the size class can be left the
3272 * same.
3273 */
Jason Evans560a4e12015-09-11 16:18:53 -07003274 if (oldsize <= SMALL_MAXCLASS) {
3275 assert(arena_bin_info[size2index(oldsize)].reg_size ==
3276 oldsize);
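			/*
			 * Moving is required if usize_max maps to a different
			 * size class than oldsize and oldsize does not fall
			 * within [size, usize_max].
			 */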
Jason Evans4985dc62016-02-19 19:24:58 -08003277 if ((usize_max > SMALL_MAXCLASS ||
3278 size2index(usize_max) != size2index(oldsize)) &&
3279 (size > oldsize || usize_max < oldsize))
3280 return (true);
Jason Evans560a4e12015-09-11 16:18:53 -07003281 } else {
Jason Evans4985dc62016-02-19 19:24:58 -08003282 if (usize_max <= SMALL_MAXCLASS)
3283 return (true);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003284 if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
Jason Evans4985dc62016-02-19 19:24:58 -08003285 usize_max, zero))
3286 return (true);
Jason Evanse476f8a2010-01-16 09:53:50 -08003287 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003288
Jason Evans243f7a02016-02-19 20:09:31 -08003289 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003290 arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
Jason Evans4985dc62016-02-19 19:24:58 -08003291 return (false);
Jason Evans560a4e12015-09-11 16:18:53 -07003292 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07003293 return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
Jason Evans243f7a02016-02-19 20:09:31 -08003294 usize_max, zero));
Jason Evans560a4e12015-09-11 16:18:53 -07003295 }
3296}
3297
3298static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07003299arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
Jason Evans560a4e12015-09-11 16:18:53 -07003300 size_t alignment, bool zero, tcache_t *tcache)
3301{
3302
3303 if (alignment == 0)
Jason Evansc1e00ef2016-05-10 22:21:10 -07003304 return (arena_malloc(tsdn, arena, usize, size2index(usize),
3305 zero, tcache, true));
Jason Evans560a4e12015-09-11 16:18:53 -07003306 usize = sa2u(usize, alignment);
Jason Evans0c516a02016-02-25 15:29:49 -08003307 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
Jason Evans560a4e12015-09-11 16:18:53 -07003308 return (NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003309 return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
Jason Evans8e3c3c62010-09-17 15:46:18 -07003310}
Jason Evanse476f8a2010-01-16 09:53:50 -08003311
Jason Evans8e3c3c62010-09-17 15:46:18 -07003312void *
Jason Evans5460aa62014-09-22 21:09:23 -07003313arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
Jason Evans560a4e12015-09-11 16:18:53 -07003314 size_t alignment, bool zero, tcache_t *tcache)
Jason Evans8e3c3c62010-09-17 15:46:18 -07003315{
3316 void *ret;
Jason Evans560a4e12015-09-11 16:18:53 -07003317 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003318
Jason Evans560a4e12015-09-11 16:18:53 -07003319 usize = s2u(size);
Jason Evans0c516a02016-02-25 15:29:49 -08003320 if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
Jason Evans560a4e12015-09-11 16:18:53 -07003321 return (NULL);
3322
Jason Evans676df882015-09-11 20:50:20 -07003323 if (likely(usize <= large_maxclass)) {
Jason Evans88fef7c2015-02-12 14:06:37 -08003324 size_t copysize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003325
Jason Evans88fef7c2015-02-12 14:06:37 -08003326 /* Try to avoid moving the allocation. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07003327 if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
3328 zero))
Jason Evans88fef7c2015-02-12 14:06:37 -08003329 return (ptr);
Jason Evans8e3c3c62010-09-17 15:46:18 -07003330
Jason Evans88fef7c2015-02-12 14:06:37 -08003331 /*
3332 * size and oldsize are different enough that we need to move
3333 * the object. In that case, fall back to allocating new space
3334 * and copying.
3335 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07003336 ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
3337 alignment, zero, tcache);
Jason Evans560a4e12015-09-11 16:18:53 -07003338 if (ret == NULL)
3339 return (NULL);
Jason Evans88fef7c2015-02-12 14:06:37 -08003340
3341 /*
3342 * Junk/zero filling was already done by
3343 * ipalloc()/arena_malloc().
3344 */
3345
Jason Evans560a4e12015-09-11 16:18:53 -07003346 copysize = (usize < oldsize) ? usize : oldsize;
Jason Evans88fef7c2015-02-12 14:06:37 -08003347 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
3348 memcpy(ret, ptr, copysize);
Jason Evans3ef51d72016-05-06 12:16:00 -07003349 isqalloc(tsd, ptr, oldsize, tcache, true);
Jason Evans88fef7c2015-02-12 14:06:37 -08003350 } else {
Jason Evans560a4e12015-09-11 16:18:53 -07003351 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
3352 zero, tcache);
Jason Evans8e3c3c62010-09-17 15:46:18 -07003353 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003354 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08003355}
3356
Jason Evans609ae592012-10-11 13:53:15 -07003357dss_prec_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07003358arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
Jason Evans609ae592012-10-11 13:53:15 -07003359{
3360 dss_prec_t ret;
3361
Jason Evansc1e00ef2016-05-10 22:21:10 -07003362 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003363 ret = arena->dss_prec;
Jason Evansc1e00ef2016-05-10 22:21:10 -07003364 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003365 return (ret);
3366}
3367
Jason Evans4d434ad2014-04-15 12:09:48 -07003368bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003369arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
Jason Evans609ae592012-10-11 13:53:15 -07003370{
3371
Jason Evans551ebc42014-10-03 10:16:09 -07003372 if (!have_dss)
Jason Evans4d434ad2014-04-15 12:09:48 -07003373 return (dss_prec != dss_prec_disabled);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003374 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003375 arena->dss_prec = dss_prec;
Jason Evansc1e00ef2016-05-10 22:21:10 -07003376 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans4d434ad2014-04-15 12:09:48 -07003377 return (false);
Jason Evans609ae592012-10-11 13:53:15 -07003378}
3379
Jason Evans8d6a3e82015-03-18 18:55:33 -07003380ssize_t
3381arena_lg_dirty_mult_default_get(void)
3382{
3383
3384 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
3385}
3386
3387bool
3388arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
3389{
3390
Jason Evans243f7a02016-02-19 20:09:31 -08003391 if (opt_purge != purge_mode_ratio)
3392 return (true);
Jason Evans8d6a3e82015-03-18 18:55:33 -07003393 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
3394 return (true);
3395 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
3396 return (false);
3397}
3398
Jason Evans243f7a02016-02-19 20:09:31 -08003399ssize_t
3400arena_decay_time_default_get(void)
3401{
3402
3403 return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
3404}
3405
3406bool
3407arena_decay_time_default_set(ssize_t decay_time)
3408{
3409
3410 if (opt_purge != purge_mode_decay)
3411 return (true);
3412 if (!arena_decay_time_valid(decay_time))
3413 return (true);
3414 atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
3415 return (false);
3416}
3417
Jason Evans3c07f802016-02-27 20:40:13 -08003418static void
3419arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
3420 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3421 size_t *nactive, size_t *ndirty)
Jason Evans609ae592012-10-11 13:53:15 -07003422{
Jason Evans609ae592012-10-11 13:53:15 -07003423
Jason Evans66cd9532016-04-22 14:34:14 -07003424 *nthreads += arena_nthreads_get(arena, false);
Jason Evans609ae592012-10-11 13:53:15 -07003425 *dss = dss_prec_names[arena->dss_prec];
Jason Evans562d2662015-03-24 16:36:12 -07003426 *lg_dirty_mult = arena->lg_dirty_mult;
Jason Evans94e7ffa2016-10-10 20:32:19 -07003427 *decay_time = arena->decay.time;
Jason Evans609ae592012-10-11 13:53:15 -07003428 *nactive += arena->nactive;
3429 *ndirty += arena->ndirty;
Jason Evans3c07f802016-02-27 20:40:13 -08003430}
3431
3432void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003433arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
Jason Evansb2c0d632016-04-13 23:36:15 -07003434 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3435 size_t *nactive, size_t *ndirty)
Jason Evans3c07f802016-02-27 20:40:13 -08003436{
3437
Jason Evansc1e00ef2016-05-10 22:21:10 -07003438 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans3c07f802016-02-27 20:40:13 -08003439 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3440 decay_time, nactive, ndirty);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003441 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans3c07f802016-02-27 20:40:13 -08003442}
3443
3444void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003445arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
Jason Evansb2c0d632016-04-13 23:36:15 -07003446 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3447 size_t *nactive, size_t *ndirty, arena_stats_t *astats,
3448 malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
3449 malloc_huge_stats_t *hstats)
Jason Evans3c07f802016-02-27 20:40:13 -08003450{
3451 unsigned i;
3452
3453 cassert(config_stats);
3454
Jason Evansc1e00ef2016-05-10 22:21:10 -07003455 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans3c07f802016-02-27 20:40:13 -08003456 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3457 decay_time, nactive, ndirty);
Jason Evans609ae592012-10-11 13:53:15 -07003458
3459 astats->mapped += arena->stats.mapped;
Jason Evans04c3c0f2016-05-03 22:11:35 -07003460 astats->retained += arena->stats.retained;
Jason Evans609ae592012-10-11 13:53:15 -07003461 astats->npurge += arena->stats.npurge;
3462 astats->nmadvise += arena->stats.nmadvise;
3463 astats->purged += arena->stats.purged;
Jason Evans4581b972014-11-27 17:22:36 -02003464 astats->metadata_mapped += arena->stats.metadata_mapped;
3465 astats->metadata_allocated += arena_metadata_allocated_get(arena);
Jason Evans609ae592012-10-11 13:53:15 -07003466 astats->allocated_large += arena->stats.allocated_large;
3467 astats->nmalloc_large += arena->stats.nmalloc_large;
3468 astats->ndalloc_large += arena->stats.ndalloc_large;
3469 astats->nrequests_large += arena->stats.nrequests_large;
Jason Evanse2deab72014-05-15 22:22:27 -07003470 astats->allocated_huge += arena->stats.allocated_huge;
3471 astats->nmalloc_huge += arena->stats.nmalloc_huge;
3472 astats->ndalloc_huge += arena->stats.ndalloc_huge;
Jason Evans609ae592012-10-11 13:53:15 -07003473
3474 for (i = 0; i < nlclasses; i++) {
3475 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
3476 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
3477 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
3478 lstats[i].curruns += arena->stats.lstats[i].curruns;
3479 }
Jason Evans3c4d92e2014-10-12 22:53:59 -07003480
3481 for (i = 0; i < nhclasses; i++) {
3482 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
3483 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
3484 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
3485 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07003486 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003487
3488 for (i = 0; i < NBINS; i++) {
3489 arena_bin_t *bin = &arena->bins[i];
3490
Jason Evansc1e00ef2016-05-10 22:21:10 -07003491 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003492 bstats[i].nmalloc += bin->stats.nmalloc;
3493 bstats[i].ndalloc += bin->stats.ndalloc;
3494 bstats[i].nrequests += bin->stats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07003495 bstats[i].curregs += bin->stats.curregs;
Jason Evans609ae592012-10-11 13:53:15 -07003496 if (config_tcache) {
3497 bstats[i].nfills += bin->stats.nfills;
3498 bstats[i].nflushes += bin->stats.nflushes;
3499 }
3500 bstats[i].nruns += bin->stats.nruns;
3501 bstats[i].reruns += bin->stats.reruns;
3502 bstats[i].curruns += bin->stats.curruns;
Jason Evansc1e00ef2016-05-10 22:21:10 -07003503 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003504 }
3505}
3506
Jason Evans767d8502016-02-24 23:58:10 -08003507unsigned
Jason Evans66cd9532016-04-22 14:34:14 -07003508arena_nthreads_get(arena_t *arena, bool internal)
Jason Evans767d8502016-02-24 23:58:10 -08003509{
3510
Jason Evans66cd9532016-04-22 14:34:14 -07003511 return (atomic_read_u(&arena->nthreads[internal]));
Jason Evans767d8502016-02-24 23:58:10 -08003512}
3513
3514void
Jason Evans66cd9532016-04-22 14:34:14 -07003515arena_nthreads_inc(arena_t *arena, bool internal)
Jason Evans767d8502016-02-24 23:58:10 -08003516{
3517
Jason Evans66cd9532016-04-22 14:34:14 -07003518 atomic_add_u(&arena->nthreads[internal], 1);
Jason Evans767d8502016-02-24 23:58:10 -08003519}
3520
3521void
Jason Evans66cd9532016-04-22 14:34:14 -07003522arena_nthreads_dec(arena_t *arena, bool internal)
Jason Evans767d8502016-02-24 23:58:10 -08003523{
3524
Jason Evans66cd9532016-04-22 14:34:14 -07003525 atomic_sub_u(&arena->nthreads[internal], 1);
Jason Evans767d8502016-02-24 23:58:10 -08003526}
3527
Jason Evans5c77af92016-11-14 18:27:23 -08003528size_t
3529arena_extent_sn_next(arena_t *arena)
3530{
3531
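	/* Fetch-and-increment; return the pre-increment serial number. */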
3532 return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
3533}
3534
Jason Evans8bb31982014-10-07 23:14:57 -07003535arena_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07003536arena_new(tsdn_t *tsdn, unsigned ind)
Jason Evanse476f8a2010-01-16 09:53:50 -08003537{
Jason Evans8bb31982014-10-07 23:14:57 -07003538 arena_t *arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08003539 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08003540
Jason Evans8bb31982014-10-07 23:14:57 -07003541 /*
Jason Evans3c4d92e2014-10-12 22:53:59 -07003542 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
3543 * because there is no way to clean up if base_alloc() OOMs.
Jason Evans8bb31982014-10-07 23:14:57 -07003544 */
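	/*
	 * With config_stats enabled, the single base_alloc() request below is
	 * laid out as:
	 *
	 *   arena          at offset 0
	 *   stats.lstats   at CACHELINE_CEILING(sizeof(arena_t))
	 *   stats.hstats   at CACHELINE_CEILING(sizeof(arena_t)) +
	 *                     QUANTUM_CEILING(nlclasses *
	 *                     sizeof(malloc_large_stats_t))
	 */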
3545 if (config_stats) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07003546 arena = (arena_t *)base_alloc(tsdn,
Jason Evansf193fd82016-04-08 14:17:57 -07003547 CACHELINE_CEILING(sizeof(arena_t)) +
Jason Evans28b7e422016-11-04 15:00:08 -07003548 QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)))
3549 + (nhclasses * sizeof(malloc_huge_stats_t)));
Jason Evans8bb31982014-10-07 23:14:57 -07003550 } else
Jason Evansf193fd82016-04-08 14:17:57 -07003551 arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
Jason Evans8bb31982014-10-07 23:14:57 -07003552 if (arena == NULL)
3553 return (NULL);
3554
Jason Evans6109fe02010-02-10 10:37:56 -08003555 arena->ind = ind;
Jason Evans66cd9532016-04-22 14:34:14 -07003556 arena->nthreads[0] = arena->nthreads[1] = 0;
Jason Evansb2c0d632016-04-13 23:36:15 -07003557 if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
Jason Evanscbf3a6d2015-02-11 12:24:27 -08003558 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08003559
Jason Evans7372b152012-02-10 20:22:09 -08003560 if (config_stats) {
3561 memset(&arena->stats, 0, sizeof(arena_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003562 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
Jason Evansf193fd82016-04-08 14:17:57 -07003563 + CACHELINE_CEILING(sizeof(arena_t)));
Jason Evans7372b152012-02-10 20:22:09 -08003564 memset(arena->stats.lstats, 0, nlclasses *
3565 sizeof(malloc_large_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003566 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
Jason Evansf193fd82016-04-08 14:17:57 -07003567 + CACHELINE_CEILING(sizeof(arena_t)) +
Jason Evans3c4d92e2014-10-12 22:53:59 -07003568 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
3569 memset(arena->stats.hstats, 0, nhclasses *
3570 sizeof(malloc_huge_stats_t));
Jason Evans7372b152012-02-10 20:22:09 -08003571 if (config_tcache)
3572 ql_new(&arena->tcache_ql);
3573 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003574
Jason Evans7372b152012-02-10 20:22:09 -08003575 if (config_prof)
3576 arena->prof_accumbytes = 0;
Jason Evansd34f9e72010-02-11 13:19:21 -08003577
Jason Evans8a03cf02015-05-04 09:58:36 -07003578 if (config_cache_oblivious) {
3579 /*
3580 * A nondeterministic seed based on the address of arena reduces
3581 * the likelihood of lockstep non-uniform cache index
3582 * utilization among identical concurrent processes, but at the
3583 * cost of test repeatability. For debug builds, instead use a
3584 * deterministic seed.
3585 */
3586 arena->offset_state = config_debug ? ind :
Jason Evans5d6cb6e2016-11-07 10:52:44 -08003587 (size_t)(uintptr_t)arena;
Jason Evans8a03cf02015-05-04 09:58:36 -07003588 }
3589
Jason Evanse2bcf032016-10-13 12:18:38 -07003590 arena->dss_prec = chunk_dss_prec_get();
Jason Evans609ae592012-10-11 13:53:15 -07003591
Jason Evans19ff2ce2016-04-22 14:37:17 -07003592 ql_new(&arena->achunks);
3593
Jason Evans5c77af92016-11-14 18:27:23 -08003594 arena->extent_sn_next = 0;
3595
Jason Evanse476f8a2010-01-16 09:53:50 -08003596 arena->spare = NULL;
3597
Jason Evans8d6a3e82015-03-18 18:55:33 -07003598 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
Jason Evans0a9f9a42015-06-22 18:50:32 -07003599 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08003600 arena->nactive = 0;
3601 arena->ndirty = 0;
3602
Jason Evansa4e83e82016-11-07 09:37:12 -08003603 for (i = 0; i < NPSIZES; i++)
Jason Evansc6a2c392016-03-26 17:30:37 -07003604 arena_run_heap_new(&arena->runs_avail[i]);
Jason Evansf193fd82016-04-08 14:17:57 -07003605
Jason Evansee41ad42015-02-15 18:04:46 -08003606 qr_new(&arena->runs_dirty, rd_link);
Jason Evans738e0892015-02-18 01:15:50 -08003607 qr_new(&arena->chunks_cache, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08003608
Jason Evans243f7a02016-02-19 20:09:31 -08003609 if (opt_purge == purge_mode_decay)
3610 arena_decay_init(arena, arena_decay_time_default_get());
3611
Jason Evansee41ad42015-02-15 18:04:46 -08003612 ql_new(&arena->huge);
Jason Evansb2c0d632016-04-13 23:36:15 -07003613 if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
3614 WITNESS_RANK_ARENA_HUGE))
Jason Evansee41ad42015-02-15 18:04:46 -08003615 return (NULL);
3616
Jason Evans5c77af92016-11-14 18:27:23 -08003617 extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
Jason Evansb49a3342015-07-28 11:28:19 -04003618 extent_tree_ad_new(&arena->chunks_ad_cached);
Jason Evans5c77af92016-11-14 18:27:23 -08003619 extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
Jason Evansb49a3342015-07-28 11:28:19 -04003620 extent_tree_ad_new(&arena->chunks_ad_retained);
Jason Evansb2c0d632016-04-13 23:36:15 -07003621 if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
3622 WITNESS_RANK_ARENA_CHUNKS))
Jason Evansee41ad42015-02-15 18:04:46 -08003623 return (NULL);
3624 ql_new(&arena->node_cache);
Jason Evansb2c0d632016-04-13 23:36:15 -07003625 if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
3626 WITNESS_RANK_ARENA_NODE_CACHE))
Jason Evansee41ad42015-02-15 18:04:46 -08003627 return (NULL);
3628
Jason Evansb49a3342015-07-28 11:28:19 -04003629 arena->chunk_hooks = chunk_hooks_default;
Jason Evanse476f8a2010-01-16 09:53:50 -08003630
3631 /* Initialize bins. */
Jason Evansb1726102012-02-28 16:50:47 -08003632 for (i = 0; i < NBINS; i++) {
Jason Evansc9a4bf92016-04-22 14:36:48 -07003633 arena_bin_t *bin = &arena->bins[i];
Jason Evansb2c0d632016-04-13 23:36:15 -07003634 if (malloc_mutex_init(&bin->lock, "arena_bin",
3635 WITNESS_RANK_ARENA_BIN))
Jason Evans8bb31982014-10-07 23:14:57 -07003636 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08003637 bin->runcur = NULL;
Jason Evansc6a2c392016-03-26 17:30:37 -07003638 arena_run_heap_new(&bin->runs);
Jason Evans7372b152012-02-10 20:22:09 -08003639 if (config_stats)
3640 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
Jason Evanse476f8a2010-01-16 09:53:50 -08003641 }
3642
Jason Evans8bb31982014-10-07 23:14:57 -07003643 return (arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08003644}
3645
Jason Evans49f7e8f2011-03-15 13:59:15 -07003646/*
3647 * Calculate bin_info->run_size such that it meets the following constraints:
3648 *
Jason Evans155bfa72014-10-05 17:54:10 -07003649 * *) bin_info->run_size <= arena_maxrun
Jason Evans47e57f92011-03-22 09:00:56 -07003650 * *) bin_info->nregs <= RUN_MAXREGS
Jason Evans49f7e8f2011-03-15 13:59:15 -07003651 *
Jason Evans0c5dd032014-09-29 01:31:39 -07003652 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
3653 * these settings are all interdependent.
Jason Evans49f7e8f2011-03-15 13:59:15 -07003654 */
Jason Evans0c5dd032014-09-29 01:31:39 -07003655static void
3656bin_info_run_size_calc(arena_bin_info_t *bin_info)
Jason Evans49f7e8f2011-03-15 13:59:15 -07003657{
Jason Evans122449b2012-04-06 00:35:09 -07003658 size_t pad_size;
Jason Evans0c5dd032014-09-29 01:31:39 -07003659 size_t try_run_size, perfect_run_size, actual_run_size;
3660 uint32_t try_nregs, perfect_nregs, actual_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003661
3662 /*
Jason Evans122449b2012-04-06 00:35:09 -07003663 * Determine redzone size based on minimum alignment and minimum
3664 * redzone size. Add padding to the end of the run if it is needed to
3665 * align the regions. The padding allows each redzone to be half the
3666 * minimum alignment; without the padding, each redzone would have to
3667 * be twice as large in order to maintain alignment.
3668 */
Jason Evans9c640bf2014-09-11 16:20:44 -07003669 if (config_fill && unlikely(opt_redzone)) {
Jason Evans9f4ee602016-02-24 10:32:45 -08003670 size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
Jason Evans122449b2012-04-06 00:35:09 -07003671 if (align_min <= REDZONE_MINSIZE) {
3672 bin_info->redzone_size = REDZONE_MINSIZE;
3673 pad_size = 0;
3674 } else {
3675 bin_info->redzone_size = align_min >> 1;
3676 pad_size = bin_info->redzone_size;
3677 }
3678 } else {
3679 bin_info->redzone_size = 0;
3680 pad_size = 0;
3681 }
3682 bin_info->reg_interval = bin_info->reg_size +
3683 (bin_info->redzone_size << 1);
3684
3685 /*
Jason Evans0c5dd032014-09-29 01:31:39 -07003686 * Compute run size under ideal conditions (no redzones, no limit on run
3687 * size).
Jason Evans49f7e8f2011-03-15 13:59:15 -07003688 */
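	/*
	 * Illustrative example (not part of the original source): assuming
	 * 4 KiB pages and reg_size == 96, the loop below tries 4096/42 and
	 * 8192/85 regions before settling on perfect_run_size == 12288 with
	 * perfect_nregs == 128, the first page multiple evenly divisible by
	 * the region size.
	 */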
Jason Evans0c5dd032014-09-29 01:31:39 -07003689 try_run_size = PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003690 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003691 do {
Jason Evans0c5dd032014-09-29 01:31:39 -07003692 perfect_run_size = try_run_size;
3693 perfect_nregs = try_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003694
Jason Evansae4c7b42012-04-02 07:04:34 -07003695 try_run_size += PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003696 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
Jason Evans0c5dd032014-09-29 01:31:39 -07003697 } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
3698 assert(perfect_nregs <= RUN_MAXREGS);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003699
Jason Evans0c5dd032014-09-29 01:31:39 -07003700 actual_run_size = perfect_run_size;
Jason Evans9e1810c2016-02-24 12:42:23 -08003701 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3702 bin_info->reg_interval);
Jason Evans0c5dd032014-09-29 01:31:39 -07003703
3704 /*
3705 * Redzones can require enough padding that not even a single region can
3706 * fit within the number of pages that would normally be dedicated to a
3707 * run for this size class. Increase the run size until at least one
3708 * region fits.
3709 */
3710 while (actual_nregs == 0) {
3711 assert(config_fill && unlikely(opt_redzone));
3712
3713 actual_run_size += PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003714 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3715 bin_info->reg_interval);
Jason Evans0c5dd032014-09-29 01:31:39 -07003716 }
3717
3718 /*
3719 * Make sure that the run will fit within an arena chunk.
3720 */
Jason Evans155bfa72014-10-05 17:54:10 -07003721 while (actual_run_size > arena_maxrun) {
Jason Evans0c5dd032014-09-29 01:31:39 -07003722 actual_run_size -= PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003723 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3724 bin_info->reg_interval);
Jason Evans0c5dd032014-09-29 01:31:39 -07003725 }
3726 assert(actual_nregs > 0);
Jason Evans5707d6f2015-03-06 17:14:05 -08003727 assert(actual_run_size == s2u(actual_run_size));
Jason Evans49f7e8f2011-03-15 13:59:15 -07003728
3729 /* Copy final settings. */
Jason Evans0c5dd032014-09-29 01:31:39 -07003730 bin_info->run_size = actual_run_size;
3731 bin_info->nregs = actual_nregs;
Jason Evans9e1810c2016-02-24 12:42:23 -08003732 bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
3733 bin_info->reg_interval) - pad_size + bin_info->redzone_size);
Jason Evans122449b2012-04-06 00:35:09 -07003734
3735 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
3736 * bin_info->reg_interval) + pad_size == bin_info->run_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003737}
3738
Jason Evansb1726102012-02-28 16:50:47 -08003739static void
Jason Evans49f7e8f2011-03-15 13:59:15 -07003740bin_info_init(void)
3741{
3742 arena_bin_info_t *bin_info;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003743
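	/*
	 * SIZE_CLASSES expands to one SC(...) entry per size class; entries
	 * whose bin field is "yes" initialize the corresponding
	 * arena_bin_info element, while "no" entries expand to nothing.
	 */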
Jason Evans8a03cf02015-05-04 09:58:36 -07003744#define BIN_INFO_INIT_bin_yes(index, size) \
Jason Evansd04047c2014-05-28 16:11:55 -07003745 bin_info = &arena_bin_info[index]; \
Jason Evansb1726102012-02-28 16:50:47 -08003746 bin_info->reg_size = size; \
Jason Evans0c5dd032014-09-29 01:31:39 -07003747 bin_info_run_size_calc(bin_info); \
Jason Evansb1726102012-02-28 16:50:47 -08003748 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
Jason Evansd04047c2014-05-28 16:11:55 -07003749#define BIN_INFO_INIT_bin_no(index, size)
Jason Evans1abb49f2016-04-17 16:16:11 -07003750#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
Jason Evansd04047c2014-05-28 16:11:55 -07003751 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
Jason Evansb1726102012-02-28 16:50:47 -08003752 SIZE_CLASSES
Jason Evansd04047c2014-05-28 16:11:55 -07003753#undef BIN_INFO_INIT_bin_yes
3754#undef BIN_INFO_INIT_bin_no
3755#undef SC
Jason Evans49f7e8f2011-03-15 13:59:15 -07003756}
3757
Jason Evans5d8db152016-04-08 14:16:19 -07003758void
Jason Evansa0bf2422010-01-29 14:30:41 -08003759arena_boot(void)
Jason Evanse476f8a2010-01-16 09:53:50 -08003760{
Jason Evans7393f442010-10-01 17:35:43 -07003761 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08003762
Jason Evans8d6a3e82015-03-18 18:55:33 -07003763 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
Jason Evans243f7a02016-02-19 20:09:31 -08003764 arena_decay_time_default_set(opt_decay_time);
Jason Evans8d6a3e82015-03-18 18:55:33 -07003765
Jason Evanse476f8a2010-01-16 09:53:50 -08003766 /*
3767 * Compute the header size such that it is large enough to contain the
Jason Evans7393f442010-10-01 17:35:43 -07003768 * page map. The page map is biased to omit entries for the header
3769 * itself, so some iteration is necessary to compute the map bias.
3770 *
3771 * 1) Compute safe header_size and map_bias values that include enough
3772 * space for an unbiased page map.
3773 * 2) Refine map_bias based on (1) to omit the header pages in the page
3774 * map. The resulting map_bias may be one too small.
3775 * 3) Refine map_bias based on (2). The result will be >= the result
3776 * from (2), and will always be correct.
Jason Evanse476f8a2010-01-16 09:53:50 -08003777 */
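	/*
	 * Hypothetical illustration (sizes are invented): with 4 KiB pages,
	 * 512 pages per chunk, a 128-byte fixed header, and 112 bytes of map
	 * entries per page, pass 1 yields map_bias = 15 from a full-length
	 * map, pass 2 recomputes 14 after omitting those 15 pages, and pass 3
	 * confirms 14.
	 */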
Jason Evans7393f442010-10-01 17:35:43 -07003778 map_bias = 0;
3779 for (i = 0; i < 3; i++) {
Dmitry-Mea306a602015-09-04 13:15:28 +03003780 size_t header_size = offsetof(arena_chunk_t, map_bits) +
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003781 ((sizeof(arena_chunk_map_bits_t) +
3782 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
Jason Evans0c5dd032014-09-29 01:31:39 -07003783 map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
Jason Evans7393f442010-10-01 17:35:43 -07003784 }
3785 assert(map_bias > 0);
3786
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003787 map_misc_offset = offsetof(arena_chunk_t, map_bits) +
3788 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
3789
Jason Evans155bfa72014-10-05 17:54:10 -07003790 arena_maxrun = chunksize - (map_bias << LG_PAGE);
Jason Evansfc0b3b72014-10-09 17:54:06 -07003791 assert(arena_maxrun > 0);
Jason Evans676df882015-09-11 20:50:20 -07003792 large_maxclass = index2size(size2index(chunksize)-1);
3793 if (large_maxclass > arena_maxrun) {
Jason Evans155bfa72014-10-05 17:54:10 -07003794 /*
3795 * For small chunk sizes it's possible for there to be fewer
3796 * non-header pages available than are necessary to serve the
3797 * size classes just below chunksize.
3798 */
Jason Evans676df882015-09-11 20:50:20 -07003799 large_maxclass = arena_maxrun;
Jason Evans155bfa72014-10-05 17:54:10 -07003800 }
Jason Evans676df882015-09-11 20:50:20 -07003801 assert(large_maxclass > 0);
3802 nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
Jason Evans3c4d92e2014-10-12 22:53:59 -07003803 nhclasses = NSIZES - nlclasses - NBINS;
Jason Evansa0bf2422010-01-29 14:30:41 -08003804
Jason Evansb1726102012-02-28 16:50:47 -08003805 bin_info_init();
Jason Evanse476f8a2010-01-16 09:53:50 -08003806}
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003807
3808void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003809arena_prefork0(tsdn_t *tsdn, arena_t *arena)
Jason Evans174c0c32016-04-25 23:14:40 -07003810{
3811
Jason Evansc1e00ef2016-05-10 22:21:10 -07003812 malloc_mutex_prefork(tsdn, &arena->lock);
Jason Evans174c0c32016-04-25 23:14:40 -07003813}
3814
3815void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003816arena_prefork1(tsdn_t *tsdn, arena_t *arena)
Jason Evans174c0c32016-04-25 23:14:40 -07003817{
3818
Jason Evansc1e00ef2016-05-10 22:21:10 -07003819 malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
Jason Evans174c0c32016-04-25 23:14:40 -07003820}
3821
3822void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003823arena_prefork2(tsdn_t *tsdn, arena_t *arena)
Jason Evans174c0c32016-04-25 23:14:40 -07003824{
3825
Jason Evansc1e00ef2016-05-10 22:21:10 -07003826 malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
Jason Evans174c0c32016-04-25 23:14:40 -07003827}
3828
3829void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003830arena_prefork3(tsdn_t *tsdn, arena_t *arena)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003831{
3832 unsigned i;
3833
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003834 for (i = 0; i < NBINS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07003835 malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
3836 malloc_mutex_prefork(tsdn, &arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003837}
3838
3839void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003840arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003841{
3842 unsigned i;
3843
Jason Evansc1e00ef2016-05-10 22:21:10 -07003844 malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003845 for (i = 0; i < NBINS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07003846 malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
3847 malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
3848 malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
3849 malloc_mutex_postfork_parent(tsdn, &arena->lock);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003850}
3851
3852void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003853arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003854{
3855 unsigned i;
3856
Jason Evansc1e00ef2016-05-10 22:21:10 -07003857 malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003858 for (i = 0; i < NBINS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07003859 malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
3860 malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
3861 malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
3862 malloc_mutex_postfork_child(tsdn, &arena->lock);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003863}