#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

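/*
 * Look up the extent node that tracks the huge allocation starting at ptr.
 * The node must previously have been registered via huge_node_set().
 */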
static extent_node_t *
huge_node_get(const void *ptr)
{
	extent_node_t *node;

	node = chunk_lookup(ptr, true);
	assert(!extent_node_achunk_get(node));

	return (node);
}

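/*
 * Register the mapping from ptr's chunk(s) to node so that huge_node_get()
 * can find it later.  Returns true on error.
 */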
static bool
huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{

	assert(extent_node_addr_get(node) == ptr);
	assert(!extent_node_achunk_get(node));
	return (chunk_register(tsdn, ptr, node));
}

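/*
 * Re-register ptr after the node's metadata (e.g. its size) has been updated.
 * The assert reflects that re-registration is not expected to fail for a
 * chunk that was already tracked.
 */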
static void
huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{
	bool err;

	err = huge_node_set(tsdn, ptr, node);
	assert(!err);
}

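/* Remove the mapping from ptr to node. */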
static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

	chunk_deregister(ptr, node);
}

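/*
 * Allocate a chunksize-aligned huge region; usize must already be a valid
 * size class (usize == s2u(usize)).
 */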
void *
huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
{

	assert(usize == s2u(usize));

	return (huge_palloc(tsdn, arena, usize, chunksize, zero));
}

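/*
 * Allocate a huge region with the specified alignment: allocate an extent
 * node to track it, back it with one or more contiguous chunks from the
 * arena, register the node, and link it into the arena's huge list.
 */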
void *
huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero)
{
	void *ret;
	size_t ausize;
	arena_t *iarena;
	extent_node_t *node;
	size_t sn;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	assert(!tsdn_null(tsdn) || arena != NULL);

	ausize = sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
		return (NULL);
	assert(ausize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
	iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
	    a0get();
	node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, NULL, true, iarena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	/* ANDROID change */
	if (likely(!tsdn_null(tsdn))) {
#if !defined(__LP64__)
		/*
		 * On 32 bit systems, using a per arena cache can exhaust
		 * virtual address space.  Force all huge allocations to
		 * always take place in the first arena.
		 */
		extern arena_t *a0get(void);
		arena = a0get();
#else
		arena = arena_choose(tsdn_tsd(tsdn), arena);
#endif
	}
	/* End ANDROID change */
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
	    arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
		idalloctm(tsdn, node, NULL, true, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);

	if (huge_node_set(tsdn, ret, node)) {
		arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
		idalloctm(tsdn, node, NULL, true, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, usize);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, JEMALLOC_ALLOC_JUNK, usize);

	arena_decay_tick(tsdn, arena);
	return (ret);
}

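/*
 * Under JEMALLOC_JET the junk-fill routine is compiled under an alternate
 * name and exported through a function pointer (see below) so that tests
 * can interpose on it.
 */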
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, JEMALLOC_FREE_JUNK, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

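/*
 * Resize in place when the old and new sizes round to the same number of
 * chunks: the chunk footprint is unchanged, so only fill contents, the
 * node's size/zeroed metadata, and arena stats need updating.
 */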
static void
huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize_min, size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

	/* Increase usize to incorporate extra. */
	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
	    <= oldsize; usize = usize_next)
		; /* Do nothing. */

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize),
			    JEMALLOC_FREE_JUNK, sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(tsdn, arena,
			    &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
			    sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	/* Update the size of the huge allocation. */
	huge_node_unset(ptr, node);
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	huge_node_reset(tsdn, ptr, node);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!pre_zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize),
			    JEMALLOC_ALLOC_JUNK, usize - oldsize);
		}
	}
}

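/*
 * Shrink in place by splitting off and discarding the chunks that trail the
 * new usable size.  Returns true on failure.
 */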
static bool
huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks;
	size_t cdiff;
	bool pre_zeroed, post_zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);
	chunk_hooks = chunk_hooks_get(tsdn, arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		return (true);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(tsdn, arena,
			    &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
			    usize), CHUNK_CEILING(oldsize),
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	/* Update the size of the huge allocation. */
	huge_node_unset(ptr, node);
	extent_node_size_set(node, usize);
	huge_node_reset(tsdn, ptr, node);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
	    extent_node_sn_get(node));

	return (false);
}

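/*
 * Grow in place by extending the mapping with additional contiguous chunks.
 * Returns true on failure.
 */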
static bool
huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize, bool zero)
{
	extent_node_t *node;
	arena_t *arena;
	bool is_zeroed_subchunk, is_zeroed_chunk;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	is_zeroed_subchunk = extent_node_zeroed_get(node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	/*
	 * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
	 * update extent's zeroed field, and zero as necessary.
	 */
	is_zeroed_chunk = false;
	if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
	    &is_zeroed_chunk))
		return (true);

	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	huge_node_unset(ptr, node);
	extent_node_size_set(node, usize);
	extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
	    is_zeroed_chunk);
	huge_node_reset(tsdn, ptr, node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			    CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
		    usize - oldsize);
	}

	return (false);
}

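/*
 * Try to satisfy a reallocation without moving the object: returns false if
 * the allocation was resized in place to some size in [usize_min, usize_max],
 * true if the caller must fall back to allocate-copy-free.  For example,
 * xallocx() on a huge object ultimately reaches this path, with usize_min
 * derived (roughly) from size and usize_max from size+extra.
 */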
bool
huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);
	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
		    zero)) {
			arena_decay_tick(tsdn, huge_aalloc(ptr));
			return (false);
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
		    ptr, oldsize, usize_min, zero)) {
			arena_decay_tick(tsdn, huge_aalloc(ptr));
			return (false);
		}
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
		huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
		    usize_max, zero);
		arena_decay_tick(tsdn, huge_aalloc(ptr));
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
		if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
		    usize_max)) {
			arena_decay_tick(tsdn, huge_aalloc(ptr));
			return (false);
		}
	}
	return (true);
}

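/* Allocate new space for a huge reallocation that cannot be done in place. */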
static void *
huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero)
{

	if (alignment <= chunksize)
		return (huge_malloc(tsdn, arena, usize, zero));
	return (huge_palloc(tsdn, arena, usize, alignment, zero));
}

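/*
 * Reallocate a huge region: first attempt an in-place resize, and otherwise
 * allocate new space, copy, and free the old region.
 */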
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= HUGE_MAXCLASS);

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
	    zero))
		return (ptr);

	/*
	 * usize and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
	    zero);
	if (ret == NULL)
		return (NULL);

	copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache, true);
	return (ret);
}

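/*
 * Deallocate a huge region: unlink and deregister its extent node, junk-fill
 * if configured, return the chunks to the arena, and free the node itself.
 */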
void
huge_dalloc(tsdn_t *tsdn, void *ptr)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node),
	    extent_node_sn_get(node));
	idalloctm(tsdn, node, NULL, true, true);

	arena_decay_tick(tsdn, arena);
}

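/* Return the arena that owns the huge allocation at ptr. */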
arena_t *
huge_aalloc(const void *ptr)
{

	return (extent_node_arena_get(huge_node_get(ptr)));
}

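/* Return the usable size of the huge allocation at ptr. */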
size_t
huge_salloc(tsdn_t *tsdn, const void *ptr)
{
	size_t size;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	size = extent_node_size_get(node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	return (size);
}

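/* Read the profiling context stored in the allocation's extent node. */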
prof_tctx_t *
huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
	prof_tctx_t *tctx;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	tctx = extent_node_prof_tctx_get(node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	return (tctx);
}

void
huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	extent_node_prof_tctx_set(node, tctx);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
}

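/*
 * Reset the profiling context to (prof_tctx_t *)(uintptr_t)1U, the sentinel
 * value that marks the allocation as not sampled.
 */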
void
huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
{

	huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
}