#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

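/*
 * Huge allocations are backed by one or more contiguous chunks and cover
 * requests too large for the regular arena size classes.  Each huge
 * allocation is tracked out of band by an extent node that records its owning
 * arena, base address, size, and zeroed state.  The helpers below map a huge
 * allocation's base address to its extent node via chunk_register(),
 * chunk_deregister(), and chunk_lookup().
 */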
static extent_node_t *
huge_node_get(const void *ptr)
{
	extent_node_t *node;

	node = chunk_lookup(ptr, true);
	assert(!extent_node_achunk_get(node));

	return (node);
}

static bool
huge_node_set(const void *ptr, extent_node_t *node)
{

	assert(extent_node_addr_get(node) == ptr);
	assert(!extent_node_achunk_get(node));
	return (chunk_register(ptr, node));
}

static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

	chunk_deregister(ptr, node);
}

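/*
 * Allocate a chunk-aligned huge object.  usize must already be a usable size
 * (usize == s2u(usize)); alignment defaults to chunksize.
 */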
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
    tcache_t *tcache)
{

	assert(usize == s2u(usize));

	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}

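/*
 * Allocate a huge object with at least the requested alignment.  The tracking
 * extent node is allocated separately, registered via huge_node_set(), and
 * linked into the owning arena's huge list under arena->huge_mtx.
 */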
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;
	size_t ausize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	ausize = sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
		return (NULL);
	assert(ausize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, tcache, true, arena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	/* ANDROID change */
#if !defined(__LP64__)
	/*
	 * On 32 bit systems, using a per arena cache can exhaust
	 * virtual address space. Force all huge allocations to
	 * always take place in the first arena.
	 */
	extern arena_t *a0get(void);
	arena = a0get();
#else
	arena = arena_choose(tsd, arena);
#endif
	/* End ANDROID change */
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
	    usize, alignment, &is_zeroed)) == NULL) {
		idalloctm(tsd, node, tcache, true, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, usize, is_zeroed, true);

	if (huge_node_set(ret, node)) {
		arena_chunk_dalloc_huge(arena, ret, usize);
		idalloctm(tsd, node, tcache, true, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(&arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, usize);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, 0xa5, usize);

	arena_decay_tick(tsd, arena);
	return (ret);
}

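/*
 * When built with JEMALLOC_JET, huge_dalloc_junk is exposed through a
 * replaceable function pointer so that tests can interpose on junk filling.
 */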
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

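/*
 * Resize in place without changing the set of backing chunks: choose the final
 * usize within [usize_min, usize_max], update the extent node's recorded size
 * and zeroed state, and junk/zero fill or purge the affected region as
 * configured.
 */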
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

	/* Increase usize to incorporate extra. */
	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
	    <= oldsize; usize = usize_next)
		; /* Do nothing. */

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    ptr, CHUNK_CEILING(oldsize), usize, sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!pre_zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
			    oldsize);
		}
	}
}

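/*
 * Shrink in place: split off the excess whole chunks and return them to the
 * arena, junk filling or purging the trimmed tail as configured.
 */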
static bool
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks;
	size_t cdiff;
	bool pre_zeroed, post_zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);
	chunk_hooks = chunk_hooks_get(arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		return (true);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
			    CHUNK_CEILING(oldsize),
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);

	return (false);
}

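/*
 * Grow in place by asking the arena for additional chunks contiguous with the
 * existing allocation; on success, zero or junk fill the newly usable space as
 * required.
 */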
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero)
{
	extent_node_t *node;
	arena_t *arena;
	bool is_zeroed_subchunk, is_zeroed_chunk;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	is_zeroed_subchunk = extent_node_zeroed_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	/*
	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
	 * that it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed_chunk = zero;

	if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
	    &is_zeroed_chunk))
		return (true);

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			    CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
		    oldsize);
	}

	return (false);
}

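/*
 * Try to satisfy a resize request to a usable size in [usize_min, usize_max]
 * without moving the allocation.  Returns false on success; returns true if
 * the caller must fall back to allocating new space and copying.
 */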
bool
huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);
	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max,
		    zero)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
		    oldsize, usize_min, zero)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
		    zero);
		arena_decay_tick(tsd, huge_aalloc(ptr));
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
		if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
	}
	return (true);
}

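/*
 * Allocation helper for the copy path: plain huge_malloc() suffices unless an
 * alignment stricter than chunksize is required.
 */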
static void *
huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment <= chunksize)
		return (huge_malloc(tsd, arena, usize, zero, tcache));
	return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}

void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= HUGE_MAXCLASS);

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
		return (ptr);

	/*
	 * usize and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
	    tcache);
	if (ret == NULL)
		return (NULL);

	copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache);
	return (ret);
}

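/*
 * Deallocate a huge object: deregister and unlink its extent node, junk fill
 * if enabled, return the backing chunks to the arena, and free the node
 * itself.
 */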
void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
	malloc_mutex_lock(&arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node));
	idalloctm(tsd, node, tcache, true, true);

	arena_decay_tick(tsd, arena);
}

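/* Return the arena that owns the given huge allocation. */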
arena_t *
huge_aalloc(const void *ptr)
{

	return (extent_node_arena_get(huge_node_get(ptr)));
}

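/* Return the current usable size of the given huge allocation. */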
size_t
huge_salloc(const void *ptr)
{
	size_t size;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	size = extent_node_size_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (size);
}

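/*
 * Profiling accessors: the allocation's prof_tctx is stored in its extent
 * node, protected by the owning arena's huge_mtx.
 */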
prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
	prof_tctx_t *tctx;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	tctx = extent_node_prof_tctx_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (tctx);
}

void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	extent_node_prof_tctx_set(node, tctx);
	malloc_mutex_unlock(&arena->huge_mtx);
}

void
huge_prof_tctx_reset(const void *ptr)
{

	huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}
459}