#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t curchunks;
static size_t highchunks;

rtree_t chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;

static void *chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind);
static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

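/*
 * Default chunk hooks.  Arenas start out with these; an application may
 * replace them (e.g. via the "arena.<i>.chunk_hooks" mallctl), in which case
 * chunk_hooks_set() installs the replacement hooks field by field.
 */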
const chunk_hooks_t chunk_hooks_default = {
    chunk_alloc_default,
    chunk_dalloc_default,
    chunk_commit_default,
    chunk_decommit_default,
    chunk_purge_default,
    chunk_split_default,
    chunk_merge_default
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void chunk_record(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
    bool zeroed, bool committed);

/******************************************************************************/

static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{

    return (arena->chunk_hooks);
}

chunk_hooks_t
chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
{
    chunk_hooks_t chunk_hooks;

    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
    chunk_hooks = chunk_hooks_get_locked(arena);
    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

    return (chunk_hooks);
}

chunk_hooks_t
chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
    chunk_hooks_t old_chunk_hooks;

    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
    old_chunk_hooks = arena->chunk_hooks;
    /*
     * Copy each field atomically so that it is impossible for readers to
     * see partially updated pointers.  There are places where readers only
     * need one hook function pointer (therefore no need to copy the
     * entirety of arena->chunk_hooks), and stale reads do not affect
     * correctness, so they perform unlocked reads.
     */
#define ATOMIC_COPY_HOOK(n) do { \
    union { \
        chunk_##n##_t **n; \
        void **v; \
    } u; \
    u.n = &arena->chunk_hooks.n; \
    atomic_write_p(u.v, chunk_hooks->n); \
} while (0)
    ATOMIC_COPY_HOOK(alloc);
    ATOMIC_COPY_HOOK(dalloc);
    ATOMIC_COPY_HOOK(commit);
    ATOMIC_COPY_HOOK(decommit);
    ATOMIC_COPY_HOOK(purge);
    ATOMIC_COPY_HOOK(split);
    ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

    return (old_chunk_hooks);
}

static void
chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool locked)
{
    static const chunk_hooks_t uninitialized_hooks =
        CHUNK_HOOKS_INITIALIZER;

    if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
        0) {
        *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
            chunk_hooks_get(tsdn, arena);
    }
}

static void
chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

    chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

    chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
}

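/*
 * Register a chunk in chunks_rtree so that its extent node can be looked up by
 * address, and update the curchunks/highchunks counters used for gdump
 * triggering when profiling is enabled.
 */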
bool
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
{

    assert(extent_node_addr_get(node) == chunk);

    if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
        return (true);
    if (config_prof && opt_prof) {
        size_t size = extent_node_size_get(node);
        size_t nadd = (size == 0) ? 1 : size / chunksize;
        size_t cur = atomic_add_z(&curchunks, nadd);
        size_t high = atomic_read_z(&highchunks);
        while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
            /*
             * Don't refresh cur, because it may have decreased
             * since this thread lost the highchunks update race.
             */
            high = atomic_read_z(&highchunks);
        }
        if (cur > high && prof_gdump_get_unlocked())
            prof_gdump(tsdn);
    }

    return (false);
}

void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
    bool err;

    err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
    assert(!err);
    if (config_prof && opt_prof) {
        size_t size = extent_node_size_get(node);
        size_t nsub = (size == 0) ? 1 : size / chunksize;
        assert(atomic_read_z(&curchunks) >= nsub);
        atomic_sub_z(&curchunks, nsub);
    }
}

/*
 * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
 * best fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
{
    extent_node_t *node;
    size_t qsize;
    extent_node_t key;

    assert(size == CHUNK_CEILING(size));

    qsize = extent_size_quantize_ceil(size);
    extent_node_init(&key, arena, NULL, qsize, 0, false, false);
    node = extent_tree_szsnad_nsearch(chunks_szsnad, &key);
    assert(node == NULL || extent_node_size_get(node) >= size);
    return node;
}

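/*
 * Try to satisfy an allocation from the given trees of cached/retained
 * extents.  On success, any leading or trailing space left over after
 * splitting is reinserted into the trees, and the returned region is
 * committed (if necessary) and zeroed (if requested).
 */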
static void *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit, bool dalloc_node)
{
    void *ret;
    extent_node_t *node;
    size_t alloc_size, leadsize, trailsize;
    bool zeroed, committed;

    assert(CHUNK_CEILING(size) == size);
    assert(alignment > 0);
    assert(new_addr == NULL || alignment == chunksize);
    assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
    /*
     * Cached chunks use the node linkage embedded in their headers, in
     * which case dalloc_node is true, and new_addr is non-NULL because
     * we're operating on a specific chunk.
     */
    assert(dalloc_node || new_addr != NULL);

    alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
    chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
    if (new_addr != NULL) {
        extent_node_t key;
        extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
            false);
        node = extent_tree_ad_search(chunks_ad, &key);
    } else {
        node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
    }
    if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
        size)) {
        malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
        return (NULL);
    }
    leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
        alignment) - (uintptr_t)extent_node_addr_get(node);
    assert(new_addr == NULL || leadsize == 0);
    assert(extent_node_size_get(node) >= leadsize + size);
    trailsize = extent_node_size_get(node) - leadsize - size;
    ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
    *sn = extent_node_sn_get(node);
    zeroed = extent_node_zeroed_get(node);
    if (zeroed)
        *zero = true;
    committed = extent_node_committed_get(node);
    if (committed)
        *commit = true;
    /* Split the lead. */
    if (leadsize != 0 &&
        chunk_hooks->split(extent_node_addr_get(node),
        extent_node_size_get(node), leadsize, size, false, arena->ind)) {
        malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
        return (NULL);
    }
    /* Remove node from the tree. */
    extent_tree_szsnad_remove(chunks_szsnad, node);
    extent_tree_ad_remove(chunks_ad, node);
    arena_chunk_cache_maybe_remove(arena, node, cache);
    if (leadsize != 0) {
        /* Insert the leading space as a smaller chunk. */
        extent_node_size_set(node, leadsize);
        extent_tree_szsnad_insert(chunks_szsnad, node);
        extent_tree_ad_insert(chunks_ad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
        node = NULL;
    }
    if (trailsize != 0) {
        /* Split the trail. */
        if (chunk_hooks->split(ret, size + trailsize, size,
            trailsize, false, arena->ind)) {
            if (dalloc_node && node != NULL)
                arena_node_dalloc(tsdn, arena, node);
            malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
            chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
                chunks_ad, cache, ret, size + trailsize, *sn,
                zeroed, committed);
            return (NULL);
        }
        /* Insert the trailing space as a smaller chunk. */
        if (node == NULL) {
            node = arena_node_alloc(tsdn, arena);
            if (node == NULL) {
                malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
                chunk_record(tsdn, arena, chunk_hooks,
                    chunks_szsnad, chunks_ad, cache, ret, size
                    + trailsize, *sn, zeroed, committed);
                return (NULL);
            }
        }
        extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
            trailsize, *sn, zeroed, committed);
        extent_tree_szsnad_insert(chunks_szsnad, node);
        extent_tree_ad_insert(chunks_ad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
        node = NULL;
    }
    if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
        malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
        chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
            cache, ret, size, *sn, zeroed, committed);
        return (NULL);
    }
    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

    assert(dalloc_node || node != NULL);
    if (dalloc_node && node != NULL)
        arena_node_dalloc(tsdn, arena, node);
    if (*zero) {
        if (!zeroed)
            memset(ret, 0, size);
        else if (config_debug) {
            size_t i;
            size_t *p = (size_t *)(uintptr_t)ret;

            for (i = 0; i < size / sizeof(size_t); i++)
                assert(p[i] == 0);
        }
        if (config_valgrind)
            JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
    }
    return (ret);
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
{
    void *ret;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    /* "primary" dss. */
    if (have_dss && dss_prec == dss_prec_primary && (ret =
        chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
        commit)) != NULL)
        return (ret);
    /* mmap. */
    if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
        NULL)
        return (ret);
    /* "secondary" dss. */
    if (have_dss && dss_prec == dss_prec_secondary && (ret =
        chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
        commit)) != NULL)
        return (ret);

    /* All strategies for allocation failed. */
    return (NULL);
}

void *
chunk_alloc_base(size_t size)
{
    void *ret;
    bool zero, commit;

    /*
     * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
     * because it's critical that chunk_alloc_base() return untouched
     * demand-zeroed virtual memory.
     */
    zero = true;
    commit = true;
    ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    return (ret);
}

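/* Try to satisfy an allocation from the arena's cache of dirty chunks. */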
void *
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit, bool dalloc_node)
{
    void *ret;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = chunk_recycle(tsdn, arena, chunk_hooks,
        &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
        new_addr, size, alignment, sn, zero, commit, dalloc_node);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
    return (ret);
}

static arena_t *
chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
{
    arena_t *arena;

    arena = arena_get(tsdn, arena_ind, false);
    /*
     * The arena we're allocating on behalf of must have been initialized
     * already.
     */
    assert(arena != NULL);
    return (arena);
}

static void *
chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;

    ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
        commit, arena->dss_prec);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    return (ret);
}

static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
    tsdn_t *tsdn;
    arena_t *arena;

    tsdn = tsdn_fetch();
    arena = chunk_arena_get(tsdn, arena_ind);

    return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
        zero, commit));
}

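/*
 * Try to satisfy an allocation from retained memory, i.e. virtual memory that
 * was previously deallocated by the arena but never returned to the operating
 * system.
 */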
static void *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit)
{
    void *ret;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = chunk_recycle(tsdn, arena, chunk_hooks,
        &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
        new_addr, size, alignment, sn, zero, commit, true);

    if (config_stats && ret != NULL)
        arena->stats.retained -= size;

    return (ret);
}

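/*
 * Allocate a chunk on behalf of arena: first try retained memory, then fall
 * back to the arena's chunk allocation hook.
 */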
void *
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit)
{
    void *ret;

    chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);

    ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
        alignment, sn, zero, commit);
    if (ret == NULL) {
        if (chunk_hooks->alloc == chunk_alloc_default) {
            /* Call directly to propagate tsdn. */
            ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
                size, alignment, zero, commit);
        } else {
            ret = chunk_hooks->alloc(new_addr, size, alignment,
                zero, commit, arena->ind);
        }

        if (ret == NULL)
            return (NULL);

        *sn = arena_extent_sn_next(arena);

        if (config_valgrind && chunk_hooks->alloc !=
            chunk_alloc_default)
            JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
    }

    return (ret);
}

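/*
 * Record a chunk in the given extent trees, coalescing it with the following
 * and/or previous recorded regions when the merge hook allows it.
 */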
static void
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
    bool unzeroed;
    extent_node_t *node, *prev;
    extent_node_t key;

    assert(!cache || !zeroed);
    unzeroed = cache || !zeroed;
    JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
    chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
    extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
        false, false);
    node = extent_tree_ad_nsearch(chunks_ad, &key);
    /* Try to coalesce forward. */
    if (node != NULL && extent_node_addr_get(node) ==
        extent_node_addr_get(&key) && extent_node_committed_get(node) ==
        committed && !chunk_hooks->merge(chunk, size,
        extent_node_addr_get(node), extent_node_size_get(node), false,
        arena->ind)) {
        /*
         * Coalesce chunk with the following address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert from/into chunks_szsnad.
         */
        extent_tree_szsnad_remove(chunks_szsnad, node);
        arena_chunk_cache_maybe_remove(arena, node, cache);
        extent_node_addr_set(node, chunk);
        extent_node_size_set(node, size + extent_node_size_get(node));
        if (sn < extent_node_sn_get(node))
            extent_node_sn_set(node, sn);
        extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
            !unzeroed);
        extent_tree_szsnad_insert(chunks_szsnad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
    } else {
        /* Coalescing forward failed, so insert a new node. */
        node = arena_node_alloc(tsdn, arena);
        if (node == NULL) {
            /*
             * Node allocation failed, which is an exceedingly
             * unlikely failure.  Leak chunk after making sure its
             * pages have already been purged, so that this is only
             * a virtual memory leak.
             */
            if (cache) {
                chunk_purge_wrapper(tsdn, arena, chunk_hooks,
                    chunk, size, 0, size);
            }
            goto label_return;
        }
        extent_node_init(node, arena, chunk, size, sn, !unzeroed,
            committed);
        extent_tree_ad_insert(chunks_ad, node);
        extent_tree_szsnad_insert(chunks_szsnad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
    }

    /* Try to coalesce backward. */
    prev = extent_tree_ad_prev(chunks_ad, node);
    if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
        extent_node_size_get(prev)) == chunk &&
        extent_node_committed_get(prev) == committed &&
        !chunk_hooks->merge(extent_node_addr_get(prev),
        extent_node_size_get(prev), chunk, size, false, arena->ind)) {
        /*
         * Coalesce chunk with the previous address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert node from/into chunks_szsnad.
         */
        extent_tree_szsnad_remove(chunks_szsnad, prev);
        extent_tree_ad_remove(chunks_ad, prev);
        arena_chunk_cache_maybe_remove(arena, prev, cache);
        extent_tree_szsnad_remove(chunks_szsnad, node);
        arena_chunk_cache_maybe_remove(arena, node, cache);
        extent_node_addr_set(node, extent_node_addr_get(prev));
        extent_node_size_set(node, extent_node_size_get(prev) +
            extent_node_size_get(node));
        if (extent_node_sn_get(prev) < extent_node_sn_get(node))
            extent_node_sn_set(node, extent_node_sn_get(prev));
        extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
            extent_node_zeroed_get(node));
        extent_tree_szsnad_insert(chunks_szsnad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);

        arena_node_dalloc(tsdn, arena, prev);
    }

label_return:
    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
}

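/* Return a chunk to the arena's cache of dirty chunks, then maybe purge. */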
void
chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t sn, bool committed)
{

    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
        &arena->chunks_ad_cached, true, chunk, size, sn, false,
        committed);
    arena_maybe_purge(tsdn, arena);
}

static bool
chunk_dalloc_default_impl(void *chunk, size_t size)
{

    if (!have_dss || !chunk_in_dss(chunk))
        return (chunk_dalloc_mmap(chunk, size));
    return (true);
}

static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind)
{

    return (chunk_dalloc_default_impl(chunk, size));
}

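/*
 * Deallocate a chunk via the arena's deallocation hook.  If the hook refuses,
 * decommit and/or purge the chunk and record it as retained rather than
 * leaking it.
 */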
void
chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
    bool err;

    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
    /* Try to deallocate. */
    if (chunk_hooks->dalloc == chunk_dalloc_default) {
        /* Call directly to propagate tsdn. */
        err = chunk_dalloc_default_impl(chunk, size);
    } else
        err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);

    if (!err)
        return;
    /* Try to decommit; purge if that fails. */
    if (committed) {
        committed = chunk_hooks->decommit(chunk, size, 0, size,
            arena->ind);
    }
    zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
        arena->ind);
    chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
        &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
        committed);

    if (config_stats)
        arena->stats.retained += size;
}

static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

    return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

    return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert((offset & PAGE_MASK) == 0);
    assert(length != 0);
    assert((length & PAGE_MASK) == 0);

    return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

bool
chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t offset, size_t length)
{

    chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
    return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}

static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{

    if (!maps_coalesce)
        return (true);
    return (false);
}

static bool
chunk_merge_default_impl(void *chunk_a, void *chunk_b)
{

    if (!maps_coalesce)
        return (true);
    if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
        return (true);

    return (false);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{

    return (chunk_merge_default_impl(chunk_a, chunk_b));
}

static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{

    return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
        sizeof(rtree_node_elm_t)));
}

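/* Initialize chunk parameters (chunksize, the chunks rtree, and dss support). */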
bool
chunk_boot(void)
{
#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);

    /*
     * Verify actual page size is equal to or an integral multiple of
     * configured page size.
     */
    if (info.dwPageSize & ((1U << LG_PAGE) - 1))
        return (true);

    /*
     * Configure chunksize (if not set) to match granularity (usually 64K),
     * so pages_map will always take fast path.
     */
    if (!opt_lg_chunk) {
        opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
            - 1;
    }
#else
    if (!opt_lg_chunk)
        opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

    /* Set variables according to the value of opt_lg_chunk. */
    chunksize = (ZU(1) << opt_lg_chunk);
    assert(chunksize >= PAGE);
    chunksize_mask = chunksize - 1;
    chunk_npages = (chunksize >> LG_PAGE);

    if (have_dss)
        chunk_dss_boot();
    if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
        opt_lg_chunk), chunks_rtree_node_alloc, NULL))
        return (true);

    return (false);
}