#include "jemalloc/internal/jemalloc_internal.h"
#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif

/*
 * The malloc_default_purgeable_zone() function is only available on >= 10.6.
 * We need to check whether it is present at runtime, thus the weak_import.
 */
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);

/******************************************************************************/
/* Data. */

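/*
 * default_zone is the zone that was the system default when jemalloc
 * registered itself; purgeable_zone is Darwin's lazily created purgeable zone
 * (NULL when malloc_default_purgeable_zone() is unavailable); jemalloc_zone is
 * the zone this file registers.
 */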
static malloc_zone_t *default_zone, *purgeable_zone;
static malloc_zone_t jemalloc_zone;
static struct malloc_introspection_t jemalloc_zone_introspect;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static size_t zone_size(malloc_zone_t *zone, void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
    size_t size);
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
    size_t size);
#endif
static void *zone_destroy(malloc_zone_t *zone);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);

/******************************************************************************/
/*
 * Functions.
 */

static size_t
zone_size(malloc_zone_t *zone, void *ptr)
{

    /*
     * There appear to be places within Darwin (such as setenv(3)) that
     * cause calls to this function with pointers that *no* zone owns. If
     * we knew that all pointers were owned by *some* zone, we could split
     * our zone into two parts, and use one as the default allocator and
     * the other as the default deallocator/reallocator. Since that will
     * not work in practice, we must check all pointers to assure that they
     * reside within a mapped chunk before determining size.
     */
    return (ivsalloc(tsdn_fetch(), ptr, config_prof));
}

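/*
 * The allocation entry points below simply forward to the corresponding
 * public jemalloc functions; the zone argument is unused.
 */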
static void *
zone_malloc(malloc_zone_t *zone, size_t size)
{

    return (je_malloc(size));
}

static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
{

    return (je_calloc(num, size));
}

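/*
 * valloc(3) must return page-aligned memory; emulate it via
 * je_posix_memalign() with the page size (PAGE).
 */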
static void *
zone_valloc(malloc_zone_t *zone, size_t size)
{
    void *ret = NULL; /* Assignment avoids useless compiler warning. */

    je_posix_memalign(&ret, PAGE, size);

    return (ret);
}

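/*
 * zone_free() and zone_realloc() may receive pointers that jemalloc does not
 * own (e.g. memory allocated before this zone became the default), so
 * ivsalloc() is used to route each pointer either to the je_* implementation
 * or to the system allocator.
 */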
static void
zone_free(malloc_zone_t *zone, void *ptr)
{

    if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) {
        je_free(ptr);
        return;
    }

    free(ptr);
}

static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{

    if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0)
        return (je_realloc(ptr, size));

    return (realloc(ptr, size));
}

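/*
 * zone_memalign() only exists in zone API version 5 and later; it forwards to
 * je_posix_memalign() with the caller-supplied alignment.
 */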
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
    void *ret = NULL; /* Assignment avoids useless compiler warning. */

    je_posix_memalign(&ret, alignment, size);

    return (ret);
}
#endif

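/*
 * zone_free_definite_size() is called with a size the caller believes matches
 * the allocation; the assert below cross-checks it against jemalloc's own
 * bookkeeping before freeing.
 */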
#if (JEMALLOC_ZONE_VERSION >= 6)
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
    size_t alloc_size;

    alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof);
    if (alloc_size != 0) {
        assert(alloc_size == size);
        je_free(ptr);
        return;
    }

    free(ptr);
}
#endif

static void *
zone_destroy(malloc_zone_t *zone)
{

    /* This function should never be called. */
    not_reached();
    return (NULL);
}

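/*
 * good_size reports the size that would actually be allocated for a request;
 * s2u() rounds the request up to jemalloc's size class (a zero-byte request
 * is treated as a one-byte request).
 */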
static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{

    if (size == 0)
        size = 1;
    return (s2u(size));
}

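/*
 * force_lock()/force_unlock() are invoked by Darwin's malloc layer around
 * fork(2); they map onto jemalloc's prefork/postfork handlers.
 */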
static void
zone_force_lock(malloc_zone_t *zone)
{

    if (isthreaded)
        jemalloc_prefork();
}

static void
zone_force_unlock(malloc_zone_t *zone)
{

    /*
     * Call jemalloc_postfork_child() rather than
     * jemalloc_postfork_parent(), because this function is executed by both
     * parent and child. The parent can tolerate having state
     * reinitialized, but the child cannot unlock mutexes that were locked
     * by the parent.
     */
    if (isthreaded)
        jemalloc_postfork_child();
}

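/*
 * zone_init() fills in the malloc_zone_t and malloc_introspection_t tables
 * with the wrappers defined above; operations jemalloc does not implement are
 * left NULL.
 */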
static void
zone_init(void)
{

    jemalloc_zone.size = (void *)zone_size;
    jemalloc_zone.malloc = (void *)zone_malloc;
    jemalloc_zone.calloc = (void *)zone_calloc;
    jemalloc_zone.valloc = (void *)zone_valloc;
    jemalloc_zone.free = (void *)zone_free;
    jemalloc_zone.realloc = (void *)zone_realloc;
    jemalloc_zone.destroy = (void *)zone_destroy;
    jemalloc_zone.zone_name = "jemalloc_zone";
    jemalloc_zone.batch_malloc = NULL;
    jemalloc_zone.batch_free = NULL;
    jemalloc_zone.introspect = &jemalloc_zone_introspect;
    jemalloc_zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5)
    jemalloc_zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
    jemalloc_zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
    jemalloc_zone.pressure_relief = NULL;
#endif

    jemalloc_zone_introspect.enumerator = NULL;
    jemalloc_zone_introspect.good_size = (void *)zone_good_size;
    jemalloc_zone_introspect.check = NULL;
    jemalloc_zone_introspect.print = NULL;
    jemalloc_zone_introspect.log = NULL;
    jemalloc_zone_introspect.force_lock = (void *)zone_force_lock;
    jemalloc_zone_introspect.force_unlock = (void *)zone_force_unlock;
    jemalloc_zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
    jemalloc_zone_introspect.zone_locked = NULL;
#endif
#if (JEMALLOC_ZONE_VERSION >= 7)
    jemalloc_zone_introspect.enable_discharge_checking = NULL;
    jemalloc_zone_introspect.disable_discharge_checking = NULL;
    jemalloc_zone_introspect.discharge = NULL;
# ifdef __BLOCKS__
    jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
# else
    jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
# endif
#endif
}

static malloc_zone_t *
zone_default_get(void)
{
    malloc_zone_t **zones = NULL;
    unsigned int num_zones = 0;

    /*
     * On OSX 10.12, malloc_default_zone returns a special zone that is not
     * present in the list of registered zones. That zone uses a "lite zone"
     * if one is present (apparently enabled when malloc stack logging is
     * enabled), or the first registered zone otherwise. In practice this
     * means unless malloc stack logging is enabled, the first registered
     * zone is the default. So get the list of registered zones and use the
     * first one, instead of relying on malloc_default_zone.
     */
    if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
        (vm_address_t**)&zones, &num_zones)) {
        /*
         * Reset the value in case the failure happened after it was
         * set.
         */
        num_zones = 0;
    }

    if (num_zones)
        return (zones[0]);

    return (malloc_default_zone());
}

/* As written, this function can only promote jemalloc_zone. */
static void
zone_promote(void)
{
    malloc_zone_t *zone;

    do {
        /*
         * Unregister and reregister the default zone. On OSX >= 10.6,
         * unregistering takes the last registered zone and places it
         * at the location of the specified zone. Unregistering the
         * default zone thus makes the last registered one the default.
         * On OSX < 10.6, unregistering shifts all registered zones.
         * The first registered zone then becomes the default.
         */
        malloc_zone_unregister(default_zone);
        malloc_zone_register(default_zone);

        /*
         * On OSX 10.6, having the default purgeable zone appear before
         * the default zone makes some things crash because it thinks it
         * owns pointers allocated by the default zone. We thus
         * unregister/re-register it in order to ensure it's always
         * after the default zone. On OSX < 10.6, there is no purgeable
         * zone, so this does nothing. On OSX >= 10.6, unregistering
         * replaces the purgeable zone with the last registered zone
         * above, i.e. the default zone. Registering it again then puts
         * it at the end, obviously after the default zone.
         */
        if (purgeable_zone != NULL) {
            malloc_zone_unregister(purgeable_zone);
            malloc_zone_register(purgeable_zone);
        }

        zone = zone_default_get();
    } while (zone != &jemalloc_zone);
}

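/*
 * zone_register() runs automatically at load time via
 * JEMALLOC_ATTR(constructor); it installs jemalloc_zone as the default zone
 * unless another allocator has already replaced the system default.
 */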
JEMALLOC_ATTR(constructor)
void
zone_register(void)
{

    /*
     * If something else replaced the system default zone allocator, don't
     * register jemalloc's.
     */
    default_zone = zone_default_get();
    if (!default_zone->zone_name || strcmp(default_zone->zone_name,
        "DefaultMallocZone") != 0)
        return;

    /*
     * The default purgeable zone is created lazily by OSX's libc. It uses
     * the default zone when it is created for "small" allocations
     * (< 15 KiB), but assumes the default zone is a scalable_zone. This
     * obviously fails when the default zone is the jemalloc zone, so
     * malloc_default_purgeable_zone() is called beforehand so that the
     * default purgeable zone is created when the default zone is still
     * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
     * to check for the existence of malloc_default_purgeable_zone() at
     * run time.
     */
    purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
        malloc_default_purgeable_zone();

    /* Register the custom zone. At this point it won't be the default. */
    zone_init();
    malloc_zone_register(&jemalloc_zone);

    /* Promote the custom zone to be default. */
    zone_promote();
}