Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 1 | #include "jemalloc/internal/jemalloc_internal.h" |
| 2 | #ifndef JEMALLOC_ZONE |
| 3 | # error "This source file is for zones on Darwin (OS X)." |
| 4 | #endif |
| 5 | |
/*
 * The malloc_default_purgeable_zone() function is only available on >= 10.6.
 * We need to check whether it is present at runtime, thus the weak_import.
 */
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);

/******************************************************************************/
/* Data. */

/*
 * default_zone: the system zone ("DefaultMallocZone") captured by
 * zone_register() before jemalloc takes over.  purgeable_zone: the lazily
 * created default purgeable zone, or NULL when unavailable (OSX < 10.6).
 */
static malloc_zone_t *default_zone, *purgeable_zone;
/* The zone jemalloc registers and promotes to be the process default. */
static malloc_zone_t jemalloc_zone;
/* Introspection vtable wired into jemalloc_zone by zone_init(). */
static struct malloc_introspection_t jemalloc_zone_introspect;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */
| 22 | |
| 23 | static size_t zone_size(malloc_zone_t *zone, void *ptr); |
| 24 | static void *zone_malloc(malloc_zone_t *zone, size_t size); |
| 25 | static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); |
| 26 | static void *zone_valloc(malloc_zone_t *zone, size_t size); |
| 27 | static void zone_free(malloc_zone_t *zone, void *ptr); |
| 28 | static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); |
Mike Hommey | 154829d | 2012-03-20 18:01:38 +0100 | [diff] [blame] | 29 | #if (JEMALLOC_ZONE_VERSION >= 5) |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 30 | static void *zone_memalign(malloc_zone_t *zone, size_t alignment, |
Mike Hommey | 154829d | 2012-03-20 18:01:38 +0100 | [diff] [blame] | 31 | #endif |
| 32 | #if (JEMALLOC_ZONE_VERSION >= 6) |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 33 | size_t size); |
| 34 | static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, |
| 35 | size_t size); |
| 36 | #endif |
| 37 | static void *zone_destroy(malloc_zone_t *zone); |
| 38 | static size_t zone_good_size(malloc_zone_t *zone, size_t size); |
| 39 | static void zone_force_lock(malloc_zone_t *zone); |
| 40 | static void zone_force_unlock(malloc_zone_t *zone); |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 41 | |
| 42 | /******************************************************************************/ |
| 43 | /* |
| 44 | * Functions. |
| 45 | */ |
| 46 | |
| 47 | static size_t |
| 48 | zone_size(malloc_zone_t *zone, void *ptr) |
| 49 | { |
| 50 | |
| 51 | /* |
| 52 | * There appear to be places within Darwin (such as setenv(3)) that |
| 53 | * cause calls to this function with pointers that *no* zone owns. If |
| 54 | * we knew that all pointers were owned by *some* zone, we could split |
| 55 | * our zone into two parts, and use one as the default allocator and |
| 56 | * the other as the default deallocator/reallocator. Since that will |
| 57 | * not work in practice, we must check all pointers to assure that they |
| 58 | * reside within a mapped chunk before determining size. |
| 59 | */ |
Jason Evans | c1e00ef | 2016-05-10 22:21:10 -0700 | [diff] [blame] | 60 | return (ivsalloc(tsdn_fetch(), ptr, config_prof)); |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 61 | } |
| 62 | |
| 63 | static void * |
| 64 | zone_malloc(malloc_zone_t *zone, size_t size) |
| 65 | { |
| 66 | |
Jason Evans | 0a5489e | 2012-03-01 17:19:20 -0800 | [diff] [blame] | 67 | return (je_malloc(size)); |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 68 | } |
| 69 | |
| 70 | static void * |
| 71 | zone_calloc(malloc_zone_t *zone, size_t num, size_t size) |
| 72 | { |
| 73 | |
Jason Evans | 0a5489e | 2012-03-01 17:19:20 -0800 | [diff] [blame] | 74 | return (je_calloc(num, size)); |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 75 | } |
| 76 | |
| 77 | static void * |
| 78 | zone_valloc(malloc_zone_t *zone, size_t size) |
| 79 | { |
| 80 | void *ret = NULL; /* Assignment avoids useless compiler warning. */ |
| 81 | |
Jason Evans | ae4c7b4 | 2012-04-02 07:04:34 -0700 | [diff] [blame] | 82 | je_posix_memalign(&ret, PAGE, size); |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 83 | |
| 84 | return (ret); |
| 85 | } |
| 86 | |
| 87 | static void |
| 88 | zone_free(malloc_zone_t *zone, void *ptr) |
| 89 | { |
| 90 | |
Jason Evans | c1e00ef | 2016-05-10 22:21:10 -0700 | [diff] [blame] | 91 | if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) { |
Mike Hommey | 5b3db09 | 2012-03-26 18:39:35 +0200 | [diff] [blame] | 92 | je_free(ptr); |
| 93 | return; |
| 94 | } |
| 95 | |
| 96 | free(ptr); |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 97 | } |
| 98 | |
| 99 | static void * |
| 100 | zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) |
| 101 | { |
| 102 | |
Jason Evans | c1e00ef | 2016-05-10 22:21:10 -0700 | [diff] [blame] | 103 | if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) |
Mike Hommey | 5b3db09 | 2012-03-26 18:39:35 +0200 | [diff] [blame] | 104 | return (je_realloc(ptr, size)); |
| 105 | |
| 106 | return (realloc(ptr, size)); |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 107 | } |
| 108 | |
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
	void *ret;

	/* Delegate aligned allocation to jemalloc's posix_memalign. */
	ret = NULL;	/* Silence a useless "maybe uninitialized" warning. */
	je_posix_memalign(&ret, alignment, size);

	return (ret);
}
#endif
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 120 | |
#if (JEMALLOC_ZONE_VERSION >= 6)
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
	size_t alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof);

	if (alloc_size == 0) {
		/* Not a jemalloc pointer; fall back to the system free(). */
		free(ptr);
		return;
	}

	/* The caller-supplied size must match what jemalloc recorded. */
	assert(alloc_size == size);
	je_free(ptr);
}
#endif
| 137 | |
| 138 | static void * |
| 139 | zone_destroy(malloc_zone_t *zone) |
| 140 | { |
| 141 | |
| 142 | /* This function should never be called. */ |
Jason Evans | 6556e28 | 2013-10-21 14:56:27 -0700 | [diff] [blame] | 143 | not_reached(); |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 144 | return (NULL); |
| 145 | } |
| 146 | |
| 147 | static size_t |
| 148 | zone_good_size(malloc_zone_t *zone, size_t size) |
| 149 | { |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 150 | |
Jason Evans | 166a745 | 2012-02-29 12:58:39 -0800 | [diff] [blame] | 151 | if (size == 0) |
| 152 | size = 1; |
| 153 | return (s2u(size)); |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 154 | } |
| 155 | |
| 156 | static void |
| 157 | zone_force_lock(malloc_zone_t *zone) |
| 158 | { |
| 159 | |
| 160 | if (isthreaded) |
| 161 | jemalloc_prefork(); |
| 162 | } |
| 163 | |
| 164 | static void |
| 165 | zone_force_unlock(malloc_zone_t *zone) |
| 166 | { |
| 167 | |
Jason Evans | a99e0fa | 2016-11-02 18:06:40 -0700 | [diff] [blame] | 168 | /* |
| 169 | * Call jemalloc_postfork_child() rather than |
| 170 | * jemalloc_postfork_parent(), because this function is executed by both |
| 171 | * parent and child. The parent can tolerate having state |
| 172 | * reinitialized, but the child cannot unlock mutexes that were locked |
| 173 | * by the parent. |
| 174 | */ |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 175 | if (isthreaded) |
Jason Evans | a99e0fa | 2016-11-02 18:06:40 -0700 | [diff] [blame] | 176 | jemalloc_postfork_child(); |
| 177 | } |
| 178 | |
/*
 * Populate jemalloc_zone's dispatch table and its introspection vtable.
 * Fields present only in newer zone ABI revisions are guarded by
 * JEMALLOC_ZONE_VERSION so the layout matches the SDK being built against.
 */
static void
zone_init(void)
{

	jemalloc_zone.size = (void *)zone_size;
	jemalloc_zone.malloc = (void *)zone_malloc;
	jemalloc_zone.calloc = (void *)zone_calloc;
	jemalloc_zone.valloc = (void *)zone_valloc;
	jemalloc_zone.free = (void *)zone_free;
	jemalloc_zone.realloc = (void *)zone_realloc;
	jemalloc_zone.destroy = (void *)zone_destroy;
	jemalloc_zone.zone_name = "jemalloc_zone";
	/* Batch entry points are optional; NULL makes libc fall back. */
	jemalloc_zone.batch_malloc = NULL;
	jemalloc_zone.batch_free = NULL;
	jemalloc_zone.introspect = &jemalloc_zone_introspect;
	jemalloc_zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5)
	jemalloc_zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
	jemalloc_zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
	jemalloc_zone.pressure_relief = NULL;
#endif

	/* Only good_size and the fork lock hooks are implemented. */
	jemalloc_zone_introspect.enumerator = NULL;
	jemalloc_zone_introspect.good_size = (void *)zone_good_size;
	jemalloc_zone_introspect.check = NULL;
	jemalloc_zone_introspect.print = NULL;
	jemalloc_zone_introspect.log = NULL;
	jemalloc_zone_introspect.force_lock = (void *)zone_force_lock;
	jemalloc_zone_introspect.force_unlock = (void *)zone_force_unlock;
	jemalloc_zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
	jemalloc_zone_introspect.zone_locked = NULL;
#endif
#if (JEMALLOC_ZONE_VERSION >= 7)
	jemalloc_zone_introspect.enable_discharge_checking = NULL;
	jemalloc_zone_introspect.disable_discharge_checking = NULL;
	jemalloc_zone_introspect.discharge = NULL;
# ifdef __BLOCKS__
	jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
# else
	jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
# endif
#endif
}
| 227 | |
Jason Evans | 57cddff | 2016-09-26 11:00:32 -0700 | [diff] [blame] | 228 | static malloc_zone_t * |
Jason Evans | a99e0fa | 2016-11-02 18:06:40 -0700 | [diff] [blame] | 229 | zone_default_get(void) |
Mike Hommey | 11b5da7 | 2016-07-08 13:35:35 +0900 | [diff] [blame] | 230 | { |
| 231 | malloc_zone_t **zones = NULL; |
| 232 | unsigned int num_zones = 0; |
| 233 | |
| 234 | /* |
| 235 | * On OSX 10.12, malloc_default_zone returns a special zone that is not |
| 236 | * present in the list of registered zones. That zone uses a "lite zone" |
| 237 | * if one is present (apparently enabled when malloc stack logging is |
| 238 | * enabled), or the first registered zone otherwise. In practice this |
| 239 | * means unless malloc stack logging is enabled, the first registered |
Jason Evans | 57cddff | 2016-09-26 11:00:32 -0700 | [diff] [blame] | 240 | * zone is the default. So get the list of zones to get the first one, |
| 241 | * instead of relying on malloc_default_zone. |
Mike Hommey | 11b5da7 | 2016-07-08 13:35:35 +0900 | [diff] [blame] | 242 | */ |
Jason Evans | a99e0fa | 2016-11-02 18:06:40 -0700 | [diff] [blame] | 243 | if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, |
Jason Evans | 57cddff | 2016-09-26 11:00:32 -0700 | [diff] [blame] | 244 | (vm_address_t**)&zones, &num_zones)) { |
| 245 | /* |
| 246 | * Reset the value in case the failure happened after it was |
| 247 | * set. |
| 248 | */ |
Mike Hommey | 11b5da7 | 2016-07-08 13:35:35 +0900 | [diff] [blame] | 249 | num_zones = 0; |
| 250 | } |
| 251 | |
| 252 | if (num_zones) |
Jason Evans | 57cddff | 2016-09-26 11:00:32 -0700 | [diff] [blame] | 253 | return (zones[0]); |
Mike Hommey | 11b5da7 | 2016-07-08 13:35:35 +0900 | [diff] [blame] | 254 | |
Jason Evans | 57cddff | 2016-09-26 11:00:32 -0700 | [diff] [blame] | 255 | return (malloc_default_zone()); |
Mike Hommey | 11b5da7 | 2016-07-08 13:35:35 +0900 | [diff] [blame] | 256 | } |
| 257 | |
/* As written, this function can only promote jemalloc_zone. */
static void
zone_promote(void)
{
	malloc_zone_t *zone;

	do {
		/*
		 * Unregister and reregister the default zone.  On OSX >= 10.6,
		 * unregistering takes the last registered zone and places it
		 * at the location of the specified zone.  Unregistering the
		 * default zone thus makes the last registered one the default.
		 * On OSX < 10.6, unregistering shifts all registered zones.
		 * The first registered zone then becomes the default.
		 */
		malloc_zone_unregister(default_zone);
		malloc_zone_register(default_zone);

		/*
		 * On OSX 10.6, having the default purgeable zone appear before
		 * the default zone makes some things crash because it thinks it
		 * owns the default zone allocated pointers.  We thus
		 * unregister/re-register it in order to ensure it's always
		 * after the default zone.  On OSX < 10.6, there is no purgeable
		 * zone, so this does nothing.  On OSX >= 10.6, unregistering
		 * replaces the purgeable zone with the last registered zone
		 * above, i.e. the default zone.  Registering it again then puts
		 * it at the end, obviously after the default zone.
		 */
		if (purgeable_zone != NULL) {
			malloc_zone_unregister(purgeable_zone);
			malloc_zone_register(purgeable_zone);
		}

		/* Repeat until jemalloc's zone has become the default. */
		zone = zone_default_get();
	} while (zone != &jemalloc_zone);
}
| 295 | |
| 296 | JEMALLOC_ATTR(constructor) |
| 297 | void |
| 298 | zone_register(void) |
| 299 | { |
| 300 | |
| 301 | /* |
| 302 | * If something else replaced the system default zone allocator, don't |
| 303 | * register jemalloc's. |
| 304 | */ |
| 305 | default_zone = zone_default_get(); |
| 306 | if (!default_zone->zone_name || strcmp(default_zone->zone_name, |
| 307 | "DefaultMallocZone") != 0) |
| 308 | return; |
| 309 | |
| 310 | /* |
| 311 | * The default purgeable zone is created lazily by OSX's libc. It uses |
| 312 | * the default zone when it is created for "small" allocations |
| 313 | * (< 15 KiB), but assumes the default zone is a scalable_zone. This |
| 314 | * obviously fails when the default zone is the jemalloc zone, so |
| 315 | * malloc_default_purgeable_zone() is called beforehand so that the |
| 316 | * default purgeable zone is created when the default zone is still |
| 317 | * a scalable_zone. As purgeable zones only exist on >= 10.6, we need |
| 318 | * to check for the existence of malloc_default_purgeable_zone() at |
| 319 | * run time. |
| 320 | */ |
| 321 | purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : |
| 322 | malloc_default_purgeable_zone(); |
| 323 | |
| 324 | /* Register the custom zone. At this point it won't be the default. */ |
| 325 | zone_init(); |
| 326 | malloc_zone_register(&jemalloc_zone); |
| 327 | |
| 328 | /* Promote the custom zone to be default. */ |
| 329 | zone_promote(); |
Jason Evans | 2dbecf1 | 2010-09-05 10:35:13 -0700 | [diff] [blame] | 330 | } |