#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

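/*
 * Human-readable names for the dss_prec_t values, in enum order; "N/A"
 * presumably corresponds to dss_prec_limit and is only used when reporting a
 * precedence that does not apply.  (The enum itself is defined in the
 * internal headers, not in this file.)
 */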
const char *dss_prec_names[] = {
        "disabled",
        "primary",
        "secondary",
        "N/A"
};

/*
 * Current dss precedence default, used when creating new arenas.  NB: This is
 * stored as unsigned rather than dss_prec_t because in principle there's no
 * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we
 * use atomic operations to synchronize the setting.
 */
static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT;

/* Base address of the DSS. */
static void *dss_base;
/* Atomic boolean indicating whether the DSS is exhausted. */
static unsigned dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static void *dss_max;

/******************************************************************************/

static void *
chunk_dss_sbrk(intptr_t increment)
{

#ifdef JEMALLOC_DSS
        return (sbrk(increment));
#else
        not_implemented();
        return (NULL);
#endif
}
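
/*
 * Note: the callers below rely on the standard sbrk() contract:
 *
 *     void *cur  = sbrk(0);     -- queries the current break without moving it
 *     void *prev = sbrk(incr);  -- on success, returns the *previous* break;
 *                                  on failure, returns (void *)-1 and leaves
 *                                  the break unchanged
 *
 * (Informational comment only; see the sbrk(2) manual page for details.)
 */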

dss_prec_t
chunk_dss_prec_get(void)
{
        dss_prec_t ret;

        if (!have_dss)
                return (dss_prec_disabled);
        ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
        return (ret);
}

bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

        if (!have_dss)
                return (dss_prec != dss_prec_disabled);
        atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
        return (false);
}
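
/*
 * Illustrative usage (not part of this file): the precedence is normally
 * changed indirectly, e.g. through the opt.dss option or the "arena.<i>.dss"
 * mallctl.  A hedged sketch of the public-API side:
 *
 *     const char *dss = "primary";
 *     mallctl("arena.0.dss", NULL, NULL, (void *)&dss, sizeof(dss));
 *
 * The exact routing to chunk_dss_prec_set() presumably lives in ctl.c and is
 * only sketched here for context.
 */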

static void *
chunk_dss_max_update(void *new_addr)
{
        void *max_cur;
        spin_t spinner;

        /*
         * Get the current end of the DSS as max_cur and ensure that dss_max
         * is up to date.
         */
        spin_init(&spinner);
        while (true) {
                void *max_prev = atomic_read_p(&dss_max);

                max_cur = chunk_dss_sbrk(0);
                if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
                        /*
                         * Another thread optimistically updated dss_max.
                         * Wait for it to finish.
                         */
                        spin_adaptive(&spinner);
                        continue;
                }
                if (!atomic_cas_p(&dss_max, max_prev, max_cur))
                        break;
        }
        /* Fixed new_addr can only be supported if it is at the edge of DSS. */
        if (new_addr != NULL && max_cur != new_addr)
                return (NULL);

        return (max_cur);
}
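
/*
 * Illustrative interleaving that the spin loop above guards against (an
 * assumed schedule, for explanation only):
 *
 *   T1: in chunk_alloc_dss(), CASes dss_max from A to B (B > A), then calls
 *       sbrk() to actually extend the DSS.
 *   T0: in chunk_dss_max_update(), reads dss_max == B, but sbrk(0) still
 *       returns A because T1's sbrk() call has not completed; since B > A,
 *       T0 spins until the two views agree (or T1 rolls dss_max back).
 */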

void *
chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit)
{
        cassert(have_dss);
        assert(size > 0 && (size & chunksize_mask) == 0);
        assert(alignment > 0 && (alignment & chunksize_mask) == 0);

        /*
         * sbrk() uses a signed increment argument, so take care not to
         * interpret a huge allocation request as a negative increment.
         */
        if ((intptr_t)size < 0)
                return (NULL);

        if (!atomic_read_u(&dss_exhausted)) {
                /*
                 * The loop is necessary to recover from races with other
                 * threads that are using the DSS for something other than
                 * malloc.
                 */
                while (true) {
                        void *ret, *max_cur, *dss_next, *dss_prev;
                        void *gap_addr_chunk, *gap_addr_subchunk;
                        size_t gap_size_chunk, gap_size_subchunk;
                        intptr_t incr;

                        max_cur = chunk_dss_max_update(new_addr);
                        if (max_cur == NULL)
                                goto label_oom;

                        /*
                         * Compute how much chunk-aligned gap space (if any)
                         * is necessary to satisfy alignment.  This space can
                         * be recycled for later use.
                         */
                        gap_addr_chunk = (void *)(CHUNK_CEILING(
                            (uintptr_t)max_cur));
                        ret = (void *)ALIGNMENT_CEILING(
                            (uintptr_t)gap_addr_chunk, alignment);
                        gap_size_chunk = (uintptr_t)ret -
                            (uintptr_t)gap_addr_chunk;
                        /*
                         * Compute the address just past the end of the
                         * desired allocation space.
                         */
                        dss_next = (void *)((uintptr_t)ret + size);
                        if ((uintptr_t)ret < (uintptr_t)max_cur ||
                            (uintptr_t)dss_next < (uintptr_t)max_cur)
                                goto label_oom; /* Wrap-around. */
                        /* Compute the increment, including subchunk bytes. */
                        gap_addr_subchunk = max_cur;
                        gap_size_subchunk = (uintptr_t)ret -
                            (uintptr_t)gap_addr_subchunk;
                        incr = gap_size_subchunk + size;

                        assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
                            size);

                        /*
                         * Optimistically update dss_max, and roll back below
                         * if sbrk() fails.  No other thread will try to
                         * extend the DSS while dss_max is greater than the
                         * current DSS max reported by sbrk(0).
                         */
                        if (atomic_cas_p(&dss_max, max_cur, dss_next))
                                continue;

                        /* Try to allocate. */
                        dss_prev = chunk_dss_sbrk(incr);
                        if (dss_prev == max_cur) {
                                /* Success. */
                                if (gap_size_chunk != 0) {
                                        chunk_hooks_t chunk_hooks =
                                            CHUNK_HOOKS_INITIALIZER;
                                        chunk_dalloc_wrapper(tsdn, arena,
                                            &chunk_hooks, gap_addr_chunk,
                                            gap_size_chunk,
                                            arena_extent_sn_next(arena),
                                            false, true);
                                }
                                if (*zero) {
                                        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
                                            ret, size);
                                        memset(ret, 0, size);
                                }
                                if (!*commit)
                                        *commit = pages_decommit(ret, size);
                                return (ret);
                        }

                        /*
                         * Failure, whether due to OOM or a race with a raw
                         * sbrk() call from outside the allocator.  Try to
                         * roll back the optimistic dss_max update; if the
                         * rollback fails, it is because another caller of
                         * this function has succeeded since this invocation
                         * started, in which case rollback is not necessary.
                         */
                        atomic_cas_p(&dss_max, dss_next, max_cur);
                        if (dss_prev == (void *)-1) {
                                /* OOM. */
                                atomic_write_u(&dss_exhausted,
                                    (unsigned)true);
                                goto label_oom;
                        }
                }
        }
label_oom:
        return (NULL);
}
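
/*
 * Worked example of the gap computation in chunk_alloc_dss() above
 * (hypothetical addresses; assumes chunksize == 2 MiB and an 8 MiB alignment
 * request):
 *
 *   max_cur           = 0xb43000                              (current break)
 *   gap_addr_chunk    = CHUNK_CEILING(0xb43000)               = 0xc00000
 *   ret               = ALIGNMENT_CEILING(0xc00000, 0x800000) = 0x1000000
 *   gap_size_chunk    = 0x1000000 - 0xc00000                  = 0x400000
 *   gap_addr_subchunk = max_cur                               = 0xb43000
 *   gap_size_subchunk = 0x1000000 - 0xb43000                  = 0x4bd000
 *   incr              = gap_size_subchunk + size
 *
 * On success, sbrk(incr) leaves the break at ret + size, the chunk-aligned
 * gap [0xc00000, 0x1000000) is handed back via chunk_dalloc_wrapper() for
 * reuse, and the sub-chunk slack [0xb43000, 0xc00000) is simply consumed.
 */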

static bool
chunk_in_dss_helper(void *chunk, void *max)
{

        return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk <
            (uintptr_t)max);
}

bool
chunk_in_dss(void *chunk)
{

        cassert(have_dss);

        return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max)));
}

bool
chunk_dss_mergeable(void *chunk_a, void *chunk_b)
{
        void *max;

        cassert(have_dss);

        max = atomic_read_p(&dss_max);
        return (chunk_in_dss_helper(chunk_a, max) ==
            chunk_in_dss_helper(chunk_b, max));
}
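
/*
 * Note: chunk_dss_boot() below presumably runs during single-threaded
 * bootstrapping, which is why dss_base, dss_exhausted, and dss_max can be
 * initialized with plain stores rather than the atomic operations used
 * elsewhere in this file.
 */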

void
chunk_dss_boot(void)
{

        cassert(have_dss);

        dss_base = chunk_dss_sbrk(0);
        dss_exhausted = (unsigned)(dss_base == (void *)-1);
        dss_max = dss_base;
}

/******************************************************************************/