#include "test/jemalloc_test.h"

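/* Run the tests with decay-based purging and a 1 second decay time. */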
const char *malloc_conf = "purge:decay,decay_time:1";

static nstime_update_t *nstime_update_orig;

static unsigned nupdates_mock;
static nstime_t time_mock;
static bool nonmonotonic_mock;

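/*
 * Mock replacement for nstime_update() that counts calls, returns a frozen
 * timestamp (time_mock), and can simulate a non-monotonic clock.
 */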
static bool
nstime_update_mock(nstime_t *time)
{

	nupdates_mock++;
	if (!nonmonotonic_mock)
		nstime_copy(time, &time_mock);
	return (nonmonotonic_mock);
}

TEST_BEGIN(test_decay_ticks)
{
	ticker_t *decay_ticker;
	unsigned tick0, tick1;
	size_t sz, huge0, large0;
	void *p;

	test_skip_if(opt_purge != purge_mode_decay);

	decay_ticker = decay_ticker_get(tsd_fetch(), 0);
	assert_ptr_not_null(decay_ticker,
	    "Unexpected failure getting decay ticker");

	sz = sizeof(size_t);
	assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
	    "Unexpected mallctl failure");
	assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
	    "Unexpected mallctl failure");

	/*
	 * Test the standard APIs using a huge size class, since we can't
	 * control tcache interactions (except by completely disabling tcache
	 * for the entire test program).
	 */

	/* malloc(). */
	tick0 = ticker_read(decay_ticker);
	p = malloc(huge0);
	assert_ptr_not_null(p, "Unexpected malloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
	/* free(). */
	tick0 = ticker_read(decay_ticker);
	free(p);
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");

	/* calloc(). */
	tick0 = ticker_read(decay_ticker);
	p = calloc(1, huge0);
	assert_ptr_not_null(p, "Unexpected calloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
	free(p);

	/* posix_memalign(). */
	tick0 = ticker_read(decay_ticker);
	assert_d_eq(posix_memalign(&p, sizeof(size_t), huge0), 0,
	    "Unexpected posix_memalign() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0,
	    "Expected ticker to tick during posix_memalign()");
	free(p);

	/* aligned_alloc(). */
	tick0 = ticker_read(decay_ticker);
	p = aligned_alloc(sizeof(size_t), huge0);
	assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0,
	    "Expected ticker to tick during aligned_alloc()");
	free(p);

	/* realloc(). */
	/* Allocate. */
	tick0 = ticker_read(decay_ticker);
	p = realloc(NULL, huge0);
	assert_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Reallocate. */
	tick0 = ticker_read(decay_ticker);
	p = realloc(p, huge0);
	assert_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Deallocate. */
	tick0 = ticker_read(decay_ticker);
	realloc(p, 0);
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");

	/*
	 * Test the *allocx() APIs using huge, large, and small size classes,
	 * with tcache explicitly disabled.
	 */
	{
		unsigned i;
		size_t allocx_sizes[3];
		allocx_sizes[0] = huge0;
		allocx_sizes[1] = large0;
		allocx_sizes[2] = 1;

		for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
			sz = allocx_sizes[i];

			/* mallocx(). */
			tick0 = ticker_read(decay_ticker);
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during mallocx() (sz=%zu)",
			    sz);
			/* rallocx(). */
			tick0 = ticker_read(decay_ticker);
			p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected rallocx() failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during rallocx() (sz=%zu)",
			    sz);
			/* xallocx(). */
			tick0 = ticker_read(decay_ticker);
			xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during xallocx() (sz=%zu)",
			    sz);
			/* dallocx(). */
			tick0 = ticker_read(decay_ticker);
			dallocx(p, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during dallocx() (sz=%zu)",
			    sz);
			/* sdallocx(). */
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			tick0 = ticker_read(decay_ticker);
			sdallocx(p, sz, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during sdallocx() "
			    "(sz=%zu)", sz);
		}
	}

	/*
	 * Test tcache fill/flush interactions for large and small size
	 * classes, using an explicit tcache.
	 */
	if (config_tcache) {
		unsigned tcache_ind, i;
		size_t tcache_sizes[2];
		tcache_sizes[0] = large0;
		tcache_sizes[1] = 1;

		sz = sizeof(unsigned);
		assert_d_eq(mallctl("tcache.create", &tcache_ind, &sz, NULL, 0),
		    0, "Unexpected mallctl failure");

		for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
			sz = tcache_sizes[i];

			/* tcache fill. */
			tick0 = ticker_read(decay_ticker);
			p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during tcache fill "
			    "(sz=%zu)", sz);
			/* tcache flush. */
			dallocx(p, MALLOCX_TCACHE(tcache_ind));
			tick0 = ticker_read(decay_ticker);
			assert_d_eq(mallctl("tcache.flush", NULL, NULL,
			    &tcache_ind, sizeof(unsigned)), 0,
			    "Unexpected mallctl failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during tcache flush "
			    "(sz=%zu)", sz);
		}
	}
}
TEST_END

TEST_BEGIN(test_decay_ticker)
{
#define NPS 1024
	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];
	uint64_t epoch;
	uint64_t npurge0 = 0;
	uint64_t npurge1 = 0;
	size_t sz, large;
	unsigned i, nupdates0;
	nstime_t time, decay_time, deadline;

	test_skip_if(opt_purge != purge_mode_decay);

	/*
	 * Allocate a bunch of large objects, pause the clock, deallocate the
	 * objects, restore the clock, then [md]allocx() in a tight loop to
	 * verify the ticker triggers purging.
	 */

	if (config_tcache) {
		size_t tcache_max;

		sz = sizeof(size_t);
		assert_d_eq(mallctl("arenas.tcache_max", &tcache_max, &sz, NULL,
		    0), 0, "Unexpected mallctl failure");
		large = nallocx(tcache_max + 1, flags);
	} else {
		sz = sizeof(size_t);
		assert_d_eq(mallctl("arenas.lrun.0.size", &large, &sz, NULL, 0),
		    0, "Unexpected mallctl failure");
	}

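	/* Purge the arena, then record the baseline purge count. */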
	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl failure");
	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
	    "Unexpected mallctl failure");
	sz = sizeof(uint64_t);
	assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0),
	    config_stats ? 0 : ENOENT, "Unexpected mallctl result");

	for (i = 0; i < NPS; i++) {
		ps[i] = mallocx(large, flags);
		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
	}

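	/*
	 * Pause the clock by swapping in the mock nstime_update(), so that
	 * the deallocations below all appear to happen at the same instant.
	 */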
	nupdates_mock = 0;
	nstime_init(&time_mock, 0);
	nstime_update(&time_mock);
	nonmonotonic_mock = false;

	nstime_update_orig = nstime_update;
	nstime_update = nstime_update_mock;

	for (i = 0; i < NPS; i++) {
		dallocx(ps[i], flags);
		nupdates0 = nupdates_mock;
		assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
		    "Unexpected arena.0.decay failure");
		assert_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

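	/* Restore the real clock, then compute the decay deadline. */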
	nstime_update = nstime_update_orig;

	nstime_init(&time, 0);
	nstime_update(&time);
	nstime_init2(&decay_time, opt_decay_time, 0);
	nstime_copy(&deadline, &time);
	nstime_add(&deadline, &decay_time);
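	/*
	 * Allocate/deallocate in a tight loop, re-reading the purge
	 * statistics after each batch, until purging is observed or the
	 * decay deadline passes.
	 */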
	do {
		for (i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) {
			void *p = mallocx(1, flags);
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			dallocx(p, flags);
		}
		assert_d_eq(mallctl("epoch", NULL, NULL, &epoch,
		    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
		sz = sizeof(uint64_t);
		assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz,
		    NULL, 0), config_stats ? 0 : ENOENT,
		    "Unexpected mallctl result");

		nstime_update(&time);
	} while (nstime_compare(&time, &deadline) <= 0 && npurge1 == npurge0);

	if (config_stats)
		assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
#undef NPS
}
TEST_END

TEST_BEGIN(test_decay_nonmonotonic)
{
#define NPS (SMOOTHSTEP_NSTEPS + 1)
	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];
	uint64_t epoch;
	uint64_t npurge0 = 0;
	uint64_t npurge1 = 0;
	size_t sz, large0;
	unsigned i, nupdates0;

	test_skip_if(opt_purge != purge_mode_decay);

	sz = sizeof(size_t);
	assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
	    "Unexpected mallctl failure");

	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl failure");
	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
	    "Unexpected mallctl failure");
	sz = sizeof(uint64_t);
	assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0),
	    config_stats ? 0 : ENOENT, "Unexpected mallctl result");

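	/*
	 * Install the mock clock in non-monotonic mode, so that every
	 * nstime_update() call reports a non-monotonic clock and time never
	 * advances.
	 */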
	nupdates_mock = 0;
	nstime_init(&time_mock, 0);
	nstime_update(&time_mock);
	nonmonotonic_mock = true;

	nstime_update_orig = nstime_update;
	nstime_update = nstime_update_mock;

	for (i = 0; i < NPS; i++) {
		ps[i] = mallocx(large0, flags);
		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
	}

	for (i = 0; i < NPS; i++) {
		dallocx(ps[i], flags);
		nupdates0 = nupdates_mock;
		assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
		    "Unexpected arena.0.decay failure");
		assert_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
	    "Unexpected mallctl failure");
	sz = sizeof(uint64_t);
	assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz, NULL, 0),
	    config_stats ? 0 : ENOENT, "Unexpected mallctl result");

	if (config_stats)
		assert_u64_gt(npurge1, npurge0, "Expected purging to occur");

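	/* Restore the real clock. */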
	nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END

int
main(void)
{

	return (test(
	    test_decay_ticks,
	    test_decay_ticker,
	    test_decay_nonmonotonic));
}