#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */

unsigned nhbins;
size_t tcache_maxclass;

tcaches_t *tcaches;

/* Index of first element within tcaches that has never been used. */
static unsigned tcaches_past;

/* Head of singly linked list tracking available tcaches elements. */
static tcaches_t *tcaches_avail;

/******************************************************************************/

size_t
tcache_salloc(const void *ptr)
{

        return (arena_salloc(ptr, false));
}

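/*
 * Incremental GC for one thread cache bin.  Each call operates on the bin
 * indexed by next_gc_bin: if the bin's low water mark stayed positive since
 * the last pass, roughly 3/4 of the objects that sat unused below the low
 * water mark are flushed back to the arena and the bin's fill count is halved;
 * if the bin underflowed (negative low water), the fill count is doubled
 * instead.  next_gc_bin then advances cyclically over all nhbins bins.
 */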
void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
{
        szind_t binind = tcache->next_gc_bin;
        tcache_bin_t *tbin = &tcache->tbins[binind];
        tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

        if (tbin->low_water > 0) {
                /*
                 * Flush (ceiling) 3/4 of the objects below the low water mark.
                 */
                if (binind < NBINS) {
                        tcache_bin_flush_small(tsd, tcache, tbin, binind,
                            tbin->ncached - tbin->low_water + (tbin->low_water
                            >> 2));
                } else {
                        tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
                            - tbin->low_water + (tbin->low_water >> 2), tcache);
                }
                /*
                 * Reduce fill count by 2X.  Limit lg_fill_div such that the
                 * fill count is always at least 1.
                 */
                if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
                        tbin->lg_fill_div++;
        } else if (tbin->low_water < 0) {
                /*
                 * Increase fill count by 2X.  Make sure lg_fill_div stays
                 * greater than 0.
                 */
                if (tbin->lg_fill_div > 1)
                        tbin->lg_fill_div--;
        }
        tbin->low_water = tbin->ncached;

        tcache->next_gc_bin++;
        if (tcache->next_gc_bin == nhbins)
                tcache->next_gc_bin = 0;
}

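/*
 * Slow path for small allocation, called when the cache bin for binind is
 * empty: refill the bin from the arena, pass along any accumulated profiling
 * bytes, then retry the bin-local allocation.
 */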
void *
tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
{
        void *ret;

        arena_tcache_fill_small(tsd, arena, tbin, binind, config_prof ?
            tcache->prof_accumbytes : 0);
        if (config_prof)
                tcache->prof_accumbytes = 0;
        ret = tcache_alloc_easy(tbin, tcache_success);

        return (ret);
}

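/*
 * Flush a small-object cache bin until only rem objects remain.  Objects are
 * returned in batches: each pass locks the arena bin that owns the first
 * remaining object, frees every object belonging to that bin, and defers
 * objects owned by other arenas to a later pass.  Per-bin stats are merged
 * along the way (or at the end if no pass touched this thread's arena).
 */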
void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    szind_t binind, unsigned rem)
{
        arena_t *arena;
        void *ptr;
        unsigned i, nflush, ndeferred;
        bool merged_stats = false;

        assert(binind < NBINS);
        assert(rem <= tbin->ncached);

        arena = arena_choose(tsd, NULL);
        assert(arena != NULL);
        for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
                /* Lock the arena bin associated with the first object. */
                arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
                    *(tbin->avail - 1));
                arena_t *bin_arena = extent_node_arena_get(&chunk->node);
                arena_bin_t *bin = &bin_arena->bins[binind];

                if (config_prof && bin_arena == arena) {
                        if (arena_prof_accum(arena, tcache->prof_accumbytes))
                                prof_idump();
                        tcache->prof_accumbytes = 0;
                }

                malloc_mutex_lock(&bin->lock);
                if (config_stats && bin_arena == arena) {
                        assert(!merged_stats);
                        merged_stats = true;
                        bin->stats.nflushes++;
                        bin->stats.nrequests += tbin->tstats.nrequests;
                        tbin->tstats.nrequests = 0;
                }
                ndeferred = 0;
                for (i = 0; i < nflush; i++) {
                        ptr = *(tbin->avail - 1 - i);
                        assert(ptr != NULL);
                        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
                        if (extent_node_arena_get(&chunk->node) == bin_arena) {
                                size_t pageind = ((uintptr_t)ptr -
                                    (uintptr_t)chunk) >> LG_PAGE;
                                arena_chunk_map_bits_t *bitselm =
                                    arena_bitselm_get(chunk, pageind);
                                arena_dalloc_bin_junked_locked(bin_arena, chunk,
                                    ptr, bitselm);
                        } else {
                                /*
                                 * This object was allocated via a different
                                 * arena bin than the one that is currently
                                 * locked.  Stash the object, so that it can be
                                 * handled in a future pass.
                                 */
                                *(tbin->avail - 1 - ndeferred) = ptr;
                                ndeferred++;
                        }
                }
                malloc_mutex_unlock(&bin->lock);
                arena_decay_ticks(tsd, bin_arena, nflush - ndeferred);
        }
        if (config_stats && !merged_stats) {
                /*
                 * The flush loop didn't happen to flush to this thread's
                 * arena, so the stats didn't get merged.  Manually do so now.
                 */
                arena_bin_t *bin = &arena->bins[binind];
                malloc_mutex_lock(&bin->lock);
                bin->stats.nflushes++;
                bin->stats.nrequests += tbin->tstats.nrequests;
                tbin->tstats.nrequests = 0;
                malloc_mutex_unlock(&bin->lock);
        }

        memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
            sizeof(void *));
        tbin->ncached = rem;
        if ((int)tbin->ncached < tbin->low_water)
                tbin->low_water = tbin->ncached;
}

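/*
 * Flush a large-object cache bin until only rem objects remain.  Analogous to
 * tcache_bin_flush_small(), except that whole arenas (rather than individual
 * arena bins) are locked per pass, and large-allocation stats are merged
 * instead of per-bin stats.
 */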
void
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache)
{
        arena_t *arena;
        void *ptr;
        unsigned i, nflush, ndeferred;
        bool merged_stats = false;

        assert(binind < nhbins);
        assert(rem <= tbin->ncached);

        arena = arena_choose(tsd, NULL);
        assert(arena != NULL);
        for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
                /* Lock the arena associated with the first object. */
                arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
                    *(tbin->avail - 1));
                arena_t *locked_arena = extent_node_arena_get(&chunk->node);
                UNUSED bool idump;

                if (config_prof)
                        idump = false;
                malloc_mutex_lock(&locked_arena->lock);
                if ((config_prof || config_stats) && locked_arena == arena) {
                        if (config_prof) {
                                idump = arena_prof_accum_locked(arena,
                                    tcache->prof_accumbytes);
                                tcache->prof_accumbytes = 0;
                        }
                        if (config_stats) {
                                merged_stats = true;
                                arena->stats.nrequests_large +=
                                    tbin->tstats.nrequests;
                                arena->stats.lstats[binind - NBINS].nrequests +=
                                    tbin->tstats.nrequests;
                                tbin->tstats.nrequests = 0;
                        }
                }
                ndeferred = 0;
                for (i = 0; i < nflush; i++) {
                        ptr = *(tbin->avail - 1 - i);
                        assert(ptr != NULL);
                        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
                        if (extent_node_arena_get(&chunk->node) ==
                            locked_arena) {
                                arena_dalloc_large_junked_locked(locked_arena,
                                    chunk, ptr);
                        } else {
                                /*
                                 * This object was allocated via a different
                                 * arena than the one that is currently locked.
                                 * Stash the object, so that it can be handled
                                 * in a future pass.
                                 */
                                *(tbin->avail - 1 - ndeferred) = ptr;
                                ndeferred++;
                        }
                }
                malloc_mutex_unlock(&locked_arena->lock);
                if (config_prof && idump)
                        prof_idump();
                arena_decay_ticks(tsd, locked_arena, nflush - ndeferred);
        }
        if (config_stats && !merged_stats) {
                /*
                 * The flush loop didn't happen to flush to this thread's
                 * arena, so the stats didn't get merged.  Manually do so now.
                 */
                malloc_mutex_lock(&arena->lock);
                arena->stats.nrequests_large += tbin->tstats.nrequests;
                arena->stats.lstats[binind - NBINS].nrequests +=
                    tbin->tstats.nrequests;
                tbin->tstats.nrequests = 0;
                malloc_mutex_unlock(&arena->lock);
        }

        memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
            sizeof(void *));
        tbin->ncached = rem;
        if ((int)tbin->ncached < tbin->low_water)
                tbin->low_water = tbin->ncached;
}

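/*
 * Arena association is stats-only bookkeeping: with stats enabled, the tcache
 * is linked onto (or removed from) its arena's list of extant tcaches
 * (tcache_ql), and dissociation merges the cache's counters into the arena
 * before unlinking.  Without stats these calls are no-ops.
 */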
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{

        if (config_stats) {
                /* Link into list of extant tcaches. */
                malloc_mutex_lock(&arena->lock);
                ql_elm_new(tcache, link);
                ql_tail_insert(&arena->tcache_ql, tcache, link);
                malloc_mutex_unlock(&arena->lock);
        }
}

void
tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
{

        tcache_arena_dissociate(tcache, oldarena);
        tcache_arena_associate(tcache, newarena);
}

void
tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
{

        if (config_stats) {
                /* Unlink from list of extant tcaches. */
                malloc_mutex_lock(&arena->lock);
                if (config_debug) {
                        bool in_ql = false;
                        tcache_t *iter;
                        ql_foreach(iter, &arena->tcache_ql, link) {
                                if (iter == tcache) {
                                        in_ql = true;
                                        break;
                                }
                        }
                        assert(in_ql);
                }
                ql_remove(&arena->tcache_ql, tcache, link);
                tcache_stats_merge(tcache, arena);
                malloc_mutex_unlock(&arena->lock);
        }
}

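/*
 * Slow path for tcache_get(): create the calling thread's cache on first use,
 * or return NULL (memoizing the disabled state) if tcaches are disabled or no
 * arena can be chosen.
 */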
tcache_t *
tcache_get_hard(tsd_t *tsd)
{
        arena_t *arena;

        if (!tcache_enabled_get()) {
                if (tsd_nominal(tsd))
                        tcache_enabled_set(false); /* Memoize. */
                return (NULL);
        }
        arena = arena_choose(tsd, NULL);
        if (unlikely(arena == NULL))
                return (NULL);
        return (tcache_create(tsd, arena));
}

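/*
 * A tcache is allocated as one cacheline-aligned object: the tcache_t header
 * (including the tbins array) is followed by the naturally aligned pointer
 * stacks that back each bin, and each bin's avail pointer is set just past
 * the end of its own stack.
 */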
tcache_t *
tcache_create(tsd_t *tsd, arena_t *arena)
{
        tcache_t *tcache;
        size_t size, stack_offset;
        unsigned i;

        size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
        /* Naturally align the pointer stacks. */
        size = PTR_CEILING(size);
        stack_offset = size;
        size += stack_nelms * sizeof(void *);
        /* Avoid false cacheline sharing. */
        size = sa2u(size, CACHELINE);

        tcache = ipallocztm(tsd, size, CACHELINE, true, false, true,
            arena_get(0, false));
        if (tcache == NULL)
                return (NULL);

        tcache_arena_associate(tcache, arena);

        ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);

        assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
        for (i = 0; i < nhbins; i++) {
                tcache->tbins[i].lg_fill_div = 1;
                stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
                /*
                 * avail points past the available space.  Allocations will
                 * access the slots toward higher addresses (for the benefit of
                 * prefetch).
                 */
                tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
                    (uintptr_t)stack_offset);
        }

        return (tcache);
}

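/*
 * Tear down a tcache: dissociate it from its arena, flush every bin back to
 * the owning arenas, merge any remaining request and profiling counters, and
 * free the tcache itself.
 */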
static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache)
{
        arena_t *arena;
        unsigned i;

        arena = arena_choose(tsd, NULL);
        tcache_arena_dissociate(tcache, arena);

        for (i = 0; i < NBINS; i++) {
                tcache_bin_t *tbin = &tcache->tbins[i];
                tcache_bin_flush_small(tsd, tcache, tbin, i, 0);

                if (config_stats && tbin->tstats.nrequests != 0) {
                        arena_bin_t *bin = &arena->bins[i];
                        malloc_mutex_lock(&bin->lock);
                        bin->stats.nrequests += tbin->tstats.nrequests;
                        malloc_mutex_unlock(&bin->lock);
                }
        }

        for (; i < nhbins; i++) {
                tcache_bin_t *tbin = &tcache->tbins[i];
                tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

                if (config_stats && tbin->tstats.nrequests != 0) {
                        malloc_mutex_lock(&arena->lock);
                        arena->stats.nrequests_large += tbin->tstats.nrequests;
                        arena->stats.lstats[i - NBINS].nrequests +=
                            tbin->tstats.nrequests;
                        malloc_mutex_unlock(&arena->lock);
                }
        }

        if (config_prof && tcache->prof_accumbytes > 0 &&
            arena_prof_accum(arena, tcache->prof_accumbytes))
                prof_idump();

        idalloctm(tsd, tcache, false, true, true);
}

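/*
 * tsd cleanup hook: destroy the calling thread's automatic tcache, if it ever
 * created one.
 */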
void
tcache_cleanup(tsd_t *tsd)
{
        tcache_t *tcache;

        if (!config_tcache)
                return;

        if ((tcache = tsd_tcache_get(tsd)) != NULL) {
                tcache_destroy(tsd, tcache);
                tsd_tcache_set(tsd, NULL);
        }
}

void
tcache_enabled_cleanup(tsd_t *tsd)
{

        /* Do nothing. */
}

/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
        unsigned i;

        cassert(config_stats);

        /* Merge and reset tcache stats. */
        for (i = 0; i < NBINS; i++) {
                arena_bin_t *bin = &arena->bins[i];
                tcache_bin_t *tbin = &tcache->tbins[i];
                malloc_mutex_lock(&bin->lock);
                bin->stats.nrequests += tbin->tstats.nrequests;
                malloc_mutex_unlock(&bin->lock);
                tbin->tstats.nrequests = 0;
        }

        for (; i < nhbins; i++) {
                malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
                tcache_bin_t *tbin = &tcache->tbins[i];
                arena->stats.nrequests_large += tbin->tstats.nrequests;
                lstats->nrequests += tbin->tstats.nrequests;
                tbin->tstats.nrequests = 0;
        }
}

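/*
 * Explicitly managed caches: tcaches is a lazily allocated global array of up
 * to MALLOCX_TCACHE_MAX+1 slots.  tcaches_past marks the first never-used
 * slot, and tcaches_avail is a free list of previously destroyed slots that
 * tcaches_create() reuses before consuming fresh slots.
 */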
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
{
        tcache_t *tcache;
        tcaches_t *elm;

        if (tcaches == NULL) {
                tcaches = base_alloc(sizeof(tcache_t *) *
                    (MALLOCX_TCACHE_MAX+1));
                if (tcaches == NULL)
                        return (true);
        }

        if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
                return (true);
        tcache = tcache_create(tsd, arena_get(0, false));
        if (tcache == NULL)
                return (true);

        if (tcaches_avail != NULL) {
                elm = tcaches_avail;
                tcaches_avail = tcaches_avail->next;
                elm->tcache = tcache;
                *r_ind = (unsigned)(elm - tcaches);
        } else {
                elm = &tcaches[tcaches_past];
                elm->tcache = tcache;
                *r_ind = tcaches_past;
                tcaches_past++;
        }

        return (false);
}

static void
tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
{

        if (elm->tcache == NULL)
                return;
        tcache_destroy(tsd, elm->tcache);
        elm->tcache = NULL;
}

void
tcaches_flush(tsd_t *tsd, unsigned ind)
{

        tcaches_elm_flush(tsd, &tcaches[ind]);
}

void
tcaches_destroy(tsd_t *tsd, unsigned ind)
{
        tcaches_t *elm = &tcaches[ind];
        tcaches_elm_flush(tsd, elm);
        elm->next = tcaches_avail;
        tcaches_avail = elm;
}

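/*
 * One-time initialization: clamp opt_lg_tcache_max into
 * [SMALL_MAXCLASS, large_maxclass] to derive tcache_maxclass and nhbins, then
 * size each cache bin (ncached_max) and the total pointer-stack space
 * (stack_nelms) that tcache_create() will reserve per thread.
 */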
bool
tcache_boot(void)
{
        unsigned i;

        /*
         * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
         * known.
         */
        if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
                tcache_maxclass = SMALL_MAXCLASS;
        else if ((1U << opt_lg_tcache_max) > large_maxclass)
                tcache_maxclass = large_maxclass;
        else
                tcache_maxclass = (1U << opt_lg_tcache_max);

        nhbins = size2index(tcache_maxclass) + 1;

        /* Initialize tcache_bin_info. */
        tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
            sizeof(tcache_bin_info_t));
        if (tcache_bin_info == NULL)
                return (true);
        stack_nelms = 0;
        for (i = 0; i < NBINS; i++) {
                if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
                        tcache_bin_info[i].ncached_max =
                            TCACHE_NSLOTS_SMALL_MIN;
                } else if ((arena_bin_info[i].nregs << 1) <=
                    TCACHE_NSLOTS_SMALL_MAX) {
                        tcache_bin_info[i].ncached_max =
                            (arena_bin_info[i].nregs << 1);
                } else {
                        tcache_bin_info[i].ncached_max =
                            TCACHE_NSLOTS_SMALL_MAX;
                }
                stack_nelms += tcache_bin_info[i].ncached_max;
        }
        for (; i < nhbins; i++) {
                tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
                stack_nelms += tcache_bin_info[i].ncached_max;
        }

        return (false);
}