#define	JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t	base_mtx;
static extent_tree_t	base_avail_szad;
static extent_node_t	*base_nodes;
static size_t		base_allocated;
static size_t		base_resident;
static size_t		base_mapped;

/******************************************************************************/

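/*
 * Pop a recycled extent_node_t off the base_nodes free list, or return NULL
 * if the list is empty.
 */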
/* base_mtx must be held. */
static extent_node_t *
base_node_try_alloc(void)
{
	extent_node_t *node;

	if (base_nodes == NULL)
		return (NULL);
	node = base_nodes;
	base_nodes = *(extent_node_t **)node;
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	return (node);
}

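/*
 * Push an unused extent_node_t onto the base_nodes free list for later reuse.
 */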
/* base_mtx must be held. */
static void
base_node_dalloc(extent_node_t *node)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
}

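/*
 * Map a new chunk large enough to satisfy a minsize-byte request.  If no
 * recycled extent_node_t is available to track the chunk, carve the node out
 * of the beginning of the chunk itself.
 */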
/* base_mtx must be held. */
static extent_node_t *
base_chunk_alloc(size_t minsize)
{
	extent_node_t *node;
	size_t csize, nsize;
	void *addr;

	assert(minsize != 0);
	node = base_node_try_alloc();
	/* Allocate enough space to also carve a node out if necessary. */
	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
	csize = CHUNK_CEILING(minsize + nsize);
	addr = chunk_alloc_base(csize);
	if (addr == NULL) {
		if (node != NULL)
			base_node_dalloc(node);
		return (NULL);
	}
	base_mapped += csize;
	if (node == NULL) {
		node = (extent_node_t *)addr;
		addr = (void *)((uintptr_t)addr + nsize);
		csize -= nsize;
		if (config_stats) {
			base_allocated += nsize;
			base_resident += PAGE_CEILING(nsize);
		}
	}
	extent_node_init(node, NULL, addr, csize, true, true);
	return (node);
}

/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
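/*
 * The zero guarantee holds because base memory is never deallocated: the
 * extents in base_avail_szad are only the as-yet-unused tails of chunks
 * obtained from chunk_alloc_base(), which (it is assumed here) returns
 * demand-zeroed mappings.
 */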
void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);

	usize = s2u(csize);
	extent_node_init(&key, NULL, NULL, usize, false, false);
	malloc_mutex_lock(&base_mtx);
	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szad_remove(&base_avail_szad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = extent_node_addr_get(node);
	if (extent_node_size_get(node) > csize) {
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szad_insert(&base_avail_szad, node);
	} else
		base_node_dalloc(node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that
		 * is crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
		    PAGE_CEILING((uintptr_t)ret);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(&base_mtx);
	return (ret);
}

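/*
 * Copy the base allocation statistics out under base_mtx so that the three
 * counters are mutually consistent.
 */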
void
base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
{

	malloc_mutex_lock(&base_mtx);
	assert(base_allocated <= base_resident);
	assert(base_resident <= base_mapped);
	*allocated = base_allocated;
	*resident = base_resident;
	*mapped = base_mapped;
	malloc_mutex_unlock(&base_mtx);
}

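/*
 * Initialize the base allocator: create base_mtx, the tree of available
 * extents, and the node free list.  Returns true on error.
 */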
bool
base_boot(void)
{

	if (malloc_mutex_init(&base_mtx))
		return (true);
	extent_tree_szad_new(&base_avail_szad);
	base_nodes = NULL;

	return (false);
}

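/*
 * Fork hooks: acquire base_mtx before fork(), then release it in the parent
 * and reinitialize it in the child, so that base state is never inherited in
 * a locked (inconsistent) state.
 */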
void
base_prefork(void)
{

	malloc_mutex_prefork(&base_mtx);
}

void
base_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&base_mtx);
}

void
base_postfork_child(void)
{

	malloc_mutex_postfork_child(&base_mtx);
}