#define	JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t	base_mtx;
/* Serial number to assign to the next base extent. */
static size_t		base_extent_sn_next;
/* Unused space within base chunks, ordered by size/serial number/address. */
static extent_tree_t	base_avail_szsnad;
/* LIFO free list of extent_node_t structures available for reuse. */
static extent_node_t	*base_nodes;
/* Usage statistics, maintained while base_mtx is held. */
static size_t		base_allocated;
static size_t		base_resident;
static size_t		base_mapped;

/******************************************************************************/

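/*
 * Pop a node from the base_nodes free list, or return NULL if the list is
 * empty.  Free list linkage is stored in the first word of each unused node,
 * so the list requires no auxiliary allocations.
 */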
static extent_node_t *
base_node_try_alloc(tsdn_t *tsdn)
{
	extent_node_t *node;

	malloc_mutex_assert_owner(tsdn, &base_mtx);

	if (base_nodes == NULL)
		return (NULL);
	node = base_nodes;
	base_nodes = *(extent_node_t **)node;
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	return (node);
}

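/* Push an unused node back onto the base_nodes free list for later reuse. */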
static void
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
{

	malloc_mutex_assert_owner(tsdn, &base_mtx);

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
}

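/*
 * Initialize a node that tracks a range of base memory, assigning it the next
 * extent serial number.  atomic_add_z() returns the post-increment value, so
 * subtracting 1 yields the serial number reserved by this call.
 */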
static void
base_extent_node_init(extent_node_t *node, void *addr, size_t size)
{
	size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;

	extent_node_init(node, NULL, addr, size, sn, true, true);
}

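/*
 * Map a new chunk that is large enough to hold at least minsize bytes.  If no
 * recycled extent_node_t is available to track the chunk, carve the node out
 * of the beginning of the chunk itself.
 */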
static extent_node_t *
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
	extent_node_t *node;
	size_t csize, nsize;
	void *addr;

	malloc_mutex_assert_owner(tsdn, &base_mtx);
	assert(minsize != 0);
	node = base_node_try_alloc(tsdn);
	/* Allocate enough space to also carve a node out if necessary. */
	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
	csize = CHUNK_CEILING(minsize + nsize);
	addr = chunk_alloc_base(csize);
	if (addr == NULL) {
		if (node != NULL)
			base_node_dalloc(tsdn, node);
		return (NULL);
	}
	base_mapped += csize;
	if (node == NULL) {
		node = (extent_node_t *)addr;
		addr = (void *)((uintptr_t)addr + nsize);
		csize -= nsize;
		if (config_stats) {
			base_allocated += nsize;
			base_resident += PAGE_CEILING(nsize);
		}
	}
	base_extent_node_init(node, addr, csize);
	return (node);
}

/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
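/*
 * Illustrative usage sketch (hypothetical caller, not code from this file):
 * internal metadata such as radix tree nodes is carved from base chunks and
 * is never individually freed, e.g.
 *
 *	elms = (rtree_node_elm_t *)base_alloc(tsdn, nelms *
 *	    sizeof(rtree_node_elm_t));
 *	if (elms == NULL)
 *		... handle allocation failure ...
 *
 * Here elms and nelms are placeholder names, not identifiers from this file.
 */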
void *
base_alloc(tsdn_t *tsdn, size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);

	usize = s2u(csize);
	extent_node_init(&key, NULL, NULL, usize, 0, false, false);
	malloc_mutex_lock(tsdn, &base_mtx);
	node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szsnad_remove(&base_avail_szsnad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(tsdn, csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = extent_node_addr_get(node);
	if (extent_node_size_get(node) > csize) {
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szsnad_insert(&base_avail_szsnad, node);
	} else
		base_node_dalloc(tsdn, node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
		    PAGE_CEILING((uintptr_t)ret);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(tsdn, &base_mtx);
	return (ret);
}

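/*
 * Copy the current base statistics out to the caller, asserting the invariant
 * base_allocated <= base_resident <= base_mapped while the lock is held.
 */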
void
base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
    size_t *mapped)
{

	malloc_mutex_lock(tsdn, &base_mtx);
	assert(base_allocated <= base_resident);
	assert(base_resident <= base_mapped);
	*allocated = base_allocated;
	*resident = base_resident;
	*mapped = base_mapped;
	malloc_mutex_unlock(tsdn, &base_mtx);
}

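/*
 * Initialize base allocator state.  Called during bootstrap, before the first
 * call to base_alloc().
 */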
bool
base_boot(void)
{

	if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
		return (true);
	base_extent_sn_next = 0;
	extent_tree_szsnad_new(&base_avail_szsnad);
	base_nodes = NULL;

	return (false);
}

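/*
 * Fork handling: base_prefork() acquires base_mtx so that base state is
 * consistent across fork(), and the postfork functions release (or, in the
 * child, reinitialize) the mutex afterward.
 */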
void
base_prefork(tsdn_t *tsdn)
{

	malloc_mutex_prefork(tsdn, &base_mtx);
}

void
base_postfork_parent(tsdn_t *tsdn)
{

	malloc_mutex_postfork_parent(tsdn, &base_mtx);
}

void
base_postfork_child(tsdn_t *tsdn)
{

	malloc_mutex_postfork_child(tsdn, &base_mtx);
}
Jason Evans4e2e3dd2012-03-13 16:31:41 -0700187}