#define	JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

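/*
 * base_mtx serializes all of the state below.  base_avail_szad tracks unused
 * space remaining in previously allocated base chunks, and base_nodes is a
 * LIFO list of recycled extent_node_t structures.  The base_* size counters
 * are reported via base_stats_get().
 */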
static malloc_mutex_t	base_mtx;
static extent_tree_t	base_avail_szad;
static extent_node_t	*base_nodes;
static size_t		base_allocated;
static size_t		base_resident;
static size_t		base_mapped;

/******************************************************************************/

/* base_mtx must be held. */
static extent_node_t *
base_node_try_alloc(void)
{
	extent_node_t *node;

	if (base_nodes == NULL)
		return (NULL);
	node = base_nodes;
	base_nodes = *(extent_node_t **)node;
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	return (node);
}

/* base_mtx must be held. */
static void
base_node_dalloc(extent_node_t *node)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
}

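/*
 * Allocate and initialize a new base chunk of at least minsize bytes.  If no
 * recycled extent node is available, enough extra space is requested so that
 * a node describing the chunk can be carved from the chunk itself.
 */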
/* base_mtx must be held. */
static extent_node_t *
base_chunk_alloc(size_t minsize)
{
	extent_node_t *node;
	size_t csize, nsize;
	void *addr;

	assert(minsize != 0);
	node = base_node_try_alloc();
	/* Allocate enough space to also carve a node out if necessary. */
	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
	csize = CHUNK_CEILING(minsize + nsize);
	addr = chunk_alloc_base(csize);
	if (addr == NULL) {
		if (node != NULL)
			base_node_dalloc(node);
		return (NULL);
	}
	base_mapped += csize;
	if (node == NULL) {
		node = (extent_node_t *)addr;
		addr = (void *)((uintptr_t)addr + nsize);
		csize -= nsize;
		if (config_stats) {
			base_allocated += nsize;
			base_resident += PAGE_CEILING(nsize);
		}
	}
	extent_node_init(node, NULL, addr, csize, true, true);
	return (node);
}

/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
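 *
 * Illustrative usage sketch (hypothetical caller and type names; real call
 * sites are jemalloc internals such as the radix tree code):
 *
 *	meta_t *m = (meta_t *)base_alloc(sizeof(meta_t));
 *	if (m == NULL)
 *		... bootstrap failure; metadata cannot be allocated ...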
 */
void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);

	usize = s2u(csize);
	extent_node_init(&key, NULL, NULL, usize, false, false);
	malloc_mutex_lock(&base_mtx);
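	/*
	 * Find the smallest available extent that can satisfy the request
	 * (the tree is ordered by size, then by address).
	 */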
	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szad_remove(&base_avail_szad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = extent_node_addr_get(node);
	if (extent_node_size_get(node) > csize) {
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szad_insert(&base_avail_szad, node);
	} else
		base_node_dalloc(node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that
		 * is crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
		    PAGE_CEILING((uintptr_t)ret);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(&base_mtx);
	return (ret);
}

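/*
 * Copy out the base allocator's statistics.  All three counters grow
 * monotonically, since base memory is never returned to the system.
 */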
void
base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
{

	malloc_mutex_lock(&base_mtx);
	assert(base_allocated <= base_resident);
	assert(base_resident <= base_mapped);
	*allocated = base_allocated;
	*resident = base_resident;
	*mapped = base_mapped;
	malloc_mutex_unlock(&base_mtx);
}

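/*
 * Initialize base allocator state.  Called once during bootstrap, before any
 * call to base_alloc(); returns true on error.
 */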
bool
base_boot(void)
{

	if (malloc_mutex_init(&base_mtx))
		return (true);
	extent_tree_szad_new(&base_avail_szad);
	base_nodes = NULL;

	return (false);
}

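/*
 * Fork handlers: base_mtx is acquired before fork() and released in both the
 * parent and the child afterward, so the child never inherits a locked mutex.
 */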
void
base_prefork(void)
{

	malloc_mutex_prefork(&base_mtx);
}

void
base_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&base_mtx);
}

void
base_postfork_child(void)
{

	malloc_mutex_postfork_child(&base_mtx);
}