#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

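/*
 * Reliable fallback for chunk_alloc_mmap(): over-allocate by (alignment -
 * PAGE) so that the mapping is guaranteed to contain an alignment-aligned
 * run of size bytes (mmap() itself only guarantees page alignment), then
 * trim the excess with pages_trim().
 */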
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;
	size_t alloc_size;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
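	/*
	 * pages_trim() can fail on systems that cannot partially unmap a
	 * region (e.g. Windows): there it has to unmap everything and re-map
	 * at the aligned address, racing against concurrent mmap() activity.
	 * Retry until a trim succeeds.
	 */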
	do {
		void *pages;
		size_t leadsize;
		pages = pages_map(NULL, alloc_size, commit);
		if (pages == NULL)
			return (NULL);
		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
		    (uintptr_t)pages;
		ret = pages_trim(pages, alloc_size, leadsize, size, commit);
	} while (ret == NULL);

	assert(ret != NULL);
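	/* Fresh anonymous mappings are zero-filled by the kernel. */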
	*zero = true;
	return (ret);
}

void *
chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable but
	 * slow method is to create an over-sized mapping, then trim the
	 * excess.  However, that always results in one or two calls to
	 * pages_unmap().
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */

	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = pages_map(new_addr, size, commit);
	if (ret == NULL || ret == new_addr)
		return (ret);
	assert(new_addr == NULL);
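	/*
	 * ALIGNMENT_ADDR2OFFSET() yields ret's distance past the nearest
	 * alignment boundary; a nonzero offset means the optimistic mapping
	 * is misaligned, so unmap it and fall back to the slow path.
	 */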
	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		pages_unmap(ret, size);
		return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
	}

	assert(ret != NULL);
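	/* As in the slow path, the fresh mapping is zero-filled. */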
	*zero = true;
	return (ret);
}

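/*
 * Returns false if the chunk was unmapped, true if the caller retains
 * responsibility for it (when munmap() is disabled at configure time via
 * --disable-munmap, chunks are never unmapped, so they can be recycled).
 */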
bool
chunk_dalloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (!config_munmap);
}