#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

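/*
 * Reliable-but-slow path for aligned chunk allocation: over-map by
 * (alignment - PAGE) bytes, then trim the excess from both ends of the
 * mapping.  The retry loop exists because pages_trim() can fail; notably
 * on Windows, trimming requires unmapping and re-mapping at the aligned
 * address, and another thread can claim that address in between.
 */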
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;
	size_t alloc_size;

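	/*
	 * Over-allocate by (alignment - PAGE) bytes: mmap() returns
	 * page-aligned addresses, so a mapping of this length necessarily
	 * contains an alignment-aligned subrange of length size.
	 */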
	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	do {
		void *pages;
		size_t leadsize;
		pages = pages_map(NULL, alloc_size);
		if (pages == NULL)
			return (NULL);
		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
		    (uintptr_t)pages;
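		/*
		 * Worked example (illustrative values): with 4 KiB pages and
		 * alignment = 0x200000 (2 MiB), if pages_map() returns
		 * 0x7f0000150000, ALIGNMENT_CEILING() rounds it up to
		 * 0x7f0000200000, so leadsize = 0xb0000; pages_trim() then
		 * discards that lead plus any tail past ret + size.
		 */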
		ret = pages_trim(pages, alloc_size, leadsize, size);
	} while (ret == NULL);

	assert(ret != NULL);
	*zero = true;
	if (!*commit)
		*commit = pages_decommit(ret, size);
	return (ret);
}
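
/*
 * For reference, a minimal sketch of the non-Windows behavior expected of
 * pages_trim() (the authoritative implementation lives elsewhere in the
 * tree): unmap the misaligned lead and the excess tail, keeping only the
 * aligned interior.  pages_trim_sketch is an assumed name; not compiled.
 */
#if 0
static void *
pages_trim_sketch(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
	void *ret = (void *)((uintptr_t)addr + leadsize);
	size_t trailsize = alloc_size - leadsize - size;

	assert(alloc_size >= leadsize + size);
	if (leadsize != 0)
		pages_unmap(addr, leadsize);
	if (trailsize != 0)
		pages_unmap((void *)((uintptr_t)ret + size), trailsize);
	return (ret);
}
#endif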

void *
chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable but slow
	 * method is to create an over-sized mapping, then trim the excess.
	 * However, that always results in one or two calls to pages_unmap().
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */

	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

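	/*
	 * pages_map() is expected to return either NULL or a mapping at
	 * exactly new_addr when new_addr is non-NULL; hence, falling through
	 * to the alignment check below implies new_addr == NULL.
	 */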
	ret = pages_map(new_addr, size);
	if (ret == NULL || ret == new_addr)
		return (ret);
	assert(new_addr == NULL);
	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		pages_unmap(ret, size);
		return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
	}

	assert(ret != NULL);
	*zero = true;
	if (!*commit)
		*commit = pages_decommit(ret, size);
	return (ret);
}
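
/*
 * Illustrative caller (a sketch with assumed context; the example function
 * below is not part of jemalloc): demonstrates the out-parameter contract,
 * where *zero reports whether the memory is known to be zeroed and *commit
 * is updated if decommit succeeded.  Not compiled.
 */
#if 0
static void
chunk_alloc_mmap_example(void)
{
	bool zero = false;
	bool commit = true;
	void *chunk = chunk_alloc_mmap(NULL, chunksize, chunksize, &zero,
	    &commit);

	if (chunk != NULL) {
		assert(zero);	/* Fresh mmap()ed memory is zero-filled. */
		chunk_dalloc_mmap(chunk, chunksize);
	}
}
#endif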
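/*
 * Unmap the chunk when munmap() use is enabled; otherwise return true so
 * that the caller retains the mapping for later recycling (jemalloc can
 * be built with --disable-munmap, e.g. to avoid virtual memory map
 * fragmentation on Linux).
 */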
bool
chunk_dalloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (!config_munmap);
}