#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

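/*
 * Round size down to the nearest size class (the largest quantized size
 * that does not exceed size).  size2index(size + 1) yields the smallest
 * class strictly larger than size, so backing off by one index gives the
 * floor; the ind == 0 check guards against underflow for tiny sizes.
 */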
#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
	size_t ret;
	szind_t ind;

	assert(size > 0);

	ind = size2index(size + 1);
	if (ind == 0) {
		/* Avoid underflow. */
		return (index2size(0));
	}
	ret = index2size(ind - 1);
	assert(ret <= size);
	return (ret);
}

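/*
 * Round size up to the nearest quantized size (the smallest quantized
 * size that is at least size).  Implemented in terms of the floor
 * function; the comment inside explains why a floor that came up short
 * is bumped to the next size class.
 */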
size_t
extent_size_quantize_ceil(size_t size) {
	size_t ret;

	assert(size > 0);

	ret = extent_size_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large extent,
		 * because under-sized extents may be mixed in.  This only
		 * happens when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = index2size(size2index(ret + 1));
	}
	return ret;
}

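/*
 * Order extents by quantized size, so that all extents whose usable
 * sizes fall in the same size class compare as equal.
 */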
JEMALLOC_INLINE_C int
extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
{
	size_t a_qsize = extent_size_quantize_floor(extent_node_size_get(a));
	size_t b_qsize = extent_size_quantize_floor(extent_node_size_get(b));

	return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
}

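/*
 * Order extents by serial number; lower (older) serial numbers sort
 * first.
 */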
JEMALLOC_INLINE_C int
extent_sn_comp(const extent_node_t *a, const extent_node_t *b)
{
	size_t a_sn = extent_node_sn_get(a);
	size_t b_sn = extent_node_sn_get(b);

	return ((a_sn > b_sn) - (a_sn < b_sn));
}

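/*
 * Order extents by base address.  This serves both as the final szsnad
 * tiebreak and as the comparator for the address-ordered tree generated
 * below.
 */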
JEMALLOC_INLINE_C int
extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
{
	uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
	uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);

	return ((a_addr > b_addr) - (a_addr < b_addr));
}

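/*
 * Composite ordering: quantized size first, then serial number, then
 * address.  A size-ordered search over the szsnad tree therefore tends
 * to land on older, lower-addressed extents among those of the same
 * size class.
 */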
JEMALLOC_INLINE_C int
extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
{
	int ret;

	ret = extent_sz_comp(a, b);
	if (ret != 0)
		return (ret);

	ret = extent_sn_comp(a, b);
	if (ret != 0)
		return (ret);

	ret = extent_ad_comp(a, b);
	return (ret);
}

/* Generate red-black tree functions. */
rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
    extent_szsnad_comp)

/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
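
/*
 * Illustrative sketch (an assumption about typical usage, not code from
 * this file) of driving the generated szsnad tree; the function names
 * follow rb_gen()'s prefix convention, and the key-node setup is elided:
 *
 *	extent_tree_t tree;
 *	extent_node_t key, *node;
 *
 *	extent_tree_szsnad_new(&tree);
 *	extent_tree_szsnad_insert(&tree, avail_node);
 *	node = extent_tree_szsnad_nsearch(&tree, &key);
 *	if (node != NULL)
 *		extent_tree_szsnad_remove(&tree, node);
 *
 * nsearch() returns the least node that is >= key under
 * extent_szsnad_comp(), i.e. the smallest adequately sized extent,
 * preferring older and lower-addressed extents among ties.
 */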