/*
 * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
14
#ifndef _LINUX_BITFIELD_H
#define _LINUX_BITFIELD_H

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
#include <linux/build_bug.h>
#endif
#include <asm/byteorder.h>
22
/*
 * Bitfield access macros
 *
 * FIELD_{GET,PREP} macros take as first parameter shifted mask
 * from which they extract the base mask and shift amount.
 * Mask must be a compilation time constant.
 *
 * Example:
 *
 *  #define REG_FIELD_A  GENMASK(6, 0)
 *  #define REG_FIELD_B  BIT(7)
 *  #define REG_FIELD_C  GENMASK(15, 8)
 *  #define REG_FIELD_D  GENMASK(31, 16)
 *
 * Get:
 *  a = FIELD_GET(REG_FIELD_A, reg);
 *  b = FIELD_GET(REG_FIELD_B, reg);
 *
 * Set:
 *  reg = FIELD_PREP(REG_FIELD_A, 1) |
 *	  FIELD_PREP(REG_FIELD_B, 0) |
 *	  FIELD_PREP(REG_FIELD_C, c) |
 *	  FIELD_PREP(REG_FIELD_D, 0x40);
 *
 * Modify:
 *  reg &= ~REG_FIELD_C;
 *  reg |= FIELD_PREP(REG_FIELD_C, c);
 */
51
/*
 * __bf_shf() - shift amount of a shifted mask: the bit index of the
 * lowest set bit in @x.  __builtin_ffsll() returns the 1-based index
 * of the least significant set bit (0 when x == 0, which would yield
 * -1 here; masks are verified non-zero by __BF_FIELD_CHECK()).
 */
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
53
/*
 * __BF_FIELD_CHECK() - compile-time sanity checks shared by the FIELD_*
 * macros:
 *  - @_mask must be a compile-time constant and non-zero;
 *  - a compile-time-constant @_val must fit inside the field (a
 *    non-constant value cannot be checked here and is skipped);
 *  - @_mask must not be wider than the type of @_reg;
 *  - @_mask must be contiguous: mask + its lowest set bit must be a
 *    power of two.
 * @_pfx is prepended to the diagnostic messages.
 */
#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx)			\
	({								\
		BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask),		\
				 _pfx "mask is not constant");		\
		BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero");	\
		BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ?		\
				 ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
				 _pfx "value too large for the field"); \
		BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull,		\
				 _pfx "type of reg too small for mask"); \
		__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) +			\
					      (1ULL << __bf_shf(_mask))); \
	})
67
/**
 * FIELD_FIT() - check if value fits in the field
 * @_mask: shifted mask defining the field's length and position
 * @_val: value to test against the field
 *
 * Return: true if @_val can fit inside @_mask, false if @_val is too big.
 *
 * Note that @_val is deliberately NOT forwarded to __BF_FIELD_CHECK():
 * the whole point of FIELD_FIT() is to report an oversized value by
 * evaluating to false, so a compile-time-constant @_val that exceeds
 * the field must not turn into a build error the way it would in
 * FIELD_PREP().  Only the mask itself is validated here (fixed upstream
 * in commit 444da3f52407).
 */
#define FIELD_FIT(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: ");	\
		!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
	})
80
/**
 * FIELD_PREP() - prepare a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_val: value to put in the field
 *
 * FIELD_PREP() masks and shifts up the value. The result should
 * be combined with other fields of the bitfield using logical OR.
 * A compile-time-constant @_val that does not fit in @_mask is a
 * build error (see __BF_FIELD_CHECK()); a non-constant @_val is
 * silently truncated to the field width.
 */
#define FIELD_PREP(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: ");	\
		((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask);	\
	})
94
/**
 * FIELD_GET() - extract a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_reg: value of entire bitfield
 *
 * FIELD_GET() extracts the field specified by @_mask from the
 * bitfield passed in as @_reg by masking and shifting it down.
 * The result is cast to the type of @_mask.
 */
#define FIELD_GET(_mask, _reg)						\
	({								\
		__BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: ");	\
		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
	})
108
/* Compile-time diagnostics used by the typed *_encode_bits() helpers. */
extern void __compiletime_error("value doesn't fit into mask")
__field_overflow(void);
extern void __compiletime_error("bad bitfield mask")
__bad_mask(void);
/*
 * field_multiplier() - the lowest set bit of @field (a power of two);
 * multiplying a value by it shifts the value into the field's position.
 * (field | (field - 1)) fills in the zero bits below the mask, so for a
 * contiguous non-zero mask adding 1 yields a power of two; otherwise the
 * test is non-zero and __bad_mask() fails the build.
 */
static __always_inline u64 field_multiplier(u64 field)
{
	if ((field | (field - 1)) & ((field | (field - 1)) + 1))
		__bad_mask();
	return field & -field;	/* isolate the lowest set bit */
}
/*
 * field_mask() - @field shifted down to bit 0, i.e. the maximum value
 * the field can hold.
 */
static __always_inline u64 field_mask(u64 field)
{
	return field / field_multiplier(field);
}
/*
 * ____MAKE_OP() - generate the type-safe bitfield helpers for one
 * storage type:
 *   <type>_encode_bits(v, field)	 - shift @v into @field's position
 *					   (build error when a constant @v
 *					   overflows the field);
 *   <type>_replace_bits(old, val, field) - return @old with @field set
 *					   to @val;
 *   <type>p_replace_bits(p, val, field)  - update @field of *@p in place;
 *   <type>_get_bits(v, field)		 - extract @field from @v.
 * @base is the CPU-order argument type; @to/@from convert to/from the
 * storage byte order (empty for native-endian types).  Unlike FIELD_*,
 * @field need not be a compile-time constant — contiguity is enforced
 * at compile time by field_multiplier() via __bad_mask().
 */
#define ____MAKE_OP(type,base,to,from)					\
static __always_inline __##type type##_encode_bits(base v, base field)	\
{									\
	if (__builtin_constant_p(v) && (v & ~field_mask(field)))	\
		__field_overflow();					\
	return to((v & field_mask(field)) * field_multiplier(field));	\
}									\
static __always_inline __##type type##_replace_bits(__##type old,	\
					base val, base field)		\
{									\
	return (old & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline void type##p_replace_bits(__##type *p,		\
					base val, base field)		\
{									\
	*p = (*p & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline base type##_get_bits(__##type v, base field)	\
{									\
	return (from(v) & field)/field_multiplier(field);		\
}
/*
 * Instantiate the helpers for u8 and for the native, little- and
 * big-endian 16/32/64-bit types, e.g. le32_encode_bits(),
 * be16_get_bits(), u32p_replace_bits().  The generator macros are
 * #undef'd afterwards — only the generated functions are part of the
 * header's interface.
 */
#define __MAKE_OP(size)							\
	____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu)	\
	____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu)	\
	____MAKE_OP(u##size,u##size,,)
____MAKE_OP(u8,u8,,)
__MAKE_OP(16)
__MAKE_OP(32)
__MAKE_OP(64)
#undef __MAKE_OP
#undef ____MAKE_OP
154
155#endif