/* SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0 */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Li Guifu <[email protected]>
 * Modified by Gao Xiang <[email protected]>
 */
#ifndef __EROFS_DEFS_H
#define __EROFS_DEFS_H

#ifdef __cplusplus
extern "C"
{
#endif

#include <stddef.h>
#include <stdint.h>
#include <assert.h>
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_LINUX_TYPES_H
#include <linux/types.h>
#endif
/*
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
 */
#define container_of(ptr, type, member) ({			\
	const typeof(((type *)0)->member) *__mptr = (ptr);	\
	(type *)((char *)__mptr - offsetof(type, member)); })

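/*
 * Usage sketch (illustrative, with hypothetical types): recover a pointer
 * to the enclosing struct from a pointer to one of its members.
 *
 *	struct foo {
 *		int a;
 *		int b;
 *	};
 *
 *	struct foo f;
 *	int *bp = &f.b;
 *	struct foo *fp = container_of(bp, struct foo, b);	// fp == &f
 */
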
typedef uint8_t         u8;
typedef uint16_t        u16;
typedef uint32_t        u32;
typedef uint64_t        u64;

#ifndef HAVE_LINUX_TYPES_H
typedef u8	__u8;
typedef u16	__u16;
typedef u32	__u32;
typedef u64	__u64;
typedef u16	__le16;
typedef u32	__le32;
typedef u64	__le64;
typedef u16	__be16;
typedef u32	__be32;
typedef u64	__be64;
#endif

typedef int8_t          s8;
typedef int16_t         s16;
typedef int32_t         s32;
typedef int64_t         s64;

#if __BYTE_ORDER == __LITTLE_ENDIAN
/*
 * The host byte order matches the on-disk (little-endian) byte order,
 * so these helpers are all just the identity.
 */
#define cpu_to_le16(x) ((__u16)(x))
#define cpu_to_le32(x) ((__u32)(x))
#define cpu_to_le64(x) ((__u64)(x))
#define le16_to_cpu(x) ((__u16)(x))
#define le32_to_cpu(x) ((__u32)(x))
#define le64_to_cpu(x) ((__u64)(x))

#else
#if __BYTE_ORDER == __BIG_ENDIAN
#define cpu_to_le16(x) (__builtin_bswap16(x))
#define cpu_to_le32(x) (__builtin_bswap32(x))
#define cpu_to_le64(x) (__builtin_bswap64(x))
#define le16_to_cpu(x) (__builtin_bswap16(x))
#define le32_to_cpu(x) (__builtin_bswap32(x))
#define le64_to_cpu(x) (__builtin_bswap64(x))
#else
#error "unknown byte order: only little- and big-endian hosts are supported"
#endif
#endif

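/*
 * Usage sketch (illustrative): on any host, a value round-trips through the
 * conversion helpers unchanged, while its in-memory representation is
 * always little-endian.
 *
 *	u32 v = 0x12345678;
 *	__le32 ondisk = cpu_to_le32(v);		// bytes: 78 56 34 12
 *	u32 back = le32_to_cpu(ondisk);		// back == 0x12345678
 */
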
#ifdef __cplusplus
/* the message operand keeps this valid before C++17 */
#define BUILD_BUG_ON(condition) static_assert(!(condition), #condition)
#elif !defined(__OPTIMIZE__)
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))
#else
#define BUILD_BUG_ON(condition) assert(!(condition))
#endif

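/*
 * Usage sketch (illustrative): reject a build when a compile-time invariant
 * is violated, e.g. an on-disk structure drifting from its expected size.
 *
 *	BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128);
 */
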
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

#define __round_mask(x, y)      ((__typeof__(x))((y)-1))
#define round_up(x, y)          ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y)        ((x) & ~__round_mask(x, y))

#ifndef roundup
/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
#define roundup(x, y) (					\
{							\
	const typeof(y) __y = y;			\
	(((x) + (__y - 1)) / __y) * __y;		\
}							\
)
#endif
#define rounddown(x, y) (				\
{							\
	typeof(x) __x = (x);				\
	__x - (__x % (y));				\
}							\
)

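/*
 * Usage sketch (illustrative): round_up()/round_down() require @y to be a
 * power of two, since they work by masking; roundup()/rounddown() accept
 * any non-zero @y at the cost of a division.
 *
 *	round_up(13, 8)		// == 16, mask-based
 *	round_down(13, 8)	// ==  8
 *	roundup(13, 5)		// == 15, division-based
 *	rounddown(13, 5)	// == 10
 *	DIV_ROUND_UP(13, 5)	// ==  3
 */
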
/* Can easily conflict with C++'s std::min */
#ifndef __cplusplus
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({				\
	typeof(x) _max1 = (x);			\
	typeof(y) _max2 = (y);			\
	(void) (&_max1 == &_max2);		\
	_max1 > _max2 ? _max1 : _max2; })
#endif

/*
 * ..and if you can't take the strict types, you can specify one yourself.
 * Or don't use min/max at all, of course.
 */
#define min_t(type, x, y) ({			\
	type __min1 = (x);			\
	type __min2 = (y);			\
	__min1 < __min2 ? __min1 : __min2; })

#define max_t(type, x, y) ({			\
	type __max1 = (x);			\
	type __max2 = (y);			\
	__max1 > __max2 ? __max1 : __max2; })

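/*
 * Usage sketch (illustrative): the dummy pointer comparison in min()/max()
 * makes the compiler warn when the two arguments have different types;
 * min_t()/max_t() avoid that by casting both sides to one explicit type.
 *
 *	u32 a = 5;
 *	u64 b = 9;
 *	min(a, b)		// warns: comparison of distinct pointer types
 *	min_t(u64, a, b)	// == 5, both operands compared as u64
 */
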
#define cmpsgn(x, y) ({		\
	typeof(x) _x = (x);	\
	typeof(y) _y = (y);	\
	(_x > _y) - (_x < _y); })

#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

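/*
 * Usage sketch (illustrative): cmpsgn() returns -1, 0 or 1 in the style of
 * a comparison callback; ARRAY_SIZE() only works on true arrays, never on
 * pointers.
 *
 *	cmpsgn(3, 7)	// == -1
 *	cmpsgn(7, 7)	// ==  0
 *
 *	int tbl[16];
 *	ARRAY_SIZE(tbl)	// == 16
 */
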
#define BIT(nr)             (1UL << (nr))
#define BIT_ULL(nr)         (1ULL << (nr))
#define BIT_MASK(nr)        (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)        ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr)    (1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr)    ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE       8
#define BITS_TO_LONGS(nr)   DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))

#ifdef __SIZEOF_LONG__
#define BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
#else
#define BITS_PER_LONG __WORDSIZE
#endif

/* BIT_ULL_MASK()/BIT_ULL_WORD() above rely on this; assume 64-bit long long */
#ifndef BITS_PER_LONG_LONG
#define BITS_PER_LONG_LONG 64
#endif

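/*
 * Usage sketch (illustrative): addressing bit @nr inside an unsigned long
 * bitmap array.
 *
 *	unsigned long map[BITS_TO_LONGS(128)];	// a 128-bit bitmap
 *	map[BIT_WORD(70)] |= BIT_MASK(70);	// set bit 70
 */
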
#define BUG_ON(cond)        assert(!(cond))

#ifdef NDEBUG
#define DBG_BUGON(condition)	((void)(condition))
#else
#define DBG_BUGON(condition)	BUG_ON(condition)
#endif

#ifndef __maybe_unused
#define __maybe_unused      __attribute__((__unused__))
#endif

#define __packed __attribute__((__packed__))

#define __get_unaligned_t(type, ptr) ({						\
	const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr);	\
	__pptr->x;								\
})

#define __put_unaligned_t(type, val, ptr) do {					\
	struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr);		\
	__pptr->x = (val);							\
} while (0)

#define get_unaligned(ptr)	__get_unaligned_t(typeof(*(ptr)), (ptr))
#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))

static inline u32 get_unaligned_le32(const void *p)
{
	return le32_to_cpu(__get_unaligned_t(__le32, p));
}

static inline void put_unaligned_le32(u32 val, void *p)
{
	__put_unaligned_t(__le32, cpu_to_le32(val), p);
}

/* must return u64: a u32 return type would silently truncate the top half */
static inline u64 get_unaligned_le64(const void *p)
{
	return le64_to_cpu(__get_unaligned_t(__le64, p));
}

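/*
 * Usage sketch (illustrative): read and write a little-endian 32-bit value
 * at an arbitrarily aligned offset in a raw buffer without tripping
 * alignment-sensitive hardware.
 *
 *	u8 buf[16];				// raw bytes, e.g. from disk
 *	u32 v = get_unaligned_le32(buf + 3);	// misaligned load is safe
 *	put_unaligned_le32(v + 1, buf + 3);	// and so is the store
 */
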
/**
 * ilog2 - log base 2 of a 32-bit or 64-bit unsigned value
 * @n: parameter
 *
 * constant-capable log base 2 calculation
 * - this can be used to initialise global variables from constant data,
 *   hence the massive ternary operator construction
 */
#define ilog2(n)			\
(					\
	(n) & (1ULL << 63) ? 63 :	\
	(n) & (1ULL << 62) ? 62 :	\
	(n) & (1ULL << 61) ? 61 :	\
	(n) & (1ULL << 60) ? 60 :	\
	(n) & (1ULL << 59) ? 59 :	\
	(n) & (1ULL << 58) ? 58 :	\
	(n) & (1ULL << 57) ? 57 :	\
	(n) & (1ULL << 56) ? 56 :	\
	(n) & (1ULL << 55) ? 55 :	\
	(n) & (1ULL << 54) ? 54 :	\
	(n) & (1ULL << 53) ? 53 :	\
	(n) & (1ULL << 52) ? 52 :	\
	(n) & (1ULL << 51) ? 51 :	\
	(n) & (1ULL << 50) ? 50 :	\
	(n) & (1ULL << 49) ? 49 :	\
	(n) & (1ULL << 48) ? 48 :	\
	(n) & (1ULL << 47) ? 47 :	\
	(n) & (1ULL << 46) ? 46 :	\
	(n) & (1ULL << 45) ? 45 :	\
	(n) & (1ULL << 44) ? 44 :	\
	(n) & (1ULL << 43) ? 43 :	\
	(n) & (1ULL << 42) ? 42 :	\
	(n) & (1ULL << 41) ? 41 :	\
	(n) & (1ULL << 40) ? 40 :	\
	(n) & (1ULL << 39) ? 39 :	\
	(n) & (1ULL << 38) ? 38 :	\
	(n) & (1ULL << 37) ? 37 :	\
	(n) & (1ULL << 36) ? 36 :	\
	(n) & (1ULL << 35) ? 35 :	\
	(n) & (1ULL << 34) ? 34 :	\
	(n) & (1ULL << 33) ? 33 :	\
	(n) & (1ULL << 32) ? 32 :	\
	(n) & (1ULL << 31) ? 31 :	\
	(n) & (1ULL << 30) ? 30 :	\
	(n) & (1ULL << 29) ? 29 :	\
	(n) & (1ULL << 28) ? 28 :	\
	(n) & (1ULL << 27) ? 27 :	\
	(n) & (1ULL << 26) ? 26 :	\
	(n) & (1ULL << 25) ? 25 :	\
	(n) & (1ULL << 24) ? 24 :	\
	(n) & (1ULL << 23) ? 23 :	\
	(n) & (1ULL << 22) ? 22 :	\
	(n) & (1ULL << 21) ? 21 :	\
	(n) & (1ULL << 20) ? 20 :	\
	(n) & (1ULL << 19) ? 19 :	\
	(n) & (1ULL << 18) ? 18 :	\
	(n) & (1ULL << 17) ? 17 :	\
	(n) & (1ULL << 16) ? 16 :	\
	(n) & (1ULL << 15) ? 15 :	\
	(n) & (1ULL << 14) ? 14 :	\
	(n) & (1ULL << 13) ? 13 :	\
	(n) & (1ULL << 12) ? 12 :	\
	(n) & (1ULL << 11) ? 11 :	\
	(n) & (1ULL << 10) ? 10 :	\
	(n) & (1ULL <<  9) ?  9 :	\
	(n) & (1ULL <<  8) ?  8 :	\
	(n) & (1ULL <<  7) ?  7 :	\
	(n) & (1ULL <<  6) ?  6 :	\
	(n) & (1ULL <<  5) ?  5 :	\
	(n) & (1ULL <<  4) ?  4 :	\
	(n) & (1ULL <<  3) ?  3 :	\
	(n) & (1ULL <<  2) ?  2 :	\
	(n) & (1ULL <<  1) ?  1 : 0	\
)

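/*
 * Usage sketch (illustrative): ilog2() yields the index of the highest set
 * bit, i.e. floor(log2(n)); note it also yields 0 for n == 0, so callers
 * must treat zero specially if that matters.
 *
 *	ilog2(1)	// ==  0
 *	ilog2(4096)	// == 12
 *	ilog2(4097)	// == 12 (rounds down)
 */
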
static inline unsigned int fls_long(unsigned long x)
{
	return x ? sizeof(x) * 8 - __builtin_clzl(x) : 0;
}

static inline unsigned long lowbit(unsigned long n)
{
	return n & -n;
}

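/*
 * Usage sketch (illustrative): fls_long() finds the last (most significant)
 * set bit, counting from 1, and lowbit() isolates the least significant
 * set bit.
 *
 *	fls_long(0)	// == 0
 *	fls_long(0x18)	// == 5 (bit 4 is the highest set bit)
 *	lowbit(0x18)	// == 0x8
 */
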
/**
 * __roundup_pow_of_two() - round up to nearest power of two
 * @n: value to round up
 */
static inline __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
{
	return 1UL << fls_long(n - 1);
}

/**
 * roundup_pow_of_two - round the given value up to nearest power of two
 * @n: parameter
 *
 * round the given value up to the nearest power of two
 * - the result is undefined when n == 0
 * - this can be used to initialise global variables from constant data
 */
#define roundup_pow_of_two(n)			\
(						\
	__builtin_constant_p(n) ? (		\
		((n) == 1) ? 1 :		\
		(1UL << (ilog2((n) - 1) + 1))	\
				   ) :		\
	__roundup_pow_of_two(n)			\
)

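/*
 * Usage sketch (illustrative): typical use is sizing hash tables or buffers
 * to a power-of-two boundary.
 *
 *	roundup_pow_of_two(1)	// ==  1
 *	roundup_pow_of_two(17)	// == 32
 *	roundup_pow_of_two(64)	// == 64 (already a power of two)
 */
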
#ifndef __always_inline
#define __always_inline	inline
#endif

#ifdef HAVE_STRUCT_STAT_ST_ATIM
/* Linux */
#define ST_ATIM_NSEC(stbuf) ((stbuf)->st_atim.tv_nsec)
#define ST_CTIM_NSEC(stbuf) ((stbuf)->st_ctim.tv_nsec)
#define ST_MTIM_NSEC(stbuf) ((stbuf)->st_mtim.tv_nsec)
#define ST_MTIM_NSEC_SET(stbuf, val) (stbuf)->st_mtim.tv_nsec = (val)
#elif defined(HAVE_STRUCT_STAT_ST_ATIMENSEC)
/* macOS */
#define ST_ATIM_NSEC(stbuf) ((stbuf)->st_atimensec)
#define ST_CTIM_NSEC(stbuf) ((stbuf)->st_ctimensec)
#define ST_MTIM_NSEC(stbuf) ((stbuf)->st_mtimensec)
#define ST_MTIM_NSEC_SET(stbuf, val) (stbuf)->st_mtimensec = (val)
#else
#define ST_ATIM_NSEC(stbuf) 0
#define ST_CTIM_NSEC(stbuf) 0
#define ST_MTIM_NSEC(stbuf) 0
#define ST_MTIM_NSEC_SET(stbuf, val) do { } while (0)
#endif

#define __erofs_likely(x)      __builtin_expect(!!(x), 1)
#define __erofs_unlikely(x)    __builtin_expect(!!(x), 0)

#ifdef __cplusplus
}
#endif

#endif
357