1 /*
2  * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
3  * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
4  *
5  * SPDX-License-Identifier: BSD-3-Clause
6  */
7 
8 #ifndef UTILS_DEF_H
9 #define UTILS_DEF_H
10 
11 #include <export/lib/utils_def_exp.h>
12 
/*
 * Number of elements in a statically-declared array. Only valid on true
 * arrays: a pointer argument silently yields a wrong result.
 */
#define ARRAY_SIZE(a)				\
	(sizeof(a) / sizeof(*(a)))
16 
/*
 * Evaluate to a non-zero value when x is a power of two.
 * NOTE: x is expanded twice, so avoid arguments with side effects.
 * NOTE(review): x == 0 also satisfies ((x & (x - 1)) == 0); callers that
 * must reject zero need an explicit check.
 */
#define IS_POWER_OF_TWO(x)			\
	(((x) & ((x) - 1)) == 0)
19 
20 #define SIZE_FROM_LOG2_WORDS(n)		(U(4) << (n))
21 
/*
 * Single-bit masks: BIT_32 in an unsigned 32-bit constant, BIT_64 in an
 * unsigned 64-bit one. U()/ULL() are the integer-constant helpers from the
 * exported utils_def_exp.h header included above.
 * NOTE: shifting by a count >= the type width is undefined behaviour.
 */
#define BIT_32(nr)			(U(1) << (nr))
#define BIT_64(nr)			(ULL(1) << (nr))

/* BIT() follows the native register width of the architecture. */
#ifdef __aarch64__
#define BIT				BIT_64
#else
#define BIT				BIT_32
#endif
30 
31 /*
32  * Create a contiguous bitmask starting at bit position @l and ending at
33  * position @h. For example
34  * GENMASK_64(39, 21) gives us the 64bit vector 0x000000ffffe00000.
35  */
36 #if defined(__LINKER__) || defined(__ASSEMBLER__)
37 #define GENMASK_32(h, l) \
38 	(((0xFFFFFFFF) << (l)) & (0xFFFFFFFF >> (32 - 1 - (h))))
39 
40 #define GENMASK_64(h, l) \
41 	((~0 << (l)) & (~0 >> (64 - 1 - (h))))
42 #else
43 #define GENMASK_32(h, l) \
44 	(((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (32 - 1 - (h))))
45 
46 #define GENMASK_64(h, l) \
47 	(((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
48 #endif
49 
50 #ifdef __aarch64__
51 #define GENMASK				GENMASK_64
52 #else
53 #define GENMASK				GENMASK_32
54 #endif
55 
/*
 * Extract the high/low 32 bits of a 64-bit value. The argument is
 * parenthesised so that expression arguments bind correctly: without the
 * parentheses, e.g. HI(a | b) would expand to (a | b >> 32), which groups
 * as (a | (b >> 32)).
 */
#define HI(addr)			((addr) >> 32)
#define LO(addr)			((addr) & 0xffffffff)
58 
59 /*
60  * This variant of div_round_up can be used in macro definition but should not
61  * be used in C code as the `div` parameter is evaluated twice.
62  */
63 #define DIV_ROUND_UP_2EVAL(n, d)	(((n) + (d) - 1) / (d))
64 
/*
 * Integer division of val by div, rounded up. Safe for use in C code:
 * both arguments are expanded exactly once (div into a temporary).
 */
#define div_round_up(val, div) __extension__ ({	\
	__typeof__(div) _divisor = (div);	\
	((val) + _divisor - (__typeof__(div)) 1) / _divisor;	\
})
69 
/*
 * Smaller of x and y, each evaluated exactly once. The dummy pointer
 * comparison triggers a compiler diagnostic when the two arguments have
 * different types.
 */
#define MIN(x, y) __extension__ ({	\
	__typeof__(x) _a = (x);		\
	__typeof__(y) _b = (y);		\
	(void)(&_a == &_b);		\
	(_a < _b) ? _a : _b;		\
})
76 
/*
 * Larger of x and y, each evaluated exactly once. The dummy pointer
 * comparison triggers a compiler diagnostic when the two arguments have
 * different types.
 */
#define MAX(x, y) __extension__ ({	\
	__typeof__(x) _a = (x);		\
	__typeof__(y) _b = (y);		\
	(void)(&_a == &_b);		\
	(_a > _b) ? _a : _b;		\
})
83 
/*
 * Constrain x to the inclusive range [min, max]; each argument is evaluated
 * exactly once. The dummy pointer comparisons trigger a compiler diagnostic
 * when the bounds do not share x's type.
 */
#define CLAMP(x, min, max) __extension__ ({ \
	__typeof__(x) _val = (x); \
	__typeof__(min) _lo = (min); \
	__typeof__(max) _hi = (max); \
	(void)(&_val == &_lo); \
	(void)(&_val == &_hi); \
	((_val > _hi) ? _hi : ((_val < _lo) ? _lo : _val)); \
})
92 
93 /*
94  * The round_up() macro rounds up a value to the given boundary in a
95  * type-agnostic yet type-safe manner. The boundary must be a power of two.
96  * In other words, it computes the smallest multiple of boundary which is
97  * greater than or equal to value.
98  *
99  * round_down() is similar but rounds the value down instead.
100  */
101 #define round_boundary(value, boundary)		\
102 	((__typeof__(value))((boundary) - 1))
103 
104 #define round_up(value, boundary)		\
105 	((((value) - 1) | round_boundary(value, boundary)) + 1)
106 
107 #define round_down(value, boundary)		\
108 	((value) & ~round_boundary(value, boundary))
109 
/* add operation together with checking whether the operation overflowed
 * The result is '*res',
 * return 0 on success and 1 on overflow
 * (__builtin_add_overflow returns true when the sum does not fit *res.)
 */
#define add_overflow(a, b, res) __builtin_add_overflow((a), (b), (res))

/*
 * Round up a value to align with a given size and
 * check whether overflow happens.
 * The rounded-up value is '*res',
 * return 0 on success and 1 on overflow.
 *
 * Rewritten with an explicit if: the previous `cond ? 1 : (void)(...), 0`
 * form groups as `((cond ? 1 : ...), 0)` because a comma cannot appear in
 * the third operand of ?:. __typeof__ is used instead of plain typeof for
 * consistency with the rest of this file and for strict ISO modes.
 */
#define round_up_overflow(v, size, res) (__extension__({ \
	__typeof__(res) __res = (res); \
	__typeof__(*(__res)) __roundup_tmp = 0; \
	__typeof__(v) __roundup_mask = (__typeof__(v))(size) - 1; \
	int __roundup_ovf = add_overflow((v), __roundup_mask, &__roundup_tmp); \
	\
	if (__roundup_ovf == 0) { \
		*(__res) = __roundup_tmp & ~__roundup_mask; \
	} \
	__roundup_ovf; \
}))

/*
 * Add a with b, then round up the result to align with a given size and
 * check whether overflow happens.
 * The rounded-up value is '*res',
 * return 0 on success and 1 on overflow.
 * '*res' is only written when neither step overflows.
 */
#define add_with_round_up_overflow(a, b, size, res) (__extension__({ \
	__typeof__(a) __a = (a); \
	__typeof__(__a) __add_res = 0; \
	\
	add_overflow((__a), (b), &__add_res) ? 1 : \
		round_up_overflow(__add_res, (size), (res)); \
}))
144 
145 /**
146  * Helper macro to ensure a value lies on a given boundary.
147  */
148 #define is_aligned(value, boundary)			\
149 	(round_up((uintptr_t) value, boundary) ==	\
150 	 round_down((uintptr_t) value, boundary))
151 
152 /*
153  * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
154  * Both arguments must be unsigned pointer values (i.e. uintptr_t).
155  */
156 #define check_uptr_overflow(_ptr, _inc)		\
157 	((_ptr) > (UINTPTR_MAX - (_inc)))
158 
159 /*
160  * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
161  * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
162  */
163 #define check_u32_overflow(_u32, _inc) \
164 	((_u32) > (UINT32_MAX - (_inc)))
165 
/* Register size of the current architecture, in bytes:
 * 8 on AArch64, 4 otherwise (AArch32). U() comes from utils_def_exp.h. */
#ifdef __aarch64__
#define REGSZ		U(8)
#else
#define REGSZ		U(4)
#endif
172 
173 /*
174  * Test for the current architecture version to be at least the version
175  * expected.
176  */
177 #define ARM_ARCH_AT_LEAST(_maj, _min) \
178 	((ARM_ARCH_MAJOR > (_maj)) || \
179 	 ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))
180 
181 /*
182  * Import an assembly or linker symbol as a C expression with the specified
183  * type
184  */
185 #define IMPORT_SYM(type, sym, name) \
186 	extern char sym[];\
187 	static const __attribute__((unused)) type name = (type) sym;
188 
189 /*
190  * When the symbol is used to hold a pointer, its alignment can be asserted
191  * with this macro. For example, if there is a linker symbol that is going to
192  * be used as a 64-bit pointer, the value of the linker symbol must also be
193  * aligned to 64 bit. This macro makes sure this is the case.
194  */
195 #define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)
196 
197 #define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")
198 
/* Compiler builtin of GCC >= 9 and planned in llvm */
#ifdef __HAVE_SPECULATION_SAFE_VALUE
# define SPECULATION_SAFE_VALUE(var) __builtin_speculation_safe_value(var)
#else
/* Fallback: pass the value through unchanged. Parenthesised so that an
 * expression argument keeps its grouping at the expansion site. */
# define SPECULATION_SAFE_VALUE(var) (var)
#endif
205 
206 /*
207  * Ticks elapsed in one second with a signal of 1 MHz
208  */
209 #define MHZ_TICKS_PER_SEC	U(1000000)
210 
211 /*
212  * Ticks elapsed in one second with a signal of 1 KHz
213  */
214 #define KHZ_TICKS_PER_SEC U(1000)
215 
216 #endif /* UTILS_DEF_H */
217