xref: /aosp_15_r20/external/sandboxed-api/sandboxed_api/sandbox2/util/bpf_helper.h (revision ec63e07ab9515d95e79c211197c445ef84cefa6a)
1 // Copyright 2019 Google LLC
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //     https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 // Wrapper around BPF macros, modified from the Chromium OS version. The
16 // original notice is below.
17 //
18 // Copyright (c) 2012 The Chromium OS Authors <[email protected]>
19 // Author: Will Drewry <[email protected]>
20 //
21 // The code may be used by anyone for any purpose,
22 // and can serve as a starting point for developing
23 // applications using prctl(PR_SET_SECCOMP, 2, ...).
24 //
25 // No guarantees are provided with respect to the correctness
26 // or functionality of this code.
27 
28 #ifndef SANDBOXED_API_SANDBOX2_UTIL_BPF_HELPER_H_
29 #define SANDBOXED_API_SANDBOX2_UTIL_BPF_HELPER_H_
30 
31 #ifdef __cplusplus
32 extern "C" {
33 #endif
34 
35 #include <asm/bitsperlong.h>	/* for __BITS_PER_LONG */
36 #include <endian.h>
37 #include <linux/filter.h>
38 #include <linux/seccomp.h>	/* for seccomp_data */
39 #include <linux/types.h>
40 #include <linux/unistd.h>
41 #include <stddef.h>
42 
/* Maximum number of distinct labels one filter may define/reference. */
#define BPF_LABELS_MAX 256
/*
 * Label table used to resolve symbolic jump targets. Each entry maps a
 * label string to an instruction location inside the filter program;
 * `count` is the number of slots in use. Populated via
 * seccomp_bpf_label() and consumed by bpf_resolve_jumps().
 */
struct bpf_labels {
	int count;
	struct __bpf_label {
		const char *label;	/* label name (stringized by FIND_LABEL) */
		__u32 location;		/* instruction offset within the filter */
	} labels[BPF_LABELS_MAX];
};
51 
/*
 * Patches the JUMP()/LABEL() placeholder instructions in `filter`
 * (`count` entries) into final BPF jumps, using the offsets recorded
 * in `labels`. NOTE(review): the exact return-value convention lives
 * in the .c implementation — presumably 0 on success; confirm there.
 */
int bpf_resolve_jumps(struct bpf_labels *labels,
		      struct sock_filter *filter, size_t count);
/*
 * Looks up `label` in `labels` and returns its table slot/location
 * value; used by FIND_LABEL() to build jump placeholders. NOTE(review):
 * presumably registers unknown labels up to BPF_LABELS_MAX — confirm.
 */
__u32 seccomp_bpf_label(struct bpf_labels *labels, const char *label);
/* Debug aid: prints the `count` instructions of `filter`. */
void seccomp_bpf_print(struct sock_filter *filter, size_t count);
56 
/*
 * Sentinel jt/jf byte values stored in the placeholder instructions
 * emitted by JUMP() and LABEL(). They mark instructions that still
 * need patching — presumably recognized by bpf_resolve_jumps() when
 * it rewrites the filter; confirm in the .c implementation.
 */
#define JUMP_JT 0xff
#define JUMP_JF 0xff
#define LABEL_JT 0xfe
#define LABEL_JF 0xfe
61 
/*
 * Terminal return actions: each expands to one BPF_RET statement that
 * ends filter evaluation with the given seccomp verdict. The low 16
 * bits of (val) are attached as SECCOMP_RET_DATA.
 *
 * (val) is fully parenthesized so expressions like ERRNO(a | b) group
 * correctly before being masked — `&` binds tighter than `|`.
 */
#define DENY \
	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL)
/* A synonym of DENY */
#define KILL \
	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL)
/* Deliver SECCOMP_RET_TRAP with (val)'s low 16 bits as data. */
#define TRAP(val) \
	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_TRAP | ((val) & SECCOMP_RET_DATA))
/* Fail the syscall; (val)'s low 16 bits carry the errno value. */
#define ERRNO(val) \
	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ERRNO | ((val) & SECCOMP_RET_DATA))
/* Notify a ptrace tracer; (val)'s low 16 bits are the trace data. */
#define TRACE(val) \
	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_TRACE | ((val) & SECCOMP_RET_DATA))
/* Permit the syscall. */
#define ALLOW \
	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
75 
/*
 * Emits a placeholder unconditional jump to `label`; the JUMP_JT/JUMP_JF
 * sentinels mark it for later patching by bpf_resolve_jumps().
 */
#define JUMP(labels, label) \
	BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
		 JUMP_JT, JUMP_JF)
/*
 * Marks the position of `label` itself; the LABEL_JT/LABEL_JF sentinels
 * let the resolver record this instruction's offset as the target.
 */
#define LABEL(labels, label) \
	BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
		 LABEL_JT, LABEL_JF)
/* If A == (nr), fall through into `jt`; otherwise skip over it. */
#define SYSCALL(nr, jt) \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (nr), 0, 1), \
	jt

/* Lame, but just an example */
/* Stringizes `label` and resolves it through the label table. */
#define FIND_LABEL(labels, label) seccomp_bpf_label((labels), #label)

/* Identity macro: forces expansion of a multi-instruction `jt` argument. */
#define EXPAND(...) __VA_ARGS__
90 
/* Ensure that we load the logically correct offset. */
/*
 * Byte offset of the LOW 32 bits of 64-bit syscall argument `idx`
 * inside struct seccomp_data. On big-endian hosts the low word is the
 * second half of the 8-byte slot, hence the extra sizeof(__u32).
 */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
#elif __BYTE_ORDER == __BIG_ENDIAN
#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
#else
#error "Unknown endianness"
#endif
99 
/* Map all width-sensitive operations */
/*
 * Width-generic front ends: JEQ/JNE/JGT/JLT/JGE/JLE/JA and ARG dispatch
 * to the 32- or 64-bit implementations based on the native long width,
 * so a filter can be written once for both ABIs.
 */
#if __BITS_PER_LONG == 32

#define JEQ(x, jt) JEQ32(x, EXPAND(jt))
#define JNE(x, jt) JNE32(x, EXPAND(jt))
#define JGT(x, jt) JGT32(x, EXPAND(jt))
#define JLT(x, jt) JLT32(x, EXPAND(jt))
#define JGE(x, jt) JGE32(x, EXPAND(jt))
#define JLE(x, jt) JLE32(x, EXPAND(jt))
#define JA(x, jt) JA32(x, EXPAND(jt))
#define ARG(i) ARG_32(i)

#elif __BITS_PER_LONG == 64

/* Ensure that we load the logically correct offset. */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define ENDIAN(_lo, _hi) _lo, _hi
#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
#elif __BYTE_ORDER == __BIG_ENDIAN
#define ENDIAN(_lo, _hi) _hi, _lo
#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
#endif

/*
 * Overlays a 64-bit value with its two 32-bit halves; ENDIAN orders the
 * anonymous-struct members so .lo32/.hi32 are correct on either byte
 * order.
 */
union arg64 {
	struct {
		__u32 ENDIAN(lo32, hi32);
	};
	__u64 u64;
};

/*
 * Each 64-bit comparison splits the constant (x) into its lo/hi words
 * at compile time via a union arg64 compound literal, then delegates to
 * the corresponding JXX64 instruction sequence.
 */
#define JEQ(x, jt) \
	JEQ64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JGT(x, jt) \
	JGT64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JGE(x, jt) \
	JGE64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JNE(x, jt) \
	JNE64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JLT(x, jt) \
	JLT64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JLE(x, jt) \
	JLE64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))

#define JA(x, jt) \
	JA64(((union arg64){.u64 = (x)}).lo32, \
	       ((union arg64){.u64 = (x)}).hi32, \
	       EXPAND(jt))
#define ARG(i) ARG_64(i)

#else
#error __BITS_PER_LONG value unusable.
#endif
164 
/* Loads the arg into A */
/* 32-bit ABI: one word per argument, loaded directly into A. */
#define ARG_32(idx) \
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))

/* Loads lo into M[0] and hi into M[1] and A */
/* 64-bit ABI: stash both halves in scratch memory, establishing the
 * invariant the JXX64 macros rely on (lo in M[0], hi in M[1] and A). */
#define ARG_64(idx) \
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
	BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, HI_ARG(idx)), \
	BPF_STMT(BPF_ST, 1) /* hi -> M[1] */
175 
/* if (A == value) run `jt`; otherwise skip over it. */
#define JEQ32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 0, 1), \
	jt

/* if (A != value): JEQ with the jt/jf targets swapped. */
#define JNE32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
	jt

/* if (A & value): bit-test via BPF_JSET. */
#define JA32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
	jt

/* if (A >= value), unsigned. */
#define JGE32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
	jt

/* if (A > value), unsigned. */
#define JGT32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
	jt

/* if (A <= value): !(A > value), i.e. JGT with swapped targets. */
#define JLE32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
	jt

/* if (A < value): !(A >= value), i.e. JGE with swapped targets. */
#define JLT32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
	jt
203 
/*
 * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both
 * A and M[1]. This invariant is kept by restoring A if necessary.
 *
 * Each sequence tests the hi word (already in A) first, then swaps the
 * lo word in from M[0] for the second test. The two trailing
 * BPF_STMT(BPF_LD+BPF_MEM, 1) instructions reload hi into A on both the
 * match and no-match paths, so the invariant holds for the next check.
 * Jump offsets are counted in instructions and include the expansion of
 * `jt`, which must be exactly one instruction long here.
 */
/* 64-bit ==: both words must match. */
#define JEQ64(lo, hi, jt) \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
	/* if (lo != arg.lo) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)

/* 64-bit !=: either word differs. */
#define JNE64(lo, hi, jt) \
	/* if (hi != arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), \
	/* if (lo != arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)

/* 64-bit bit-test: any bit set in either word. */
#define JA64(lo, hi, jt) \
	/* if (hi & arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), \
	/* if (lo & arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)

/* 64-bit unsigned >=: hi strictly greater, or hi equal and lo >=. */
#define JGE64(lo, hi, jt) \
	/* if (hi > arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), \
	/* if (lo >= arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)

/* 64-bit unsigned >: hi strictly greater, or hi equal and lo >. */
#define JGT64(lo, hi, jt) \
	/* if (hi > arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), \
	/* if (lo > arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)

/* 64-bit unsigned <=: hi strictly less, or hi equal and lo <=. */
#define JLE64(lo, hi, jt) \
	/* if (hi < arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), \
	/* if (lo <= arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)

/* 64-bit unsigned <: hi strictly less, or hi equal and lo <. */
#define JLT64(lo, hi, jt) \
	/* if (hi < arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), \
	/* if (lo < arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)
285 
/* Loads the syscall number (seccomp_data.nr) into A. */
#define LOAD_SYSCALL_NR \
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
		 offsetof(struct seccomp_data, nr))

/* Loads the architecture token (seccomp_data.arch) into A.
 * Indentation normalized to tabs for consistency with the rest of the
 * file; expansion is unchanged. */
#define LOAD_ARCH \
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
		 offsetof(struct seccomp_data, arch))
293 
294 #ifdef __cplusplus
295 }
296 #endif
297 
298 #endif  // SANDBOXED_API_SANDBOX2_UTIL_BPF_HELPER_H_
299