// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2019 Richard Palethorpe <[email protected]>
 *
 * Essential Extended Berkeley Packet Filter (eBPF) headers
 *
 * Mostly copied/adapted from linux/bpf.h and libbpf so that we can perform
 * some eBPF testing without any external dependencies.
 */

#ifndef LAPI_BPF_H__
#define LAPI_BPF_H__

#include <stdint.h>

#include "lapi/syscalls.h"

/* Start copy from linux/bpf_(common).h */
#define BPF_CLASS(code) ((code) & 0x07)
#define BPF_LD		0x00
#define BPF_LDX		0x01
#define BPF_ST		0x02
#define BPF_STX		0x03
#define BPF_ALU		0x04
#define BPF_JMP		0x05

#define BPF_JNE		0x50	/* jump != */

#define BPF_SIZE(code)	((code) & 0x18)
#define BPF_B		0x10	/* 8-bit */
#define BPF_W		0x00	/* 32-bit */
#define BPF_DW		0x18	/* double word (64-bit) */

#define BPF_MODE(code)	((code) & 0xe0)
#define BPF_IMM		0x00
#define BPF_MEM		0x60

#define BPF_OP(code)	((code) & 0xf0)
#define BPF_ADD		0x00
#define BPF_SUB		0x10
#define BPF_MUL		0x20
#define BPF_DIV		0x30
#define BPF_LSH		0x60
#define BPF_RSH		0x70
#define BPF_MOD		0x90

#define BPF_JEQ		0x10

#define BPF_SRC(code)	((code) & 0x08)
#define BPF_K		0x00
#define BPF_X		0x08

#define BPF_ALU64	0x07	/* alu mode in double word width */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	MAX_BPF_REG,
};

struct bpf_insn {
	uint8_t code;		/* opcode */
	uint8_t dst_reg:4;	/* dest register */
	uint8_t src_reg:4;	/* source register */
	int16_t off;		/* signed offset */
	int32_t imm;		/* signed immediate constant */
};

enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
	BPF_PROG_QUERY,
	BPF_RAW_TRACEPOINT_OPEN,
	BPF_BTF_LOAD,
	BPF_BTF_GET_FD_BY_ID,
	BPF_TASK_FD_QUERY,
	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
	BPF_MAP_FREEZE,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
	BPF_MAP_TYPE_CPUMAP,
	BPF_MAP_TYPE_XSKMAP,
	BPF_MAP_TYPE_SOCKHASH,
	BPF_MAP_TYPE_CGROUP_STORAGE,
	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
	BPF_MAP_TYPE_QUEUE,
	BPF_MAP_TYPE_STACK,
	BPF_MAP_TYPE_SK_STORAGE,
	BPF_MAP_TYPE_DEVMAP_HASH,
	BPF_MAP_TYPE_STRUCT_OPS,
	BPF_MAP_TYPE_RINGBUF,
	BPF_MAP_TYPE_INODE_STORAGE,
	BPF_MAP_TYPE_TASK_STORAGE,
	BPF_MAP_TYPE_BLOOM_FILTER,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_CGROUP_DEVICE,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_RAW_TRACEPOINT,
	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
	BPF_PROG_TYPE_LWT_SEG6LOCAL,
	BPF_PROG_TYPE_LIRC_MODE2,
	BPF_PROG_TYPE_SK_REUSEPORT,
	BPF_PROG_TYPE_FLOW_DISSECTOR,
	BPF_PROG_TYPE_CGROUP_SYSCTL,
	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
	BPF_PROG_TYPE_CGROUP_SOCKOPT,
};

#define BPF_PSEUDO_MAP_FD	1

#define BPF_OBJ_NAME_LEN 16U

#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */
#define BPF_F_LOCK	4 /* spin_lock-ed map_lookup/map_update */

#define aligned_uint64_t uint64_t __attribute__((aligned(8)))

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		uint32_t map_type;	/* one of enum bpf_map_type */
		uint32_t key_size;	/* size of key in bytes */
		uint32_t value_size;	/* size of value in bytes */
		uint32_t max_entries;	/* max number of entries in a map */
		uint32_t map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		uint32_t inner_map_fd;	/* fd pointing to the inner map */
		uint32_t numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
		char map_name[BPF_OBJ_NAME_LEN];
		uint32_t map_ifindex;	/* ifindex of netdev to create on */
		uint32_t btf_fd;	/* fd pointing to a BTF type data */
		uint32_t btf_key_type_id;	/* BTF type_id of the key */
		uint32_t btf_value_type_id;	/* BTF type_id of the value */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		uint32_t map_fd;
		aligned_uint64_t key;
		union {
			aligned_uint64_t value;
			aligned_uint64_t next_key;
		};
		uint64_t flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		uint32_t prog_type;	/* one of enum bpf_prog_type */
		uint32_t insn_cnt;
		aligned_uint64_t insns;
		aligned_uint64_t license;
		uint32_t log_level;	/* verbosity level of verifier */
		uint32_t log_size;	/* size of user buffer */
		aligned_uint64_t log_buf;	/* user supplied buffer */
		uint32_t kern_version;	/* not used */
		uint32_t prog_flags;
		char prog_name[BPF_OBJ_NAME_LEN];
		uint32_t prog_ifindex;	/* ifindex of netdev to prep for */
		/* For some prog types expected attach type must be known at
		 * load time to verify attach type specific parts of prog
		 * (context accesses, allowed helpers, etc).
		 */
		uint32_t expected_attach_type;
		uint32_t prog_btf_fd;	/* fd pointing to BTF type data */
		uint32_t func_info_rec_size;	/* userspace bpf_func_info size */
		aligned_uint64_t func_info;	/* func info */
		uint32_t func_info_cnt;	/* number of bpf_func_info records */
		uint32_t line_info_rec_size;	/* userspace bpf_line_info size */
		aligned_uint64_t line_info;	/* line info */
		uint32_t line_info_cnt;	/* number of bpf_line_info records */
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		aligned_uint64_t pathname;
		uint32_t bpf_fd;
		uint32_t file_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		uint32_t target_fd;	/* container object to attach to */
		uint32_t attach_bpf_fd;	/* eBPF program to attach */
		uint32_t attach_type;
		uint32_t attach_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		uint32_t prog_fd;
		uint32_t retval;
		uint32_t data_size_in;	/* input: len of data_in */
		uint32_t data_size_out;	/* input/output: len of data_out
					 * returns ENOSPC if data_out
					 * is too small.
					 */
		aligned_uint64_t data_in;
		aligned_uint64_t data_out;
		uint32_t repeat;
		uint32_t duration;
		uint32_t ctx_size_in;	/* input: len of ctx_in */
		uint32_t ctx_size_out;	/* input/output: len of ctx_out
					 * returns ENOSPC if ctx_out
					 * is too small.
					 */
		aligned_uint64_t ctx_in;
		aligned_uint64_t ctx_out;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			uint32_t start_id;
			uint32_t prog_id;
			uint32_t map_id;
			uint32_t btf_id;
		};
		uint32_t next_id;
		uint32_t open_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		uint32_t bpf_fd;
		uint32_t info_len;
		aligned_uint64_t info;
	} info;

	struct { /* anonymous struct used by BPF_PROG_QUERY command */
		uint32_t target_fd;	/* container object to query */
		uint32_t attach_type;
		uint32_t query_flags;
		uint32_t attach_flags;
		aligned_uint64_t prog_ids;
		uint32_t prog_cnt;
	} query;

	struct {
		uint64_t name;
		uint32_t prog_fd;
	} raw_tracepoint;

	struct { /* anonymous struct for BPF_BTF_LOAD */
		aligned_uint64_t btf;
		aligned_uint64_t btf_log_buf;
		uint32_t btf_size;
		uint32_t btf_log_size;
		uint32_t btf_log_level;
	};

	struct {
		uint32_t pid;		/* input: pid */
		uint32_t fd;		/* input: fd */
		uint32_t flags;		/* input: flags */
		uint32_t buf_len;	/* input/output: buf len */
		aligned_uint64_t buf;	/* input/output:
					 * tp_name for tracepoint
					 * symbol for kprobe
					 * filename for uprobe
					 */
		uint32_t prog_id;	/* output: prog_id */
		uint32_t fd_type;	/* output: BPF_FD_TYPE_* */
		uint64_t probe_offset;	/* output: probe_offset */
		uint64_t probe_addr;	/* output: probe_addr */
	} task_fd_query;
} __attribute__((aligned(8)));
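
/*
 * Illustrative note (not part of the kernel UAPI copy): the command passed
 * to bpf() selects which of the anonymous structs above the kernel reads.
 * For BPF_MAP_CREATE, a caller might fill the attribute roughly like this
 * (the sizes and the map name here are hypothetical):
 *
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_type = BPF_MAP_TYPE_ARRAY;
 *	attr.key_size = 4;
 *	attr.value_size = 8;
 *	attr.max_entries = 1;
 *	strncpy(attr.map_name, "example", BPF_OBJ_NAME_LEN - 1);
 *
 * Unused bytes of the attribute must be zero, otherwise the kernel rejects
 * the command with EINVAL.
 */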

#define __BPF_FUNC_MAPPER(FN) \
	FN(unspec), \
	FN(map_lookup_elem), \
	FN(map_update_elem), \
	FN(map_delete_elem), \
	FN(probe_read), \
	FN(ktime_get_ns), \
	FN(trace_printk), \
	FN(get_prandom_u32), \
	FN(get_smp_processor_id), \
	FN(skb_store_bytes), \
	FN(l3_csum_replace), \
	FN(l4_csum_replace), \
	FN(tail_call), \
	FN(clone_redirect), \
	FN(get_current_pid_tgid), \
	FN(get_current_uid_gid), \
	FN(get_current_comm), \
	FN(get_cgroup_classid), \
	FN(skb_vlan_push), \
	FN(skb_vlan_pop), \
	FN(skb_get_tunnel_key), \
	FN(skb_set_tunnel_key), \
	FN(perf_event_read), \
	FN(redirect), \
	FN(get_route_realm), \
	FN(perf_event_output), \
	FN(skb_load_bytes), \
	FN(get_stackid), \
	FN(csum_diff), \
	FN(skb_get_tunnel_opt), \
	FN(skb_set_tunnel_opt), \
	FN(skb_change_proto), \
	FN(skb_change_type), \
	FN(skb_under_cgroup), \
	FN(get_hash_recalc), \
	FN(get_current_task), \
	FN(probe_write_user), \
	FN(current_task_under_cgroup), \
	FN(skb_change_tail), \
	FN(skb_pull_data), \
	FN(csum_update), \
	FN(set_hash_invalid), \
	FN(get_numa_node_id), \
	FN(skb_change_head), \
	FN(xdp_adjust_head), \
	FN(probe_read_str), \
	FN(get_socket_cookie), \
	FN(get_socket_uid), \
	FN(set_hash), \
	FN(setsockopt), \
	FN(skb_adjust_room), \
	FN(redirect_map), \
	FN(sk_redirect_map), \
	FN(sock_map_update), \
	FN(xdp_adjust_meta), \
	FN(perf_event_read_value), \
	FN(perf_prog_read_value), \
	FN(getsockopt), \
	FN(override_return), \
	FN(sock_ops_cb_flags_set), \
	FN(msg_redirect_map), \
	FN(msg_apply_bytes), \
	FN(msg_cork_bytes), \
	FN(msg_pull_data), \
	FN(bind), \
	FN(xdp_adjust_tail), \
	FN(skb_get_xfrm_state), \
	FN(get_stack), \
	FN(skb_load_bytes_relative), \
	FN(fib_lookup), \
	FN(sock_hash_update), \
	FN(msg_redirect_hash), \
	FN(sk_redirect_hash), \
	FN(lwt_push_encap), \
	FN(lwt_seg6_store_bytes), \
	FN(lwt_seg6_adjust_srh), \
	FN(lwt_seg6_action), \
	FN(rc_repeat), \
	FN(rc_keydown), \
	FN(skb_cgroup_id), \
	FN(get_current_cgroup_id), \
	FN(get_local_storage), \
	FN(sk_select_reuseport), \
	FN(skb_ancestor_cgroup_id), \
	FN(sk_lookup_tcp), \
	FN(sk_lookup_udp), \
	FN(sk_release), \
	FN(map_push_elem), \
	FN(map_pop_elem), \
	FN(map_peek_elem), \
	FN(msg_push_data), \
	FN(msg_pop_data), \
	FN(rc_pointer_rel), \
	FN(spin_lock), \
	FN(spin_unlock), \
	FN(sk_fullsock), \
	FN(tcp_sock), \
	FN(skb_ecn_set_ce), \
	FN(get_listener_sock), \
	FN(skc_lookup_tcp), \
	FN(tcp_check_syncookie), \
	FN(sysctl_get_name), \
	FN(sysctl_get_current_value), \
	FN(sysctl_get_new_value), \
	FN(sysctl_set_new_value), \
	FN(strtol), \
	FN(strtoul), \
	FN(sk_storage_get), \
	FN(sk_storage_delete), \
	FN(send_signal), \
	FN(tcp_gen_syncookie), \
	FN(skb_output), \
	FN(probe_read_user), \
	FN(probe_read_kernel), \
	FN(probe_read_user_str), \
	FN(probe_read_kernel_str), \
	FN(tcp_send_ack), \
	FN(send_signal_thread), \
	FN(jiffies64), \
	FN(read_branch_records), \
	FN(get_ns_current_pid_tgid), \
	FN(xdp_output), \
	FN(get_netns_cookie), \
	FN(get_current_ancestor_cgroup_id), \
	FN(sk_assign), \
	FN(ktime_get_boot_ns), \
	FN(seq_printf), \
	FN(seq_write), \
	FN(sk_cgroup_id), \
	FN(sk_ancestor_cgroup_id), \
	FN(ringbuf_output), \
	FN(ringbuf_reserve), \
	FN(ringbuf_submit), \
	FN(ringbuf_discard), \
	FN(ringbuf_query), \
	FN(csum_level),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN

/* End copy from linux/bpf.h */
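
/*
 * Illustrative example (an assumption about usage, not copied from the
 * kernel headers): a helper call is encoded as a BPF_JMP | BPF_CALL
 * instruction whose 'imm' field holds the bpf_func_id value, e.g.
 *
 *	(struct bpf_insn) {
 *		.code = BPF_JMP | BPF_CALL,
 *		.imm = BPF_FUNC_get_prandom_u32,
 *	}
 *
 * The BPF_EMIT_CALL() macro defined below produces exactly this encoding
 * (BPF_FUNC_unspec is zero, so subtracting it is a no-op).
 */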

/* Start copy from tools/include/filter.h */

#define BPF_ALU64_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_ALU64_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV64_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_MOV32_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_LD_IMM64(DST, IMM) \
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_DW | BPF_IMM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = (uint32_t) (IMM) }), \
	((struct bpf_insn) { \
		.code = 0, /* zero is reserved opcode */ \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((uint64_t) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

#define BPF_MOV64_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV32_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_EMIT_CALL(FUNC) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_CALL, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((FUNC) - BPF_FUNC_unspec) })

#define BPF_EXIT_INSN() \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_EXIT, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = 0 })

/* End copy from tools/include/filter.h */
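
/*
 * Minimal sketch (an illustration, not copied from the kernel sources) of
 * how the macros above compose into a complete program. It assumes
 * 'map_fd' refers to a map with 4-byte keys, writes key 0 to the stack,
 * calls the map_lookup_elem helper and returns 0:
 *
 *	const struct bpf_insn prog[] = {
 *		BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),	// *(u32 *)(fp - 4) = 0
 *		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),	// r2 = fp
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),	// r2 = &key
 *		BPF_LD_MAP_FD(BPF_REG_1, map_fd),	// r1 = map (two insns)
 *		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
 *		BPF_MOV64_IMM(BPF_REG_0, 0),		// return 0
 *		BPF_EXIT_INSN(),
 *	};
 */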

/* Start copy from tools/lib/bpf */
static inline uint64_t ptr_to_u64(const void *ptr)
{
	return (uint64_t) (unsigned long) ptr;
}

static inline int bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return tst_syscall(__NR_bpf, cmd, attr, size);
}
/* End copy from tools/lib/bpf */
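
/*
 * Usage sketch (illustrative): updating one element of a map through the
 * wrapper above, assuming 'fd' refers to a map with 4-byte keys and 8-byte
 * values. bpf() returns a negative value with errno set on failure:
 *
 *	uint32_t key = 0;
 *	uint64_t val = 1;
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_fd = fd;
 *	attr.key = ptr_to_u64(&key);
 *	attr.value = ptr_to_u64(&val);
 *	attr.flags = BPF_ANY;
 *
 *	if (bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)) < 0)
 *		perror("BPF_MAP_UPDATE_ELEM");
 */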

#endif /* LAPI_BPF_H__ */