R"********(
/*
 * Copyright (c) 2015 PLUMgrid, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __BPF_HELPERS_H
#define __BPF_HELPERS_H

/* Linux 5.4 introduced asm_inline, which clang does not support.
 * Redefine it to plain asm so compilation succeeds.
 */
#ifdef asm_inline
#undef asm_inline
#define asm_inline asm
#endif

/* By the time bpf_helpers.h is included, uapi bpf.h has already been
 * included, which pulls in linux/types.h. Depending on the compiler
 * setup and kernel configs, that may bring in a definition of
 * asm_volatile_goto.
 *
 * clang does not support "asm volatile goto" yet, so redefine
 * asm_volatile_goto to invalid asm code. If a bpf program actually
 * uses asm_volatile_goto, a compilation error will appear.
 */
#ifdef asm_volatile_goto
#undef asm_volatile_goto
#endif
#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")

/* In 4.18 and later, when CONFIG_FUNCTION_TRACER is defined, the kernel
 * Makefile adds -DCC_USING_FENTRY. Let's do the same for bpf programs.
 */
#if defined(CONFIG_FUNCTION_TRACER)
#define CC_USING_FENTRY
#endif

#include <uapi/linux/bpf.h>
#include <uapi/linux/if_packet.h>
#include <linux/version.h>
#include <linux/log2.h>
#include <asm/page.h>

#ifndef CONFIG_BPF_SYSCALL
#error "CONFIG_BPF_SYSCALL is undefined, please check your .config or ask your Linux distro to enable this feature"
#endif

#ifdef PERF_MAX_STACK_DEPTH
#define BPF_MAX_STACK_DEPTH PERF_MAX_STACK_DEPTH
#else
#define BPF_MAX_STACK_DEPTH 127
#endif

/* Helper macro to place programs, maps, and the license in different
 * sections of the elf_bpf file. Section names are interpreted by the
 * elf_bpf loader.
 */
#define BCC_SEC(NAME) __attribute__((section(NAME), used))

#ifdef B_WORKAROUND
#define BCC_SEC_HELPERS BCC_SEC("helpers")
#else
#define BCC_SEC_HELPERS
#endif

// Associate a map with its key/value types
#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)	\
        struct ____btf_map_##name {			\
                type_key key;				\
                type_val value;				\
        };						\
        struct ____btf_map_##name			\
        __attribute__ ((section(".maps." #name), used))	\
                ____btf_map_##name = { }

// Associate a map with its value type, for QUEUE/STACK map types (no key)
#define BPF_ANNOTATE_KV_PAIR_QUEUESTACK(name, type_val)  \
        struct ____btf_map_##name {     \
                type_val value;       \
        };            \
        struct ____btf_map_##name     \
        __attribute__ ((section(".maps." #name), used)) \
                ____btf_map_##name = { }
// Changes to the macro require changes in BFrontendAction classes
#define BPF_F_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries, _flags) \
struct _name##_table_t { \
  _key_type key; \
  _leaf_type leaf; \
  _leaf_type * (*lookup) (_key_type *); \
  _leaf_type * (*lookup_or_init) (_key_type *, _leaf_type *); \
  _leaf_type * (*lookup_or_try_init) (_key_type *, _leaf_type *); \
  int (*update) (_key_type *, _leaf_type *); \
  int (*insert) (_key_type *, _leaf_type *); \
  int (*delete) (_key_type *); \
  void (*call) (void *, int index); \
  void (*increment) (_key_type, ...); \
  void (*atomic_increment) (_key_type, ...); \
  int (*get_stackid) (void *, u64); \
  void * (*sk_storage_get) (void *, void *, int); \
  int (*sk_storage_delete) (void *); \
  void * (*inode_storage_get) (void *, void *, int); \
  int (*inode_storage_delete) (void *); \
  void * (*task_storage_get) (void *, void *, int); \
  int (*task_storage_delete) (void *); \
  u32 max_entries; \
  int flags; \
}; \
__attribute__((section("maps/" _table_type))) \
struct _name##_table_t _name = { .flags = (_flags), .max_entries = (_max_entries) }; \
BPF_ANNOTATE_KV_PAIR(_name, _key_type, _leaf_type)
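
/* Illustrative sketch (not part of the original header): under the BCC
 * rewriter, a table declared with BPF_F_TABLE/BPF_TABLE is accessed through
 * the generated method pointers above. Names below are hypothetical.
 *
 *   BPF_F_TABLE("hash", u32, u64, byte_counts, 10240, BPF_F_NO_PREALLOC);
 *
 *   int on_event(struct pt_regs *ctx) {
 *     u32 key = bpf_get_current_pid_tgid() >> 32;
 *     u64 zero = 0;
 *     u64 *val = byte_counts.lookup_or_try_init(&key, &zero);
 *     if (val)
 *       (*val)++;
 *     return 0;
 *   }
 */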


// Changes to the macro require changes in BFrontendAction classes
#define BPF_QUEUESTACK(_table_type, _name, _leaf_type, _max_entries, _flags) \
struct _name##_table_t { \
  _leaf_type leaf; \
  int * (*peek) (_leaf_type *); \
  int * (*pop) (_leaf_type *); \
  int * (*push) (_leaf_type *, u64); \
  u32 max_entries; \
  int flags; \
}; \
__attribute__((section("maps/" _table_type))) \
struct _name##_table_t _name = { .flags = (_flags), .max_entries = (_max_entries) }; \
BPF_ANNOTATE_KV_PAIR_QUEUESTACK(_name, _leaf_type)

// Define a queue/stack with 3 parameters (_type is "queue" or "stack"); flags default to 0
#define BPF_QUEUE_STACK3(_type, _name, _leaf_type, _max_entries) \
  BPF_QUEUESTACK(_type, _name, _leaf_type, _max_entries, 0)

// Define a queue/stack with 4 parameters (_type is "queue" or "stack")
#define BPF_QUEUE_STACK4(_type, _name, _leaf_type, _max_entries, _flags) \
  BPF_QUEUESTACK(_type, _name, _leaf_type, _max_entries, _flags)

// helper for default-variable macro function
#define BPF_QUEUE_STACKX(_1, _2, _3, _4, NAME, ...) NAME

#define BPF_QUEUE(...) \
  BPF_QUEUE_STACKX(__VA_ARGS__, BPF_QUEUE_STACK4, BPF_QUEUE_STACK3)("queue", __VA_ARGS__)

#define BPF_STACK(...) \
  BPF_QUEUE_STACKX(__VA_ARGS__, BPF_QUEUE_STACK4, BPF_QUEUE_STACK3)("stack", __VA_ARGS__)
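
/* Illustrative sketch (not part of the original header): queue/stack maps
 * expose push/pop/peek instead of key-based lookup. Names are hypothetical.
 *
 *   BPF_QUEUE(pending, u64, 128);      // 3-arg form, flags default to 0
 *
 *   int producer(void *ctx) {
 *     u64 v = 42;
 *     pending.push(&v, 0);             // flags: 0 or BPF_EXIST
 *     return 0;
 *   }
 *
 *   int consumer(void *ctx) {
 *     u64 v;
 *     if (pending.pop(&v) == 0) {
 *       // consume v
 *     }
 *     return 0;
 *   }
 */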

#define BPF_QUEUESTACK_PINNED(_table_type, _name, _leaf_type, _max_entries, _flags, _pinned) \
BPF_QUEUESTACK(_table_type ":" _pinned, _name, _leaf_type, _max_entries, _flags)

#define BPF_QUEUESTACK_PUBLIC(_table_type, _name, _leaf_type, _max_entries, _flags) \
BPF_QUEUESTACK(_table_type, _name, _leaf_type, _max_entries, _flags); \
__attribute__((section("maps/export"))) \
struct _name##_table_t __##_name

#define BPF_QUEUESTACK_SHARED(_table_type, _name, _leaf_type, _max_entries, _flags) \
BPF_QUEUESTACK(_table_type, _name, _leaf_type, _max_entries, _flags); \
__attribute__((section("maps/shared"))) \
struct _name##_table_t __##_name

#define BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries) \
BPF_F_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries, 0)

#define BPF_TABLE_PINNED7(_table_type, _key_type, _leaf_type, _name, _max_entries, _pinned, _flags) \
  BPF_F_TABLE(_table_type ":" _pinned, _key_type, _leaf_type, _name, _max_entries, _flags)

#define BPF_TABLE_PINNED6(_table_type, _key_type, _leaf_type, _name, _max_entries, _pinned) \
  BPF_F_TABLE(_table_type ":" _pinned, _key_type, _leaf_type, _name, _max_entries, 0)

#define BPF_TABLE_PINNEDX(_1, _2, _3, _4, _5, _6, _7, NAME, ...) NAME

// Define a pinned table with an optional flags argument
#define BPF_TABLE_PINNED(...) \
  BPF_TABLE_PINNEDX(__VA_ARGS__, BPF_TABLE_PINNED7, BPF_TABLE_PINNED6)(__VA_ARGS__)
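
/* Illustrative sketch (not part of the original header): a pinned table keeps
 * its contents across program reloads by attaching the map to a bpffs path,
 * appended after ":" in the section name. The path below is hypothetical.
 *
 *   BPF_TABLE_PINNED("hash", u64, u64, ids, 1024, "/sys/fs/bpf/ids");
 */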

// Define a table as above, but allow it to be referenced by other modules
#define BPF_TABLE_PUBLIC(_table_type, _key_type, _leaf_type, _name, _max_entries) \
BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries); \
__attribute__((section("maps/export"))) \
struct _name##_table_t __##_name

// Define a table that is shared across the programs in the same namespace
#define BPF_TABLE_SHARED(_table_type, _key_type, _leaf_type, _name, _max_entries) \
BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries); \
__attribute__((section("maps/shared"))) \
struct _name##_table_t __##_name

// Identifier for the current CPU, used in perf_submit and perf_read.
// Prefers the BPF_F_CURRENT_CPU flag; falls back to calling the helper
// on older kernels. Can be overridden from BCC.
#ifndef CUR_CPU_IDENTIFIER
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
#define CUR_CPU_IDENTIFIER BPF_F_CURRENT_CPU
#else
#define CUR_CPU_IDENTIFIER bpf_get_smp_processor_id()
#endif
#endif

// Table for pushing custom events to userspace via a perf ring buffer
#define BPF_PERF_OUTPUT(_name) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  /* map.perf_submit(ctx, data, data_size) */ \
  int (*perf_submit) (void *, void *, u32); \
  int (*perf_submit_skb) (void *, u32, void *, u32); \
  u32 max_entries; \
}; \
__attribute__((section("maps/perf_output"))) \
struct _name##_table_t _name = { .max_entries = 0 }
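
/* Illustrative sketch (not part of the original header): the typical pattern
 * is to fill a struct on the stack and perf_submit it; userspace then reads
 * the events from the perf buffer. struct data_t and the probe name are
 * hypothetical.
 *
 *   struct data_t { u32 pid; u64 ts; };
 *   BPF_PERF_OUTPUT(events);
 *
 *   int trace_event(struct pt_regs *ctx) {
 *     struct data_t data = {};
 *     data.pid = bpf_get_current_pid_tgid() >> 32;
 *     data.ts = bpf_ktime_get_ns();
 *     events.perf_submit(ctx, &data, sizeof(data));
 *     return 0;
 *   }
 */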

// Table for pushing custom events to userspace via the BPF ring buffer
#define BPF_RINGBUF_OUTPUT(_name, _num_pages) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  /* map.ringbuf_output(data, data_size, flags) */ \
  int (*ringbuf_output) (void *, u64, u64); \
  /* map.ringbuf_reserve(data_size) */ \
  void* (*ringbuf_reserve) (u64); \
  /* map.ringbuf_discard(data, flags) */ \
  void (*ringbuf_discard) (void *, u64); \
  /* map.ringbuf_submit(data, flags) */ \
  void (*ringbuf_submit) (void *, u64); \
  /* map.ringbuf_query(flags) */ \
  u64 (*ringbuf_query) (u64); \
  u32 max_entries; \
}; \
__attribute__((section("maps/ringbuf"))) \
struct _name##_table_t _name = { .max_entries = ((_num_pages) * PAGE_SIZE) }
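
/* Illustrative sketch (not part of the original header): reserve/submit
 * avoids the extra copy that ringbuf_output makes. _num_pages sizes the
 * buffer in pages; names below are hypothetical.
 *
 *   struct event_t { u32 pid; };
 *   BPF_RINGBUF_OUTPUT(buffer, 8);   // 8 pages
 *
 *   int trace_event(struct pt_regs *ctx) {
 *     struct event_t *e = buffer.ringbuf_reserve(sizeof(struct event_t));
 *     if (!e)
 *       return 0;                    // buffer full
 *     e->pid = bpf_get_current_pid_tgid() >> 32;
 *     buffer.ringbuf_submit(e, 0);
 *     return 0;
 *   }
 */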

// Table for reading hw perf cpu counters
#define BPF_PERF_ARRAY(_name, _max_entries) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  /* counter = map.perf_read(index) */ \
  u64 (*perf_read) (int); \
  int (*perf_counter_value) (int, void *, u32); \
  u32 max_entries; \
}; \
__attribute__((section("maps/perf_array"))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }

// Table for cgroup file descriptors
#define BPF_CGROUP_ARRAY(_name, _max_entries) \
struct _name##_table_t { \
  int key; \
  u32 leaf; \
  int (*check_current_task) (int); \
  u32 max_entries; \
}; \
__attribute__((section("maps/cgroup_array"))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }

#define BPF_HASH1(_name) \
  BPF_TABLE("hash", u64, u64, _name, 10240)
#define BPF_HASH2(_name, _key_type) \
  BPF_TABLE("hash", _key_type, u64, _name, 10240)
#define BPF_HASH3(_name, _key_type, _leaf_type) \
  BPF_TABLE("hash", _key_type, _leaf_type, _name, 10240)
#define BPF_HASH4(_name, _key_type, _leaf_type, _size) \
  BPF_TABLE("hash", _key_type, _leaf_type, _name, _size)

// helper for default-variable macro function
#define BPF_HASHX(_1, _2, _3, _4, NAME, ...) NAME

// Define a hash map; some arguments are optional:
// BPF_HASH(name, key_type=u64, leaf_type=u64, size=10240)
#define BPF_HASH(...) \
  BPF_HASHX(__VA_ARGS__, BPF_HASH4, BPF_HASH3, BPF_HASH2, BPF_HASH1)(__VA_ARGS__)
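
/* Illustrative sketch (not part of the original header): the variadic macro
 * picks BPF_HASH1..4 by argument count, so all of these are valid. Names are
 * hypothetical.
 *
 *   BPF_HASH(h1);                     // key u64, leaf u64, 10240 entries
 *   BPF_HASH(h2, u32);                // key u32, leaf u64
 *   BPF_HASH(h3, u32, u64, 4096);     // fully specified
 *
 *   int count(struct pt_regs *ctx) {
 *     u32 pid = bpf_get_current_pid_tgid() >> 32;
 *     h2.increment(pid);              // per-key counter; see also atomic_increment
 *     return 0;
 *   }
 */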

#define BPF_PERCPU_HASH1(_name) \
  BPF_TABLE("percpu_hash", u64, u64, _name, 10240)
#define BPF_PERCPU_HASH2(_name, _key_type) \
  BPF_TABLE("percpu_hash", _key_type, u64, _name, 10240)
#define BPF_PERCPU_HASH3(_name, _key_type, _leaf_type) \
  BPF_TABLE("percpu_hash", _key_type, _leaf_type, _name, 10240)
#define BPF_PERCPU_HASH4(_name, _key_type, _leaf_type, _size) \
  BPF_TABLE("percpu_hash", _key_type, _leaf_type, _name, _size)

// helper for default-variable macro function
#define BPF_PERCPU_HASHX(_1, _2, _3, _4, NAME, ...) NAME

// Define a per-CPU hash map; some arguments are optional:
// BPF_PERCPU_HASH(name, key_type=u64, leaf_type=u64, size=10240)
#define BPF_PERCPU_HASH(...)                                            \
  BPF_PERCPU_HASHX(                                                     \
    __VA_ARGS__, BPF_PERCPU_HASH4, BPF_PERCPU_HASH3, BPF_PERCPU_HASH2, BPF_PERCPU_HASH1) \
           (__VA_ARGS__)

#define BPF_ARRAY1(_name) \
  BPF_TABLE("array", int, u64, _name, 10240)
#define BPF_ARRAY2(_name, _leaf_type) \
  BPF_TABLE("array", int, _leaf_type, _name, 10240)
#define BPF_ARRAY3(_name, _leaf_type, _size) \
  BPF_TABLE("array", int, _leaf_type, _name, _size)

// helper for default-variable macro function
#define BPF_ARRAYX(_1, _2, _3, NAME, ...) NAME

// Define an array map; some arguments are optional:
// BPF_ARRAY(name, leaf_type=u64, size=10240)
#define BPF_ARRAY(...) \
  BPF_ARRAYX(__VA_ARGS__, BPF_ARRAY3, BPF_ARRAY2, BPF_ARRAY1)(__VA_ARGS__)

#define BPF_PERCPU_ARRAY1(_name)                        \
    BPF_TABLE("percpu_array", int, u64, _name, 10240)
#define BPF_PERCPU_ARRAY2(_name, _leaf_type) \
    BPF_TABLE("percpu_array", int, _leaf_type, _name, 10240)
#define BPF_PERCPU_ARRAY3(_name, _leaf_type, _size) \
    BPF_TABLE("percpu_array", int, _leaf_type, _name, _size)

// helper for default-variable macro function
#define BPF_PERCPU_ARRAYX(_1, _2, _3, NAME, ...) NAME

// Define a per-CPU array map; some arguments are optional:
// BPF_PERCPU_ARRAY(name, leaf_type=u64, size=10240)
#define BPF_PERCPU_ARRAY(...)                                           \
  BPF_PERCPU_ARRAYX(                                                    \
    __VA_ARGS__, BPF_PERCPU_ARRAY3, BPF_PERCPU_ARRAY2, BPF_PERCPU_ARRAY1) \
           (__VA_ARGS__)

#define BPF_HIST1(_name) \
  BPF_TABLE("histogram", int, u64, _name, 64)
#define BPF_HIST2(_name, _key_type) \
  BPF_TABLE("histogram", _key_type, u64, _name, 64)
#define BPF_HIST3(_name, _key_type, _size) \
  BPF_TABLE("histogram", _key_type, u64, _name, _size)
#define BPF_HISTX(_1, _2, _3, NAME, ...) NAME

// Define a histogram; some arguments are optional:
// BPF_HISTOGRAM(name, key_type=int, size=64)
#define BPF_HISTOGRAM(...) \
  BPF_HISTX(__VA_ARGS__, BPF_HIST3, BPF_HIST2, BPF_HIST1)(__VA_ARGS__)
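
/* Illustrative sketch (not part of the original header): histograms are
 * usually keyed by a log2 slot so userspace can print a power-of-2
 * distribution. The latency variable is hypothetical.
 *
 *   BPF_HISTOGRAM(dist);
 *   // ... compute u64 latency_ns ...
 *   // dist.increment(bpf_log2l(latency_ns));
 */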

#define BPF_LPM_TRIE1(_name) \
  BPF_F_TABLE("lpm_trie", u64, u64, _name, 10240, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIE2(_name, _key_type) \
  BPF_F_TABLE("lpm_trie", _key_type, u64, _name, 10240, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIE3(_name, _key_type, _leaf_type) \
  BPF_F_TABLE("lpm_trie", _key_type, _leaf_type, _name, 10240, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIE4(_name, _key_type, _leaf_type, _size) \
  BPF_F_TABLE("lpm_trie", _key_type, _leaf_type, _name, _size, BPF_F_NO_PREALLOC)
#define BPF_LPM_TRIEX(_1, _2, _3, _4, NAME, ...) NAME

// Define an LPM trie map; some arguments are optional:
// BPF_LPM_TRIE(name, key_type=u64, leaf_type=u64, size=10240)
#define BPF_LPM_TRIE(...) \
  BPF_LPM_TRIEX(__VA_ARGS__, BPF_LPM_TRIE4, BPF_LPM_TRIE3, BPF_LPM_TRIE2, BPF_LPM_TRIE1)(__VA_ARGS__)
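
/* Illustrative sketch (not part of the original header): an LPM trie key must
 * start with a u32 prefix length, followed by the data to match. The IPv4 key
 * struct below is hypothetical.
 *
 *   struct lpm_key_v4 { u32 prefixlen; u32 addr; };
 *   BPF_LPM_TRIE(routes, struct lpm_key_v4, u64, 1024);
 *
 *   // longest-prefix match for a full /32 address:
 *   //   struct lpm_key_v4 k = { 32, daddr };
 *   //   u64 *v = routes.lookup(&k);
 */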

struct bpf_stacktrace {
  u64 ip[BPF_MAX_STACK_DEPTH];
};

struct bpf_stacktrace_buildid {
  struct bpf_stack_build_id trace[BPF_MAX_STACK_DEPTH];
};

#define BPF_STACK_TRACE(_name, _max_entries) \
  BPF_TABLE("stacktrace", int, struct bpf_stacktrace, _name, roundup_pow_of_two(_max_entries))

#define BPF_STACK_TRACE_BUILDID(_name, _max_entries) \
  BPF_F_TABLE("stacktrace", int, struct bpf_stacktrace_buildid, _name, roundup_pow_of_two(_max_entries), BPF_F_STACK_BUILD_ID)
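
/* Illustrative sketch (not part of the original header): stack trace tables
 * are filled via get_stackid and walked from userspace by stack id. Names are
 * hypothetical.
 *
 *   BPF_STACK_TRACE(stack_traces, 1024);
 *
 *   int on_sample(struct pt_regs *ctx) {
 *     int stackid = stack_traces.get_stackid(ctx, BPF_F_USER_STACK);
 *     if (stackid >= 0) {
 *       // aggregate by stackid, e.g. in a BPF_HASH
 *     }
 *     return 0;
 *   }
 */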

#define BPF_PROG_ARRAY(_name, _max_entries) \
  BPF_TABLE("prog", u32, u32, _name, _max_entries)

#define BPF_XDP_REDIRECT_MAP(_table_type, _leaf_type, _name, _max_entries) \
struct _name##_table_t { \
  u32 key; \
  _leaf_type leaf; \
  /* xdp_act = map.redirect_map(index, flag) */ \
  u64 (*redirect_map) (int, int); \
  u32 max_entries; \
}; \
__attribute__((section("maps/"_table_type))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }

#define BPF_DEVMAP(_name, _max_entries) \
  BPF_XDP_REDIRECT_MAP("devmap", int, _name, _max_entries)

#define BPF_CPUMAP(_name, _max_entries) \
  BPF_XDP_REDIRECT_MAP("cpumap", u32, _name, _max_entries)

#define _BPF_XSKMAP(_name, _max_entries, _pinned) \
struct _name##_table_t { \
  u32 key; \
  int leaf; \
  int * (*lookup) (int *); \
  /* xdp_act = map.redirect_map(index, flag) */ \
  u64 (*redirect_map) (int, int); \
  u32 max_entries; \
}; \
__attribute__((section("maps/xskmap" _pinned))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }
#define BPF_XSKMAP2(_name, _max_entries) _BPF_XSKMAP(_name, _max_entries, "")
#define BPF_XSKMAP3(_name, _max_entries, _pinned) _BPF_XSKMAP(_name, _max_entries, ":" _pinned)
#define BPF_XSKMAPX(_1, _2, _3, NAME, ...) NAME
#define BPF_XSKMAP(...) BPF_XSKMAPX(__VA_ARGS__, BPF_XSKMAP3, BPF_XSKMAP2)(__VA_ARGS__)

#define BPF_ARRAY_OF_MAPS(_name, _inner_map_name, _max_entries) \
  BPF_TABLE("array_of_maps$" _inner_map_name, int, int, _name, _max_entries)

#define BPF_HASH_OF_MAPS2(_name, _inner_map_name) \
  BPF_TABLE("hash_of_maps$" _inner_map_name, int, int, _name, 10240)
#define BPF_HASH_OF_MAPS3(_name, _key_type, _inner_map_name) \
  BPF_TABLE("hash_of_maps$" _inner_map_name, _key_type, int, _name, 10240)
#define BPF_HASH_OF_MAPS4(_name, _key_type, _inner_map_name, _max_entries) \
  BPF_TABLE("hash_of_maps$" _inner_map_name, _key_type, int, _name, _max_entries)
#define BPF_HASH_OF_MAPSX(_1, _2, _3, _4, NAME, ...) NAME

#define BPF_HASH_OF_MAPS(...) \
  BPF_HASH_OF_MAPSX(__VA_ARGS__, BPF_HASH_OF_MAPS4, BPF_HASH_OF_MAPS3, BPF_HASH_OF_MAPS2)(__VA_ARGS__)

#define BPF_SK_STORAGE(_name, _leaf_type) \
struct _name##_table_t { \
  int key; \
  _leaf_type leaf; \
  void * (*sk_storage_get) (void *, void *, int); \
  int (*sk_storage_delete) (void *); \
  u32 flags; \
}; \
__attribute__((section("maps/sk_storage"))) \
struct _name##_table_t _name = { .flags = BPF_F_NO_PREALLOC }; \
BPF_ANNOTATE_KV_PAIR(_name, int, _leaf_type)

#define BPF_INODE_STORAGE(_name, _leaf_type) \
struct _name##_table_t { \
  int key; \
  _leaf_type leaf; \
  void * (*inode_storage_get) (void *, void *, int); \
  int (*inode_storage_delete) (void *); \
  u32 flags; \
}; \
__attribute__((section("maps/inode_storage"))) \
struct _name##_table_t _name = { .flags = BPF_F_NO_PREALLOC }; \
BPF_ANNOTATE_KV_PAIR(_name, int, _leaf_type)

#define BPF_TASK_STORAGE(_name, _leaf_type) \
struct _name##_table_t { \
  int key; \
  _leaf_type leaf; \
  void * (*task_storage_get) (void *, void *, int); \
  int (*task_storage_delete) (void *); \
  u32 flags; \
}; \
__attribute__((section("maps/task_storage"))) \
struct _name##_table_t _name = { .flags = BPF_F_NO_PREALLOC }; \
BPF_ANNOTATE_KV_PAIR(_name, int, _leaf_type)

#define BPF_SOCKMAP_COMMON(_name, _max_entries, _kind, _helper_name) \
struct _name##_table_t { \
  u32 key; \
  int leaf; \
  int (*update) (u32 *, int *); \
  int (*delete) (u32 *); \
  /* ret = map.sock_map_update(ctx, key, flag) */ \
  int (* _helper_name) (void *, void *, u64); \
  u32 max_entries; \
}; \
__attribute__((section("maps/" _kind))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }; \
BPF_ANNOTATE_KV_PAIR(_name, u32, int)

#define BPF_SOCKMAP(_name, _max_entries) \
  BPF_SOCKMAP_COMMON(_name, _max_entries, "sockmap", sock_map_update)

#define BPF_SOCKHASH_COMMON(_name, _key_type, _max_entries) \
struct _name##_table_t {\
  _key_type key;\
  int leaf; \
  int (*update) (_key_type *, int *); \
  int (*delete) (_key_type *); \
  int (*sock_hash_update) (void *, void *, u64); \
  int (*msg_redirect_hash) (void *, void *, u64); \
  int (*sk_redirect_hash) (void *, void *, u64); \
  u32 max_entries; \
}; \
__attribute__((section("maps/sockhash"))) \
struct _name##_table_t _name = { .max_entries = (_max_entries) }; \
BPF_ANNOTATE_KV_PAIR(_name, _key_type, int)

#define BPF_SOCKHASH1(_name) \
  BPF_SOCKHASH_COMMON(_name, u32, 10240)
#define BPF_SOCKHASH2(_name, _key_type) \
  BPF_SOCKHASH_COMMON(_name, _key_type, 10240)
#define BPF_SOCKHASH3(_name, _key_type, _max_entries) \
  BPF_SOCKHASH_COMMON(_name, _key_type, _max_entries)

#define BPF_SOCKHASHX(_1, _2, _3, NAME, ...) NAME
// The key can be any type, e.g. a five-tuple; the value type is always int.
// BPF_SOCKHASH(name, key_type=u32, size=10240)
#define BPF_SOCKHASH(...) \
  BPF_SOCKHASHX(__VA_ARGS__, BPF_SOCKHASH3, BPF_SOCKHASH2, BPF_SOCKHASH1)(__VA_ARGS__)

#define BPF_CGROUP_STORAGE_COMMON(_name, _leaf_type, _kind) \
struct _name##_table_t { \
  struct bpf_cgroup_storage_key key; \
  _leaf_type leaf; \
  _leaf_type * (*lookup) (struct bpf_cgroup_storage_key *); \
  int (*update) (struct bpf_cgroup_storage_key *, _leaf_type *); \
  int (*get_local_storage) (u64); \
}; \
__attribute__((section("maps/" _kind))) \
struct _name##_table_t _name = { 0 }; \
BPF_ANNOTATE_KV_PAIR(_name, struct bpf_cgroup_storage_key, _leaf_type)

#define BPF_CGROUP_STORAGE(_name, _leaf_type) \
  BPF_CGROUP_STORAGE_COMMON(_name, _leaf_type, "cgroup_storage")

#define BPF_PERCPU_CGROUP_STORAGE(_name, _leaf_type) \
  BPF_CGROUP_STORAGE_COMMON(_name, _leaf_type, "percpu_cgroup_storage")

// packet parsing state machine helpers
#define cursor_advance(_cursor, _len) \
  ({ void *_tmp = _cursor; _cursor += _len; _tmp; })
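
/* Illustrative sketch (not part of the original header): cursor_advance
 * returns the old cursor and bumps it by _len, which makes sequential header
 * parsing read naturally. ethernet_t/ip_t come from bcc/proto.h; the program
 * shape is hypothetical.
 *
 *   u8 *cursor = 0;
 *   struct ethernet_t *eth = cursor_advance(cursor, sizeof(*eth));
 *   if (eth->type == 0x0800) {
 *     struct ip_t *ip = cursor_advance(cursor, sizeof(*ip));
 *     // inspect ip->src, ip->dst ...
 *   }
 */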

#ifdef LINUX_VERSION_CODE_OVERRIDE
unsigned _version BCC_SEC("version") = LINUX_VERSION_CODE_OVERRIDE;
#else
unsigned _version BCC_SEC("version") = LINUX_VERSION_CODE;
#endif

/* helper functions called from eBPF programs written in C */
static void *(*bpf_map_lookup_elem)(void *map, void *key) =
  (void *) BPF_FUNC_map_lookup_elem;
static int (*bpf_map_update_elem)(void *map, void *key, void *value, u64 flags) =
  (void *) BPF_FUNC_map_update_elem;
static int (*bpf_map_delete_elem)(void *map, void *key) =
  (void *) BPF_FUNC_map_delete_elem;
static int (*bpf_probe_read)(void *dst, u64 size, const void *unsafe_ptr) =
  (void *) BPF_FUNC_probe_read;
static u64 (*bpf_ktime_get_ns)(void) =
  (void *) BPF_FUNC_ktime_get_ns;
static u32 (*bpf_get_prandom_u32)(void) =
  (void *) BPF_FUNC_get_prandom_u32;
static int (*bpf_trace_printk_)(const char *fmt, u64 fmt_size, ...) =
  (void *) BPF_FUNC_trace_printk;
static int (*bpf_probe_read_str)(void *dst, u64 size, const void *unsafe_ptr) =
  (void *) BPF_FUNC_probe_read_str;
int bpf_trace_printk(const char *fmt, ...) asm("llvm.bpf.extra");
static inline __attribute__((always_inline))
void bpf_tail_call_(void *map_fd, void *ctx, int index) {
  ((void (*)(void *, u64, int))BPF_FUNC_tail_call)(ctx, (u64)map_fd, index);
}
static int (*bpf_clone_redirect)(void *ctx, int ifindex, u32 flags) =
  (void *) BPF_FUNC_clone_redirect;
static u64 (*bpf_get_smp_processor_id)(void) =
  (void *) BPF_FUNC_get_smp_processor_id;
static u64 (*bpf_get_current_pid_tgid)(void) =
  (void *) BPF_FUNC_get_current_pid_tgid;
static u64 (*bpf_get_current_uid_gid)(void) =
  (void *) BPF_FUNC_get_current_uid_gid;
static int (*bpf_get_current_comm)(void *buf, int buf_size) =
  (void *) BPF_FUNC_get_current_comm;
static u64 (*bpf_get_cgroup_classid)(void *ctx) =
  (void *) BPF_FUNC_get_cgroup_classid;
static u64 (*bpf_skb_vlan_push)(void *ctx, u16 proto, u16 vlan_tci) =
  (void *) BPF_FUNC_skb_vlan_push;
static u64 (*bpf_skb_vlan_pop)(void *ctx) =
  (void *) BPF_FUNC_skb_vlan_pop;
static int (*bpf_skb_get_tunnel_key)(void *ctx, void *to, u32 size, u64 flags) =
  (void *) BPF_FUNC_skb_get_tunnel_key;
static int (*bpf_skb_set_tunnel_key)(void *ctx, void *from, u32 size, u64 flags) =
  (void *) BPF_FUNC_skb_set_tunnel_key;
static u64 (*bpf_perf_event_read)(void *map, u64 flags) =
  (void *) BPF_FUNC_perf_event_read;
static int (*bpf_redirect)(int ifindex, u32 flags) =
  (void *) BPF_FUNC_redirect;
static u32 (*bpf_get_route_realm)(void *ctx) =
  (void *) BPF_FUNC_get_route_realm;
static int (*bpf_perf_event_output)(void *ctx, void *map, u64 index, void *data, u32 size) =
  (void *) BPF_FUNC_perf_event_output;
static int (*bpf_skb_load_bytes)(void *ctx, int offset, void *to, u32 len) =
  (void *) BPF_FUNC_skb_load_bytes;
static int (*bpf_perf_event_read_value)(void *map, u64 flags, void *buf, u32 buf_size) =
  (void *) BPF_FUNC_perf_event_read_value;
static int (*bpf_perf_prog_read_value)(void *ctx, void *buf, u32 buf_size) =
  (void *) BPF_FUNC_perf_prog_read_value;
static int (*bpf_current_task_under_cgroup)(void *map, int index) =
  (void *) BPF_FUNC_current_task_under_cgroup;
static u32 (*bpf_get_socket_cookie)(void *ctx) =
  (void *) BPF_FUNC_get_socket_cookie;
static u64 (*bpf_get_socket_uid)(void *ctx) =
  (void *) BPF_FUNC_get_socket_uid;
static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval, int optlen) =
  (void *) BPF_FUNC_getsockopt;
static int (*bpf_redirect_map)(void *map, int key, int flags) =
  (void *) BPF_FUNC_redirect_map;
static int (*bpf_set_hash)(void *ctx, u32 hash) =
  (void *) BPF_FUNC_set_hash;
static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, int optlen) =
  (void *) BPF_FUNC_setsockopt;
static int (*bpf_skb_adjust_room)(void *ctx, int len_diff, u32 mode, u64 flags) =
  (void *) BPF_FUNC_skb_adjust_room;
static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
  (void *) BPF_FUNC_skb_under_cgroup;
static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, int size,
                                              unsigned long long netns_id,
                                              unsigned long long flags) =
  (void *) BPF_FUNC_skc_lookup_tcp;
static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
  (void *) BPF_FUNC_sk_redirect_map;
static int (*bpf_sock_map_update)(void *map, void *key, void *value, unsigned long long flags) =
  (void *) BPF_FUNC_sock_map_update;
static int (*bpf_strtol)(const char *buf, size_t buf_len, u64 flags, long *res) =
  (void *) BPF_FUNC_strtol;
static int (*bpf_strtoul)(const char *buf, size_t buf_len, u64 flags, unsigned long *res) =
  (void *) BPF_FUNC_strtoul;
static int (*bpf_sysctl_get_current_value)(struct bpf_sysctl *ctx, char *buf, size_t buf_len) =
  (void *) BPF_FUNC_sysctl_get_current_value;
static int (*bpf_sysctl_get_name)(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) =
  (void *) BPF_FUNC_sysctl_get_name;
static int (*bpf_sysctl_get_new_value)(struct bpf_sysctl *ctx, char *buf, size_t buf_len) =
  (void *) BPF_FUNC_sysctl_get_new_value;
static int (*bpf_sysctl_set_new_value)(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) =
  (void *) BPF_FUNC_sysctl_set_new_value;
static int (*bpf_tcp_check_syncookie)(void *sk, void *ip, int ip_len, void *tcp,
                                      int tcp_len) =
  (void *) BPF_FUNC_tcp_check_syncookie;
static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
  (void *) BPF_FUNC_xdp_adjust_meta;

/* bcc_get_stackid will return a negative value in the case of an error.
 *
 * BPF_STACK_TRACE(_name, _size) will allocate space for _size stack traces.
 * -ENOMEM will be returned when this limit is reached.
 *
 * -EFAULT is typically returned when requesting user-space stack traces
 * (using BPF_F_USER_STACK) for kernel threads. However, a valid stackid may
 * be returned in some cases; consider a tracepoint or kprobe executing in
 * kernel context. Given this, you can typically ignore -EFAULT errors when
 * retrieving user-space stack traces.
 */
static int (*bcc_get_stackid_)(void *ctx, void *map, u64 flags) =
  (void *) BPF_FUNC_get_stackid;
static inline __attribute__((always_inline))
int bcc_get_stackid(uintptr_t map, void *ctx, u64 flags) {
  return bcc_get_stackid_(ctx, (void *)map, flags);
}

static int (*bpf_csum_diff)(void *from, u64 from_size, void *to, u64 to_size, u64 seed) =
  (void *) BPF_FUNC_csum_diff;
static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, u32 size) =
  (void *) BPF_FUNC_skb_get_tunnel_opt;
static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, u32 size) =
  (void *) BPF_FUNC_skb_set_tunnel_opt;
static int (*bpf_skb_change_proto)(void *ctx, u16 proto, u64 flags) =
  (void *) BPF_FUNC_skb_change_proto;
static int (*bpf_skb_change_type)(void *ctx, u32 type) =
  (void *) BPF_FUNC_skb_change_type;
static u32 (*bpf_get_hash_recalc)(void *ctx) =
  (void *) BPF_FUNC_get_hash_recalc;
static u64 (*bpf_get_current_task)(void) =
  (void *) BPF_FUNC_get_current_task;
static int (*bpf_probe_write_user)(void *dst, void *src, u32 size) =
  (void *) BPF_FUNC_probe_write_user;
static int (*bpf_skb_change_tail)(void *ctx, u32 new_len, u64 flags) =
  (void *) BPF_FUNC_skb_change_tail;
static int (*bpf_skb_pull_data)(void *ctx, u32 len) =
  (void *) BPF_FUNC_skb_pull_data;
static int (*bpf_csum_update)(void *ctx, u16 csum) =
  (void *) BPF_FUNC_csum_update;
static int (*bpf_set_hash_invalid)(void *ctx) =
  (void *) BPF_FUNC_set_hash_invalid;
static int (*bpf_get_numa_node_id)(void) =
  (void *) BPF_FUNC_get_numa_node_id;
static int (*bpf_skb_change_head)(void *ctx, u32 len, u64 flags) =
  (void *) BPF_FUNC_skb_change_head;
static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
  (void *) BPF_FUNC_xdp_adjust_head;
static int (*bpf_override_return)(void *pt_regs, unsigned long rc) =
  (void *) BPF_FUNC_override_return;
static int (*bpf_sock_ops_cb_flags_set)(void *skops, int flags) =
  (void *) BPF_FUNC_sock_ops_cb_flags_set;
static int (*bpf_msg_redirect_map)(void *msg, void *map, u32 key, u64 flags) =
  (void *) BPF_FUNC_msg_redirect_map;
static int (*bpf_msg_apply_bytes)(void *msg, u32 bytes) =
  (void *) BPF_FUNC_msg_apply_bytes;
static int (*bpf_msg_cork_bytes)(void *msg, u32 bytes) =
  (void *) BPF_FUNC_msg_cork_bytes;
static int (*bpf_msg_pull_data)(void *msg, u32 start, u32 end, u64 flags) =
  (void *) BPF_FUNC_msg_pull_data;
static int (*bpf_bind)(void *ctx, void *addr, int addr_len) =
  (void *) BPF_FUNC_bind;
static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
  (void *) BPF_FUNC_xdp_adjust_tail;
static int (*bpf_skb_get_xfrm_state)(void *ctx, u32 index, void *xfrm_state, u32 size, u64 flags) =
  (void *) BPF_FUNC_skb_get_xfrm_state;
static int (*bpf_get_stack)(void *ctx, void *buf, u32 size, u64 flags) =
  (void *) BPF_FUNC_get_stack;
static int (*bpf_skb_load_bytes_relative)(void *ctx, u32 offset, void *to, u32 len, u32 start_header) =
  (void *) BPF_FUNC_skb_load_bytes_relative;
static int (*bpf_fib_lookup)(void *ctx, void *params, int plen, u32 flags) =
  (void *) BPF_FUNC_fib_lookup;
static int (*bpf_sock_hash_update)(void *ctx, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_sock_hash_update;
static int (*bpf_msg_redirect_hash)(void *ctx, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_msg_redirect_hash;
static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_sk_redirect_hash;
static int (*bpf_lwt_push_encap)(void *skb, u32 type, void *hdr, u32 len) =
  (void *) BPF_FUNC_lwt_push_encap;
static int (*bpf_lwt_seg6_store_bytes)(void *ctx, u32 offset, const void *from, u32 len) =
  (void *) BPF_FUNC_lwt_seg6_store_bytes;
static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, u32 offset, s32 delta) =
  (void *) BPF_FUNC_lwt_seg6_adjust_srh;
static int (*bpf_lwt_seg6_action)(void *ctx, u32 action, void *param, u32 param_len) =
  (void *) BPF_FUNC_lwt_seg6_action;
static int (*bpf_rc_keydown)(void *ctx, u32 protocol, u64 scancode, u32 toggle) =
  (void *) BPF_FUNC_rc_keydown;
static int (*bpf_rc_repeat)(void *ctx) =
  (void *) BPF_FUNC_rc_repeat;
static u64 (*bpf_skb_cgroup_id)(void *skb) =
  (void *) BPF_FUNC_skb_cgroup_id;
static u64 (*bpf_get_current_cgroup_id)(void) =
  (void *) BPF_FUNC_get_current_cgroup_id;
static u64 (*bpf_skb_ancestor_cgroup_id)(void *skb, int ancestor_level) =
  (void *) BPF_FUNC_skb_ancestor_cgroup_id;
static void * (*bpf_get_local_storage)(void *map, u64 flags) =
  (void *) BPF_FUNC_get_local_storage;
static int (*bpf_sk_select_reuseport)(void *reuse, void *map, void *key, u64 flags) =
  (void *) BPF_FUNC_sk_select_reuseport;
static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
                                             int size, unsigned int netns_id,
                                             unsigned long long flags) =
  (void *) BPF_FUNC_sk_lookup_tcp;
static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
                                             int size, unsigned int netns_id,
                                             unsigned long long flags) =
  (void *) BPF_FUNC_sk_lookup_udp;
static int (*bpf_sk_release)(void *sk) =
  (void *) BPF_FUNC_sk_release;
static int (*bpf_map_push_elem)(void *map, const void *value, u64 flags) =
  (void *) BPF_FUNC_map_push_elem;
static int (*bpf_map_pop_elem)(void *map, void *value) =
  (void *) BPF_FUNC_map_pop_elem;
static int (*bpf_map_peek_elem)(void *map, void *value) =
  (void *) BPF_FUNC_map_peek_elem;
static int (*bpf_msg_push_data)(void *skb, u32 start, u32 len, u64 flags) =
  (void *) BPF_FUNC_msg_push_data;
static int (*bpf_msg_pop_data)(void *msg, u32 start, u32 pop, u64 flags) =
  (void *) BPF_FUNC_msg_pop_data;
static int (*bpf_rc_pointer_rel)(void *ctx, s32 rel_x, s32 rel_y) =
  (void *) BPF_FUNC_rc_pointer_rel;
static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
  (void *) BPF_FUNC_spin_lock;
static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
  (void *) BPF_FUNC_spin_unlock;
static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
  (void *) BPF_FUNC_sk_fullsock;
static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
  (void *) BPF_FUNC_tcp_sock;
static int (*bpf_skb_ecn_set_ce)(void *ctx) =
  (void *) BPF_FUNC_skb_ecn_set_ce;
static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
  (void *) BPF_FUNC_get_listener_sock;
static void *(*bpf_sk_storage_get)(void *map, void *sk,
                                   void *value, __u64 flags) =
  (void *) BPF_FUNC_sk_storage_get;
static int (*bpf_sk_storage_delete)(void *map, void *sk) =
  (void *)BPF_FUNC_sk_storage_delete;
static int (*bpf_send_signal)(unsigned sig) = (void *)BPF_FUNC_send_signal;
static long long (*bpf_tcp_gen_syncookie)(void *sk, void *ip,
                                          int ip_len, void *tcp, int tcp_len) =
  (void *) BPF_FUNC_tcp_gen_syncookie;
static int (*bpf_skb_output)(void *ctx, void *map, __u64 flags, void *data,
                             __u64 size) =
  (void *)BPF_FUNC_skb_output;

static int (*bpf_probe_read_user)(void *dst, __u32 size,
                                  const void *unsafe_ptr) =
  (void *)BPF_FUNC_probe_read_user;
static int (*bpf_probe_read_kernel)(void *dst, __u32 size,
                                    const void *unsafe_ptr) =
  (void *)BPF_FUNC_probe_read_kernel;
static int (*bpf_probe_read_user_str)(void *dst, __u32 size,
                                      const void *unsafe_ptr) =
  (void *)BPF_FUNC_probe_read_user_str;
static int (*bpf_probe_read_kernel_str)(void *dst, __u32 size,
                                        const void *unsafe_ptr) =
  (void *)BPF_FUNC_probe_read_kernel_str;
static int (*bpf_tcp_send_ack)(void *tp, __u32 rcv_nxt) =
  (void *)BPF_FUNC_tcp_send_ack;
static int (*bpf_send_signal_thread)(__u32 sig) =
  (void *)BPF_FUNC_send_signal_thread;
static __u64 (*bpf_jiffies64)(void) = (void *)BPF_FUNC_jiffies64;

struct bpf_perf_event_data;
static int (*bpf_read_branch_records)(struct bpf_perf_event_data *ctx, void *buf,
                                      __u32 size, __u64 flags) =
  (void *)BPF_FUNC_read_branch_records;
static int (*bpf_get_ns_current_pid_tgid)(__u64 dev, __u64 ino,
                                          struct bpf_pidns_info *nsdata,
                                          __u32 size) =
  (void *)BPF_FUNC_get_ns_current_pid_tgid;

struct bpf_map;
static int (*bpf_xdp_output)(void *ctx, struct bpf_map *map, __u64 flags,
                             void *data, __u64 size) =
  (void *)BPF_FUNC_xdp_output;
static __u64 (*bpf_get_netns_cookie)(void *ctx) = (void *)BPF_FUNC_get_netns_cookie;
static __u64 (*bpf_get_current_ancestor_cgroup_id)(int ancestor_level) =
  (void *)BPF_FUNC_get_current_ancestor_cgroup_id;

struct sk_buff;
static int (*bpf_sk_assign)(void *skb, void *sk, __u64 flags) =
  (void *)BPF_FUNC_sk_assign;

static __u64 (*bpf_ktime_get_boot_ns)(void) = (void *)BPF_FUNC_ktime_get_boot_ns;

struct seq_file;
static int (*bpf_seq_printf)(struct seq_file *m, const char *fmt, __u32 fmt_size,
                             const void *data, __u32 data_len) =
  (void *)BPF_FUNC_seq_printf;
static int (*bpf_seq_write)(struct seq_file *m, const void *data, __u32 len) =
  (void *)BPF_FUNC_seq_write;

static __u64 (*bpf_sk_cgroup_id)(void *sk) = (void *)BPF_FUNC_sk_cgroup_id;
static __u64 (*bpf_sk_ancestor_cgroup_id)(void *sk, int ancestor_level) =
  (void *)BPF_FUNC_sk_ancestor_cgroup_id;

static int (*bpf_ringbuf_output)(void *ringbuf, void *data, __u64 size, __u64 flags) =
  (void *)BPF_FUNC_ringbuf_output;
static void *(*bpf_ringbuf_reserve)(void *ringbuf, __u64 size, __u64 flags) =
  (void *)BPF_FUNC_ringbuf_reserve;
static void (*bpf_ringbuf_submit)(void *data, __u64 flags) =
  (void *)BPF_FUNC_ringbuf_submit;
static void (*bpf_ringbuf_discard)(void *data, __u64 flags) =
  (void *)BPF_FUNC_ringbuf_discard;
static __u64 (*bpf_ringbuf_query)(void *ringbuf, __u64 flags) =
  (void *)BPF_FUNC_ringbuf_query;

static int (*bpf_csum_level)(struct __sk_buff *skb, __u64 level) =
  (void *)BPF_FUNC_csum_level;

struct tcp6_sock;
struct tcp_sock;
struct tcp_timewait_sock;
struct tcp_request_sock;
struct udp6_sock;
static struct tcp6_sock *(*bpf_skc_to_tcp6_sock)(void *sk) =
  (void *)BPF_FUNC_skc_to_tcp6_sock;
static struct tcp_sock *(*bpf_skc_to_tcp_sock)(void *sk) =
  (void *)BPF_FUNC_skc_to_tcp_sock;
static struct tcp_timewait_sock *(*bpf_skc_to_tcp_timewait_sock)(void *sk) =
  (void *)BPF_FUNC_skc_to_tcp_timewait_sock;
static struct tcp_request_sock *(*bpf_skc_to_tcp_request_sock)(void *sk) =
  (void *)BPF_FUNC_skc_to_tcp_request_sock;
static struct udp6_sock *(*bpf_skc_to_udp6_sock)(void *sk) =
  (void *)BPF_FUNC_skc_to_udp6_sock;

struct task_struct;
static long (*bpf_get_task_stack)(struct task_struct *task, void *buf,
                                  __u32 size, __u64 flags) =
  (void *)BPF_FUNC_get_task_stack;

struct bpf_sock_ops;
static long (*bpf_load_hdr_opt)(struct bpf_sock_ops *skops, void *searchby_res,
                                u32 len, u64 flags) =
  (void *)BPF_FUNC_load_hdr_opt;
static long (*bpf_store_hdr_opt)(struct bpf_sock_ops *skops, const void *from,
                                 u32 len, u64 flags) =
  (void *)BPF_FUNC_store_hdr_opt;
static long (*bpf_reserve_hdr_opt)(struct bpf_sock_ops *skops, u32 len,
                                   u64 flags) =
  (void *)BPF_FUNC_reserve_hdr_opt;
static void *(*bpf_inode_storage_get)(struct bpf_map *map, void *inode,
                                      void *value, u64 flags) =
  (void *)BPF_FUNC_inode_storage_get;
static int (*bpf_inode_storage_delete)(struct bpf_map *map, void *inode) =
  (void *)BPF_FUNC_inode_storage_delete;
struct path;
static long (*bpf_d_path)(struct path *path, char *buf, u32 sz) =
  (void *)BPF_FUNC_d_path;
static long (*bpf_copy_from_user)(void *dst, u32 size, const void *user_ptr) =
  (void *)BPF_FUNC_copy_from_user;

static long (*bpf_snprintf_btf)(char *str, u32 str_size, struct btf_ptr *ptr,
                                u32 btf_ptr_size, u64 flags) =
  (void *)BPF_FUNC_snprintf_btf;
static long (*bpf_seq_printf_btf)(struct seq_file *m, struct btf_ptr *ptr,
                                  u32 ptr_size, u64 flags) =
  (void *)BPF_FUNC_seq_printf_btf;
static u64 (*bpf_skb_cgroup_classid)(struct sk_buff *skb) =
  (void *)BPF_FUNC_skb_cgroup_classid;
static long (*bpf_redirect_neigh)(u32 ifindex, struct bpf_redir_neigh *params,
                                  u64 flags) =
  (void *)BPF_FUNC_redirect_neigh;
static void * (*bpf_per_cpu_ptr)(const void *percpu_ptr, u32 cpu) =
  (void *)BPF_FUNC_per_cpu_ptr;
static void * (*bpf_this_cpu_ptr)(const void *percpu_ptr) =
  (void *)BPF_FUNC_this_cpu_ptr;
static long (*bpf_redirect_peer)(u32 ifindex, u64 flags) = (void *)BPF_FUNC_redirect_peer;

static void *(*bpf_task_storage_get)(void *map, struct task_struct *task,
                                     void *value, __u64 flags) =
  (void *)BPF_FUNC_task_storage_get;
static long (*bpf_task_storage_delete)(void *map, struct task_struct *task) =
  (void *)BPF_FUNC_task_storage_delete;
static struct task_struct *(*bpf_get_current_task_btf)(void) =
  (void *)BPF_FUNC_get_current_task_btf;
struct linux_binprm;
static long (*bpf_bprm_opts_set)(struct linux_binprm *bprm, __u64 flags) =
  (void *)BPF_FUNC_bprm_opts_set;
static __u64 (*bpf_ktime_get_coarse_ns)(void) = (void *)BPF_FUNC_ktime_get_coarse_ns;
struct inode;
static long (*bpf_ima_inode_hash)(struct inode *inode, void *dst, __u32 size) =
  (void *)BPF_FUNC_ima_inode_hash;
struct file;
static struct socket *(*bpf_sock_from_file)(struct file *file) =
  (void *)BPF_FUNC_sock_from_file;
static long (*bpf_check_mtu)(void *ctx, __u32 ifindex, __u32 *mtu_len,
                             __s32 len_diff, __u64 flags) =
  (void *)BPF_FUNC_check_mtu;
static long (*bpf_for_each_map_elem)(void *map, void *callback_fn,
                                     void *callback_ctx, __u64 flags) =
  (void *)BPF_FUNC_for_each_map_elem;
static long (*bpf_snprintf)(char *str, __u32 str_size, const char *fmt,
                            __u64 *data, __u32 data_len) =
  (void *)BPF_FUNC_snprintf;

static long (*bpf_sys_bpf)(__u32 cmd, void *attr, __u32 attr_size) =
  (void *)BPF_FUNC_sys_bpf;
static long (*bpf_btf_find_by_name_kind)(char *name, int name_sz, __u32 kind, int flags) =
  (void *)BPF_FUNC_btf_find_by_name_kind;
static long (*bpf_sys_close)(__u32 fd) = (void *)BPF_FUNC_sys_close;

struct bpf_timer;
static long (*bpf_timer_init)(struct bpf_timer *timer, void *map, __u64 flags) =
  (void *)BPF_FUNC_timer_init;
static long (*bpf_timer_set_callback)(struct bpf_timer *timer, void *callback_fn) =
  (void *)BPF_FUNC_timer_set_callback;
static long (*bpf_timer_start)(struct bpf_timer *timer, __u64 nsecs, __u64 flags) =
  (void *)BPF_FUNC_timer_start;
static long (*bpf_timer_cancel)(struct bpf_timer *timer) = (void *)BPF_FUNC_timer_cancel;

static __u64 (*bpf_get_func_ip)(void *ctx) = (void *)BPF_FUNC_get_func_ip;
static __u64 (*bpf_get_attach_cookie)(void *ctx) = (void *)BPF_FUNC_get_attach_cookie;
static long (*bpf_task_pt_regs)(struct task_struct *task) = (void *)BPF_FUNC_task_pt_regs;

static long (*bpf_get_branch_snapshot)(void *entries, __u32 size, __u64 flags) =
  (void *)BPF_FUNC_get_branch_snapshot;
static long (*bpf_trace_vprintk)(const char *fmt, __u32 fmt_size, const void *data,
                                 __u32 data_len) =
  (void *)BPF_FUNC_trace_vprintk;
static struct unix_sock *(*bpf_skc_to_unix_sock)(void *sk) =
  (void *)BPF_FUNC_skc_to_unix_sock;
static long (*bpf_kallsyms_lookup_name)(const char *name, int name_sz, int flags,
                                        __u64 *res) =
  (void *)BPF_FUNC_kallsyms_lookup_name;
static long (*bpf_find_vma)(struct task_struct *task, __u64 addr, void *callback_fn,
                            void *callback_ctx, __u64 flags) =
  (void *)BPF_FUNC_find_vma;
static long (*bpf_loop)(__u32 nr_loops, void *callback_fn, void *callback_ctx, __u64 flags) =
  (void *)BPF_FUNC_loop;
static long (*bpf_strncmp)(const char *s1, __u32 s1_sz, const char *s2) =
  (void *)BPF_FUNC_strncmp;
static long (*bpf_get_func_arg)(void *ctx, __u32 n, __u64 *value) =
  (void *)BPF_FUNC_get_func_arg;
static long (*bpf_get_func_ret)(void *ctx, __u64 *value) = (void *)BPF_FUNC_get_func_ret;
static long (*bpf_get_func_arg_cnt)(void *ctx) = (void *)BPF_FUNC_get_func_arg_cnt;
static int (*bpf_get_retval)(void) = (void *)BPF_FUNC_get_retval;
static int (*bpf_set_retval)(int retval) = (void *)BPF_FUNC_set_retval;
static __u64 (*bpf_xdp_get_buff_len)(struct xdp_md *xdp_md) = (void *)BPF_FUNC_xdp_get_buff_len;
static long (*bpf_xdp_load_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) =
  (void *)BPF_FUNC_xdp_load_bytes;
static long (*bpf_xdp_store_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) =
  (void *)BPF_FUNC_xdp_store_bytes;
static long (*bpf_copy_from_user_task)(void *dst, __u32 size, const void *user_ptr,
                                       struct task_struct *tsk, __u64 flags) =
  (void *)BPF_FUNC_copy_from_user_task;
static long (*bpf_skb_set_tstamp)(struct __sk_buff *skb, __u64 tstamp, __u32 tstamp_type) =
  (void *)BPF_FUNC_skb_set_tstamp;
static long (*bpf_ima_file_hash)(struct file *file, void *dst, __u32 size) =
  (void *)BPF_FUNC_ima_file_hash;
static void *(*bpf_kptr_xchg)(void *map_value, void *ptr) = (void *)BPF_FUNC_kptr_xchg;
static void *(*bpf_map_lookup_percpu_elem)(void *map, const void *key, __u32 cpu) =
  (void *)BPF_FUNC_map_lookup_percpu_elem;

struct mptcp_sock;
struct bpf_dynptr;
struct iphdr;
struct ipv6hdr;
struct tcphdr;
static struct mptcp_sock *(*bpf_skc_to_mptcp_sock)(void *sk) =
  (void *)BPF_FUNC_skc_to_mptcp_sock;
static long (*bpf_dynptr_from_mem)(void *data, __u32 size, __u64 flags,
                                   struct bpf_dynptr *ptr) =
  (void *)BPF_FUNC_dynptr_from_mem;
static long (*bpf_ringbuf_reserve_dynptr)(void *ringbuf, __u32 size, __u64 flags,
                                          struct bpf_dynptr *ptr) =
  (void *)BPF_FUNC_ringbuf_reserve_dynptr;
static void (*bpf_ringbuf_submit_dynptr)(struct bpf_dynptr *ptr, __u64 flags) =
  (void *)BPF_FUNC_ringbuf_submit_dynptr;
static void (*bpf_ringbuf_discard_dynptr)(struct bpf_dynptr *ptr, __u64 flags) =
  (void *)BPF_FUNC_ringbuf_discard_dynptr;
static long (*bpf_dynptr_read)(void *dst, __u32 len, const struct bpf_dynptr *src, __u32 offset,
                               __u64 flags) =
  (void *)BPF_FUNC_dynptr_read;
static long (*bpf_dynptr_write)(const struct bpf_dynptr *dst, __u32 offset, void *src, __u32 len,
                                __u64 flags) =
  (void *)BPF_FUNC_dynptr_write;
static void *(*bpf_dynptr_data)(const struct bpf_dynptr *ptr, __u32 offset, __u32 len) =
  (void *)BPF_FUNC_dynptr_data;
static __s64 (*bpf_tcp_raw_gen_syncookie_ipv4)(struct iphdr *iph, struct tcphdr *th,
                                               __u32 th_len) =
  (void *)BPF_FUNC_tcp_raw_gen_syncookie_ipv4;
static __s64 (*bpf_tcp_raw_gen_syncookie_ipv6)(struct ipv6hdr *iph, struct tcphdr *th,
                                               __u32 th_len) =
  (void *)BPF_FUNC_tcp_raw_gen_syncookie_ipv6;
static long (*bpf_tcp_raw_check_syncookie_ipv4)(struct iphdr *iph, struct tcphdr *th) =
  (void *)BPF_FUNC_tcp_raw_check_syncookie_ipv4;
static long (*bpf_tcp_raw_check_syncookie_ipv6)(struct ipv6hdr *iph, struct tcphdr *th) =
  (void *)BPF_FUNC_tcp_raw_check_syncookie_ipv6;

static __u64 (*bpf_ktime_get_tai_ns)(void) = (void *)BPF_FUNC_ktime_get_tai_ns;
static long (*bpf_user_ringbuf_drain)(void *map, void *callback_fn, void *ctx, __u64 flags) =
  (void *)BPF_FUNC_user_ringbuf_drain;

struct cgroup;
static void *(*bpf_cgrp_storage_get)(void *map, struct cgroup *cgroup, void *value, __u64 flags) =
  (void *)BPF_FUNC_cgrp_storage_get;
static long (*bpf_cgrp_storage_delete)(void *map, struct cgroup *cgroup) =
  (void *)BPF_FUNC_cgrp_storage_delete;

/* llvm builtin functions that an eBPF C program may use to
 * emit BPF_LD_ABS and BPF_LD_IND instructions
 */
unsigned long long load_byte(void *skb,
  unsigned long long off) asm("llvm.bpf.load.byte");
unsigned long long load_half(void *skb,
  unsigned long long off) asm("llvm.bpf.load.half");
unsigned long long load_word(void *skb,
  unsigned long long off) asm("llvm.bpf.load.word");

/* a helper structure used by an eBPF C program
 * to describe map attributes to the elf_bpf loader
 */
struct bpf_map_def {
  unsigned int type;
  unsigned int key_size;
  unsigned int value_size;
  unsigned int max_entries;
};

static int (*bpf_skb_store_bytes)(void *ctx, unsigned long long off, void *from,
                                  unsigned long long len, unsigned long long flags) =
  (void *) BPF_FUNC_skb_store_bytes;
static int (*bpf_l3_csum_replace)(void *ctx, unsigned long long off, unsigned long long from,
                                  unsigned long long to, unsigned long long flags) =
  (void *) BPF_FUNC_l3_csum_replace;
static int (*bpf_l4_csum_replace)(void *ctx, unsigned long long off, unsigned long long from,
                                  unsigned long long to, unsigned long long flags) =
  (void *) BPF_FUNC_l4_csum_replace;

static inline __attribute__((always_inline))
u16 bpf_ntohs(u16 val) {
  /* the compiler recognizes this as a rotate and lowers it to a single
   * rotate instruction (e.g. rolw 8) */
  return (val << 8) | (val >> 8);
}

static inline __attribute__((always_inline))
u32 bpf_ntohl(u32 val) {
  /* lowered to a byte swap (bswapsi2) */
  return __builtin_bswap32(val);
}

static inline __attribute__((always_inline))
u64 bpf_ntohll(u64 val) {
  /* lowered to a byte swap (bswapdi2) */
  return __builtin_bswap64(val);
}

static inline __attribute__((always_inline))
unsigned __int128 bpf_ntoh128(unsigned __int128 val) {
  return (((unsigned __int128)bpf_ntohll(val) << 64) | (u64)bpf_ntohll(val >> 64));
}

static inline __attribute__((always_inline))
u16 bpf_htons(u16 val) {
  return bpf_ntohs(val);
}

static inline __attribute__((always_inline))
u32 bpf_htonl(u32 val) {
  return bpf_ntohl(val);
}

static inline __attribute__((always_inline))
u64 bpf_htonll(u64 val) {
  return bpf_ntohll(val);
}

static inline __attribute__((always_inline))
unsigned __int128 bpf_hton128(unsigned __int128 val) {
  return bpf_ntoh128(val);
}
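
/* Illustrative note (not part of the original header): since hton and ntoh
 * pairs are the same byte swap, the typical use is converting a host
 * constant before comparing against a network-order field. The tcp pointer
 * below is hypothetical.
 *
 *   u16 http = bpf_htons(80);            // host 80 -> network byte order
 *   if (tcp->dst_port == http) { ... }
 */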

static inline __attribute__((always_inline))
u64 load_dword(void *skb, u64 off) {
  return ((u64)load_word(skb, off) << 32) | load_word(skb, off + 4);
}

void bpf_store_byte(void *skb, u64 off, u64 val) asm("llvm.bpf.store.byte");
void bpf_store_half(void *skb, u64 off, u64 val) asm("llvm.bpf.store.half");
void bpf_store_word(void *skb, u64 off, u64 val) asm("llvm.bpf.store.word");
u64 bpf_pseudo_fd(u64, u64) asm("llvm.bpf.pseudo");

static inline void __attribute__((always_inline))
bpf_store_dword(void *skb, u64 off, u64 val) {
  bpf_store_word(skb, off, (u32)val);
  bpf_store_word(skb, off + 4, val >> 32);
}

#define MASK(_n) ((_n) < 64 ? (1ull << (_n)) - 1 : ((u64)-1LL))
#define MASK128(_n) ((_n) < 128 ? ((unsigned __int128)1 << (_n)) - 1 : ((unsigned __int128)-1))

static inline __attribute__((always_inline))
unsigned int bpf_log2(unsigned int v)
{
  unsigned int r;
  unsigned int shift;

  r = (v > 0xFFFF) << 4; v >>= r;
  shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
  shift = (v > 0xF) << 2; v >>= shift; r |= shift;
  shift = (v > 0x3) << 1; v >>= shift; r |= shift;
  r |= (v >> 1);
  return r;
}

static inline __attribute__((always_inline))
unsigned int bpf_log2l(unsigned long v)
{
  unsigned int hi = v >> 32;
  if (hi)
    return bpf_log2(hi) + 32 + 1;
  else
    return bpf_log2(v) + 1;
}
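
/* Illustrative note (not part of the original header): for v >= 1,
 * bpf_log2l(v) returns floor(log2(v)) + 1, the 1-based slot index commonly
 * passed to a BPF_HISTOGRAM's increment():
 *
 *   bpf_log2l(1)    == 1
 *   bpf_log2l(4)    == 3
 *   bpf_log2l(1024) == 11
 */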
1155 
1156 struct bpf_context;
1157 
1158 static inline __attribute__((always_inline))
1159 BCC_SEC_HELPERS
1160 u64 bpf_dext_pkt(void *pkt, u64 off, u64 bofs, u64 bsz) {
1161   if (bofs == 0 && bsz == 8) {
1162     return load_byte(pkt, off);
1163   } else if (bofs + bsz <= 8) {
1164     return load_byte(pkt, off) >> (8 - (bofs + bsz))  &  MASK(bsz);
1165   } else if (bofs == 0 && bsz == 16) {
1166     return load_half(pkt, off);
1167   } else if (bofs + bsz <= 16) {
1168     return load_half(pkt, off) >> (16 - (bofs + bsz))  &  MASK(bsz);
1169   } else if (bofs == 0 && bsz == 32) {
1170     return load_word(pkt, off);
1171   } else if (bofs + bsz <= 32) {
1172     return load_word(pkt, off) >> (32 - (bofs + bsz))  &  MASK(bsz);
1173   } else if (bofs == 0 && bsz == 64) {
1174     return load_dword(pkt, off);
1175   } else if (bofs + bsz <= 64) {
1176     return load_dword(pkt, off) >> (64 - (bofs + bsz))  &  MASK(bsz);
1177   }
1178   return 0;
1179 }

static inline __attribute__((always_inline))
BCC_SEC_HELPERS
void bpf_dins_pkt(void *pkt, u64 off, u64 bofs, u64 bsz, u64 val) {
  // The load_xxx functions do a bswap before returning the half/word/dword,
  // so the value in the register is always host-endian. However, the bytes
  // written back need to be in network order.
  if (bofs == 0 && bsz == 8) {
    bpf_skb_store_bytes(pkt, off, &val, 1, 0);
  } else if (bofs + bsz <= 8) {
    u8 v = load_byte(pkt, off);
    v &= ~(MASK(bsz) << (8 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (8 - (bofs + bsz)));
    bpf_skb_store_bytes(pkt, off, &v, 1, 0);
  } else if (bofs == 0 && bsz == 16) {
    u16 v = bpf_htons(val);
    bpf_skb_store_bytes(pkt, off, &v, 2, 0);
  } else if (bofs + bsz <= 16) {
    u16 v = load_half(pkt, off);
    v &= ~(MASK(bsz) << (16 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (16 - (bofs + bsz)));
    v = bpf_htons(v);
    bpf_skb_store_bytes(pkt, off, &v, 2, 0);
  } else if (bofs == 0 && bsz == 32) {
    u32 v = bpf_htonl(val);
    bpf_skb_store_bytes(pkt, off, &v, 4, 0);
  } else if (bofs + bsz <= 32) {
    u32 v = load_word(pkt, off);
    v &= ~(MASK(bsz) << (32 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (32 - (bofs + bsz)));
    v = bpf_htonl(v);
    bpf_skb_store_bytes(pkt, off, &v, 4, 0);
  } else if (bofs == 0 && bsz == 64) {
    u64 v = bpf_htonll(val);
    bpf_skb_store_bytes(pkt, off, &v, 8, 0);
  } else if (bofs + bsz <= 64) {
    u64 v = load_dword(pkt, off);
    v &= ~(MASK(bsz) << (64 - (bofs + bsz)));
    v |= ((val & MASK(bsz)) << (64 - (bofs + bsz)));
    v = bpf_htonll(v);
    bpf_skb_store_bytes(pkt, off, &v, 8, 0);
  }
}
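
/* Illustrative example: set the IPv4 TTL, the 8-bit field at byte offset
 * 22 == 14 (Ethernet header) + 8 (TTL offset within the IPv4 header), to 64:
 *
 *   bpf_dins_pkt(skb, 22, 0, 8, 64);
 */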

static inline __attribute__((always_inline))
BCC_SEC_HELPERS
void *bpf_map_lookup_elem_(uintptr_t map, void *key) {
  return bpf_map_lookup_elem((void *)map, key);
}

static inline __attribute__((always_inline))
BCC_SEC_HELPERS
int bpf_map_update_elem_(uintptr_t map, void *key, void *value, u64 flags) {
  return bpf_map_update_elem((void *)map, key, value, flags);
}

static inline __attribute__((always_inline))
BCC_SEC_HELPERS
int bpf_map_delete_elem_(uintptr_t map, void *key) {
  return bpf_map_delete_elem((void *)map, key);
}
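
/* The underscore-suffixed wrappers above take the map as a uintptr_t so
 * that a map reference can be passed around as an integer pseudo fd (see
 * bpf_pseudo_fd() above) and cast back to the pointer the kernel helpers
 * expect.
 */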

static inline __attribute__((always_inline))
BCC_SEC_HELPERS
int bpf_l3_csum_replace_(void *ctx, u64 off, u64 from, u64 to, u64 flags) {
  switch (flags & 0xf) {
    case 2:
      return bpf_l3_csum_replace(ctx, off, bpf_htons(from), bpf_htons(to), flags);
    case 4:
      return bpf_l3_csum_replace(ctx, off, bpf_htonl(from), bpf_htonl(to), flags);
    case 8:
      return bpf_l3_csum_replace(ctx, off, bpf_htonll(from), bpf_htonll(to), flags);
    default:
      break;
  }
  return bpf_l3_csum_replace(ctx, off, from, to, flags);
}

static inline __attribute__((always_inline))
BCC_SEC_HELPERS
int bpf_l4_csum_replace_(void *ctx, u64 off, u64 from, u64 to, u64 flags) {
  switch (flags & 0xf) {
    case 2:
      return bpf_l4_csum_replace(ctx, off, bpf_htons(from), bpf_htons(to), flags);
    case 4:
      return bpf_l4_csum_replace(ctx, off, bpf_htonl(from), bpf_htonl(to), flags);
    case 8:
      return bpf_l4_csum_replace(ctx, off, bpf_htonll(from), bpf_htonll(to), flags);
    default:
      break;
  }
  return bpf_l4_csum_replace(ctx, off, from, to, flags);
}
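
/* The low four flag bits of the csum_replace helpers carry the size of the
 * field being patched; the wrappers above use that to convert `from` and
 * `to` to network byte order for 2-, 4- and 8-byte fields. Illustrative
 * example, with ip_csum_off standing in for the byte offset of the IPv4
 * header checksum:
 *
 *   bpf_l3_csum_replace_(skb, ip_csum_off, old_addr, new_addr, 4);
 */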

int incr_cksum_l3(void *off, u64 oldval, u64 newval) asm("llvm.bpf.extra");
int incr_cksum_l4(void *off, u64 oldval, u64 newval, u64 flags) asm("llvm.bpf.extra");
int bpf_num_cpus() asm("llvm.bpf.extra");

struct pt_regs;
int bpf_usdt_readarg(int argc, struct pt_regs *ctx, void *arg) asm("llvm.bpf.extra");
int bpf_usdt_readarg_p(int argc, struct pt_regs *ctx, void *buf, u64 len) asm("llvm.bpf.extra");

/* Select the target architecture from the ARCH env variable (see kbuild_helper.cc) */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__TARGET_ARCH_s390x)
#define bpf_target_s390x
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
#define bpf_target_powerpc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_mips)
#define bpf_target_mips
#define bpf_target_defined
#elif defined(__TARGET_ARCH_riscv64)
#define bpf_target_riscv64
#define bpf_target_defined
#elif defined(__TARGET_ARCH_loongarch)
#define bpf_target_loongarch
#define bpf_target_defined
#else
#undef bpf_target_defined
#endif

/* Fall back to what the compiler says */
#ifndef bpf_target_defined
#if defined(__x86_64__)
#define bpf_target_x86
#elif defined(__s390x__)
#define bpf_target_s390x
#elif defined(__aarch64__)
#define bpf_target_arm64
#elif defined(__powerpc__)
#define bpf_target_powerpc
#elif defined(__mips__)
#define bpf_target_mips
#elif defined(__riscv) && (__riscv_xlen == 64)
#define bpf_target_riscv64
#elif defined(__loongarch__)
#define bpf_target_loongarch
#endif
#endif

#if defined(bpf_target_powerpc)
#define PT_REGS_PARM1(ctx)	((ctx)->gpr[3])
#define PT_REGS_PARM2(ctx)	((ctx)->gpr[4])
#define PT_REGS_PARM3(ctx)	((ctx)->gpr[5])
#define PT_REGS_PARM4(ctx)	((ctx)->gpr[6])
#define PT_REGS_PARM5(ctx)	((ctx)->gpr[7])
#define PT_REGS_PARM6(ctx)	((ctx)->gpr[8])
#define PT_REGS_RC(ctx)		((ctx)->gpr[3])
#define PT_REGS_IP(ctx)		((ctx)->nip)
#define PT_REGS_SP(ctx)		((ctx)->gpr[1])
#elif defined(bpf_target_s390x)
#define PT_REGS_PARM1(x) ((x)->gprs[2])
#define PT_REGS_PARM2(x) ((x)->gprs[3])
#define PT_REGS_PARM3(x) ((x)->gprs[4])
#define PT_REGS_PARM4(x) ((x)->gprs[5])
#define PT_REGS_PARM5(x) ((x)->gprs[6])
#define PT_REGS_RET(x) ((x)->gprs[14])
#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->gprs[2])
#define PT_REGS_SP(x) ((x)->gprs[15])
#define PT_REGS_IP(x) ((x)->psw.addr)
#elif defined(bpf_target_x86)
#define PT_REGS_PARM1(ctx)	((ctx)->di)
#define PT_REGS_PARM2(ctx)	((ctx)->si)
#define PT_REGS_PARM3(ctx)	((ctx)->dx)
#define PT_REGS_PARM4(ctx)	((ctx)->cx)
#define PT_REGS_PARM5(ctx)	((ctx)->r8)
#define PT_REGS_PARM6(ctx)	((ctx)->r9)
#define PT_REGS_RET(ctx)	((ctx)->sp)
#define PT_REGS_FP(ctx)		((ctx)->bp) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(ctx)		((ctx)->ax)
#define PT_REGS_IP(ctx)		((ctx)->ip)
#define PT_REGS_SP(ctx)		((ctx)->sp)
#elif defined(bpf_target_arm64)
#define PT_REGS_PARM1(x)	((x)->regs[0])
#define PT_REGS_PARM2(x)	((x)->regs[1])
#define PT_REGS_PARM3(x)	((x)->regs[2])
#define PT_REGS_PARM4(x)	((x)->regs[3])
#define PT_REGS_PARM5(x)	((x)->regs[4])
#define PT_REGS_PARM6(x)	((x)->regs[5])
#define PT_REGS_RET(x)		((x)->regs[30])
#define PT_REGS_FP(x)		((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x)		((x)->regs[0])
#define PT_REGS_SP(x)		((x)->sp)
#define PT_REGS_IP(x)		((x)->pc)
#elif defined(bpf_target_mips)
#define PT_REGS_PARM1(x) ((x)->regs[4])
#define PT_REGS_PARM2(x) ((x)->regs[5])
#define PT_REGS_PARM3(x) ((x)->regs[6])
#define PT_REGS_PARM4(x) ((x)->regs[7])
#define PT_REGS_PARM5(x) ((x)->regs[8])
#define PT_REGS_PARM6(x) ((x)->regs[9])
#define PT_REGS_RET(x) ((x)->regs[31])
#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[2])
#define PT_REGS_SP(x) ((x)->regs[29])
#define PT_REGS_IP(x) ((x)->cp0_epc)
#elif defined(bpf_target_riscv64)
/* riscv64 provides struct user_regs_struct instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
#define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->a0)
#define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->a1)
#define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->a2)
#define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->a3)
#define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->a4)
#define PT_REGS_PARM6(x) (__PT_REGS_CAST(x)->a5)
#define PT_REGS_RET(x) (__PT_REGS_CAST(x)->ra)
#define PT_REGS_FP(x) (__PT_REGS_CAST(x)->s0) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) (__PT_REGS_CAST(x)->a0)
#define PT_REGS_SP(x) (__PT_REGS_CAST(x)->sp)
#define PT_REGS_IP(x) (__PT_REGS_CAST(x)->pc)
#elif defined(bpf_target_loongarch)
#define PT_REGS_PARM1(x) ((x)->regs[4])
#define PT_REGS_PARM2(x) ((x)->regs[5])
#define PT_REGS_PARM3(x) ((x)->regs[6])
#define PT_REGS_PARM4(x) ((x)->regs[7])
#define PT_REGS_PARM5(x) ((x)->regs[8])
#define PT_REGS_PARM6(x) ((x)->regs[9])
#define PT_REGS_RET(x) ((x)->regs[1])
#define PT_REGS_FP(x) ((x)->regs[22]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[4])
#define PT_REGS_SP(x) ((x)->regs[3])
#define PT_REGS_IP(x) ((x)->csr_era)
#else
#error "bcc does not support this platform yet"
#endif
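
/* Illustrative example: the PT_REGS_* accessors let a kprobe read function
 * arguments portably across architectures:
 *
 *   int kprobe__vfs_read(struct pt_regs *ctx) {
 *     struct file *file = (struct file *)PT_REGS_PARM1(ctx);
 *     return 0;
 *   }
 */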

#if defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) && !defined(__s390x__)
#define PT_REGS_SYSCALL_CTX(ctx)	((struct pt_regs *)PT_REGS_PARM1(ctx))
#else
#define PT_REGS_SYSCALL_CTX(ctx)	(ctx)
#endif
/* Helpers for syscall params. Pass in a ctx returned from PT_REGS_SYSCALL_CTX. */
#define PT_REGS_PARM1_SYSCALL(ctx)	PT_REGS_PARM1(ctx)
#define PT_REGS_PARM2_SYSCALL(ctx)	PT_REGS_PARM2(ctx)
#define PT_REGS_PARM3_SYSCALL(ctx)	PT_REGS_PARM3(ctx)
#if defined(bpf_target_x86)
#define PT_REGS_PARM4_SYSCALL(ctx)	((ctx)->r10) /* for syscall only */
#else
#define PT_REGS_PARM4_SYSCALL(ctx)	PT_REGS_PARM4(ctx)
#endif
#define PT_REGS_PARM5_SYSCALL(ctx)	PT_REGS_PARM5(ctx)
#ifdef PT_REGS_PARM6
#define PT_REGS_PARM6_SYSCALL(ctx)	PT_REGS_PARM6(ctx)
#endif
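
/* Illustrative example: with syscall wrappers enabled the real registers
 * live in kernel memory, so read them via bpf_probe_read():
 *
 *   struct pt_regs *real_regs = PT_REGS_SYSCALL_CTX(ctx);
 *   unsigned long arg2;
 *   bpf_probe_read(&arg2, sizeof(arg2), &PT_REGS_PARM2_SYSCALL(real_regs));
 */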

#define lock_xadd(ptr, val) ((void)__sync_fetch_and_add(ptr, val))
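
/* lock_xadd() is an atomic add; illustrative use on a value in a
 * (hypothetical) counts map:
 *
 *   u64 *cnt = counts.lookup(&key);
 *   if (cnt)
 *     lock_xadd(cnt, 1);
 */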

#define TRACEPOINT_PROBE(category, event) \
int tracepoint__##category##__##event(struct tracepoint__##category##__##event *args)

#define RAW_TRACEPOINT_PROBE(event) \
int raw_tracepoint__##event(struct bpf_raw_tracepoint_args *ctx)
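
/* Illustrative example:
 *
 *   TRACEPOINT_PROBE(sched, sched_switch) {
 *     bpf_trace_printk("next pid %d\n", args->next_pid);
 *     return 0;
 *   }
 */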

/* The BPF_PROG macro allows defining a trampoline (fentry/fexit-style)
 * program with typed arguments; borrowed from kernel BPF selftest code.
 * The ___bpf_* helpers below count the variadic arguments and cast each
 * raw u64 ctx slot to the corresponding typed parameter.
 */
#define ___bpf_concat(a, b) a ## b
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
#define ___bpf_narg(...) \
        ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

#define ___bpf_ctx_cast0() ctx
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
#define ___bpf_ctx_cast(args...) \
        ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)

#define BPF_PROG(name, args...)                                 \
int name(unsigned long long *ctx);                              \
__attribute__((always_inline))                                  \
static int ____##name(unsigned long long *ctx, ##args);         \
int name(unsigned long long *ctx)                               \
{                                                               \
        int __ret;                                              \
                                                                \
        _Pragma("GCC diagnostic push")                          \
        _Pragma("GCC diagnostic ignored \"-Wint-conversion\"")  \
        __ret = ____##name(___bpf_ctx_cast(args));              \
        _Pragma("GCC diagnostic pop")                           \
        return __ret;                                           \
}                                                               \
static int ____##name(unsigned long long *ctx, ##args)
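
/* Illustrative example (hypothetical program name):
 *
 *   BPF_PROG(on_fork, struct task_struct *parent, struct task_struct *child)
 *   {
 *     bpf_trace_printk("fork from pid %d\n", parent->tgid);
 *     return 0;
 *   }
 */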

#define KFUNC_PROBE(event, args...) \
        BPF_PROG(kfunc__vmlinux__ ## event, ##args)

#define KRETFUNC_PROBE(event, args...) \
        BPF_PROG(kretfunc__vmlinux__ ## event, ##args)

#define MODULE_KFUNC_PROBE(module, event, args...) \
        BPF_PROG(kfunc__ ## module ## __ ## event, ##args)

#define MODULE_KRETFUNC_PROBE(module, event, args...) \
        BPF_PROG(kretfunc__ ## module ## __ ## event, ##args)

#define KMOD_RET(event, args...) \
        BPF_PROG(kmod_ret__ ## event, ##args)

#define LSM_PROBE(event, args...) \
        BPF_PROG(lsm__ ## event, ##args)
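
/* Illustrative example: attach to the "bpf" LSM hook and allow everything:
 *
 *   LSM_PROBE(bpf, int cmd, union bpf_attr *attr, unsigned int size)
 *   {
 *     return 0;
 *   }
 */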

#define BPF_ITER(target) \
        int bpf_iter__ ## target (struct bpf_iter__ ## target *ctx)
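
/* Illustrative example of a task iterator program:
 *
 *   BPF_ITER(task)
 *   {
 *     struct task_struct *task = ctx->task;
 *     if (task == 0)
 *       return 0;
 *     // inspect or emit task state here
 *     return 0;
 *   }
 */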

#define TP_DATA_LOC_READ_CONST(dst, field, length)                        \
        do {                                                              \
            unsigned short __offset = args->data_loc_##field & 0xFFFF;    \
            bpf_probe_read((void *)dst, length, (char *)args + __offset); \
        } while (0)

#define TP_DATA_LOC_READ(dst, field)                                        \
        do {                                                                \
            unsigned short __offset = args->data_loc_##field & 0xFFFF;      \
            unsigned short __length = args->data_loc_##field >> 16;         \
            bpf_probe_read((void *)dst, __length, (char *)args + __offset); \
        } while (0)

#define TP_DATA_LOC_READ_STR(dst, field, length)                                \
        do {                                                                    \
            unsigned short __offset = args->data_loc_##field & 0xFFFF;          \
            bpf_probe_read_str((void *)dst, length, (char *)args + __offset);   \
        } while (0)
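
/* A tracepoint __data_loc field packs the payload's offset from the args
 * struct in its low 16 bits and its length in the high 16 bits; the macros
 * above decode that. Illustrative example, for a tracepoint declaring
 * __data_loc char[] filename:
 *
 *   char name[64] = {};
 *   TP_DATA_LOC_READ_STR(name, filename, sizeof(name));
 */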

#endif
)********"