// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Anton Protopopov
//
// Based on syscount(8) from BCC by Sasha Goldshtein
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "syscount.h"
#include "maps.bpf.h"

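/*
 * Runtime configuration. The userspace loader sets these through the BPF
 * skeleton's read-only data section before the object is loaded, so the
 * verifier can treat them as constants and prune unused branches.
 */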
const volatile bool filter_cg = false;
const volatile bool count_by_process = false;
const volatile bool measure_latency = false;
const volatile bool filter_failed = false;
const volatile int filter_errno = false;
const volatile pid_t filter_pid = 0;

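/*
 * Optional cgroup filter: userspace is expected to place the target cgroup
 * fd in slot 0; bpf_current_task_under_cgroup() checks the current task
 * against it when filter_cg is set.
 */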
struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 1);
} cgroup_map SEC(".maps");

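/* Syscall-entry timestamps, keyed by thread id; used to compute latency. */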
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, u32);
	__type(value, u64);
} start SEC(".maps");

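/*
 * Aggregated results read by userspace: count and total latency per key,
 * where the key is the syscall number, or the pid when count_by_process
 * is set.
 */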
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, u32);
	__type(value, struct data_t);
} data SEC(".maps");

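/* Record the process name (the thread-group leader's comm) via CO-RE. */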
static __always_inline
void save_proc_name(struct data_t *val)
{
	struct task_struct *current = (void *)bpf_get_current_task();

	/* We should save the process name every time because it can be
	 * changed (e.g., by exec).  This can be optimized later by managing
	 * this field with the help of tp/sched/sched_process_exec and
	 * raw_tp/task_rename. */
	BPF_CORE_READ_STR_INTO(&val->comm, current, group_leader, comm);
}

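/*
 * On syscall entry, remember the current timestamp for this thread so that
 * sys_exit can compute the syscall's latency.
 */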
SEC("tracepoint/raw_syscalls/sys_enter")
int sys_enter(struct trace_event_raw_sys_enter *args)
{
	u64 id = bpf_get_current_pid_tgid();
	pid_t pid = id >> 32;
	u32 tid = id;
	u64 ts;

	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	if (filter_pid && pid != filter_pid)
		return 0;

	ts = bpf_ktime_get_ns();
	bpf_map_update_elem(&start, &tid, &ts, 0);
	return 0;
}

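/*
 * On syscall exit, apply the configured filters, compute the latency from
 * the timestamp saved at entry, and update the aggregated counters.
 */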
SEC("tracepoint/raw_syscalls/sys_exit")
int sys_exit(struct trace_event_raw_sys_exit *args)
{
	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	u64 id = bpf_get_current_pid_tgid();
	static const struct data_t zero;
	pid_t pid = id >> 32;
	struct data_t *val;
	u64 *start_ts, lat = 0;
	u32 tid = id;
	u32 key;

	/* this happens when there is an interrupt */
	if (args->id == -1)
		return 0;

	if (filter_pid && pid != filter_pid)
		return 0;
	if (filter_failed && args->ret >= 0)
		return 0;
	if (filter_errno && args->ret != -filter_errno)
		return 0;

	if (measure_latency) {
		start_ts = bpf_map_lookup_elem(&start, &tid);
		if (!start_ts)
			return 0;
		lat = bpf_ktime_get_ns() - *start_ts;
	}

	key = (count_by_process) ? pid : args->id;
	val = bpf_map_lookup_or_try_init(&data, &key, &zero);
	if (val) {
		__sync_fetch_and_add(&val->count, 1);
		if (count_by_process)
			save_proc_name(val);
		if (measure_latency)
			__sync_fetch_and_add(&val->total_ns, lat);
	}
	return 0;
}

char LICENSE[] SEC("license") = "GPL";