// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Anton Protopopov
//
// Based on tcpconnect(8) from BCC by Brendan Gregg
#include <vmlinux.h>

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

#include "maps.bpf.h"
#include "tcpconnect.h"

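/*
 * Tunable filters. These land in the .rodata section; the userspace loader
 * is expected to set them through the skeleton before the program is loaded,
 * after which the verifier can treat them as constants.
 */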
const volatile int filter_ports[MAX_PORTS];
const volatile int filter_ports_len = 0;
const volatile uid_t filter_uid = -1;
const volatile pid_t filter_pid = 0;
const volatile bool do_count = 0;
const volatile bool source_port = 0;

/* Defined here because of conflicts with include files */
#define AF_INET		2
#define AF_INET6	10

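/*
 * Correlates a connect attempt across the kprobe/kretprobe pair:
 * enter_tcp_connect() stashes the socket pointer keyed by thread ID, and
 * exit_tcp_connect() on the same thread picks it up and deletes the entry.
 */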
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, u32);
	__type(value, struct sock *);
} sockets SEC(".maps");

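/*
 * Per-flow connection counters for count mode (do_count): one hash map per
 * address family, keyed by the source/destination address and port tuple.
 */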
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct ipv4_flow_key);
	__type(value, u64);
} ipv4_count SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct ipv6_flow_key);
	__type(value, u64);
} ipv6_count SEC(".maps");

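/* Perf buffer for streaming one struct event per traced connect to userspace. */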
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
} events SEC(".maps");

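/*
 * Returns true when the connection should be filtered out (dropped): with no
 * configured ports every connection passes; otherwise only connections to
 * one of the listed ports are kept.
 */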
static __always_inline bool filter_port(__u16 port)
{
	int i;

	if (filter_ports_len == 0)
		return false;

	for (i = 0; i < filter_ports_len && i < MAX_PORTS; i++) {
		if (port == filter_ports[i])
			return false;
	}
	return true;
}

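/*
 * Runs at tcp_v4_connect()/tcp_v6_connect() entry. Applies the PID and UID
 * filters, then records the socket pointer so the kretprobe can inspect the
 * connection once the kernel has filled in the address and port fields.
 */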
static __always_inline int
enter_tcp_connect(struct pt_regs *ctx, struct sock *sk)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 pid = pid_tgid >> 32;
	__u32 tid = pid_tgid;
	__u32 uid;

	if (filter_pid && pid != filter_pid)
		return 0;

	uid = bpf_get_current_uid_gid();
	if (filter_uid != (uid_t) -1 && uid != filter_uid)
		return 0;

	bpf_map_update_elem(&sockets, &tid, &sk, 0);
	return 0;
}

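/*
 * Count mode: bump the per-flow counter instead of emitting an event.
 * bpf_map_lookup_or_try_init() (from maps.bpf.h) inserts the zero value on
 * first sight of a flow; the atomic add keeps concurrent CPUs from losing
 * updates. count_v6() below is the IPv6 mirror of this function.
 */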
static __always_inline void count_v4(struct sock *sk, __u16 sport, __u16 dport)
{
	struct ipv4_flow_key key = {};
	static __u64 zero;
	__u64 *val;

	BPF_CORE_READ_INTO(&key.saddr, sk, __sk_common.skc_rcv_saddr);
	BPF_CORE_READ_INTO(&key.daddr, sk, __sk_common.skc_daddr);
	key.sport = sport;
	key.dport = dport;
	val = bpf_map_lookup_or_try_init(&ipv4_count, &key, &zero);
	if (val)
		__atomic_add_fetch(val, 1, __ATOMIC_RELAXED);
}

static __always_inline void count_v6(struct sock *sk, __u16 sport, __u16 dport)
{
	struct ipv6_flow_key key = {};
	static const __u64 zero;
	__u64 *val;

	BPF_CORE_READ_INTO(&key.saddr, sk,
			   __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
	BPF_CORE_READ_INTO(&key.daddr, sk,
			   __sk_common.skc_v6_daddr.in6_u.u6_addr32);
	key.sport = sport;
	key.dport = dport;

	val = bpf_map_lookup_or_try_init(&ipv6_count, &key, &zero);
	if (val)
		__atomic_add_fetch(val, 1, __ATOMIC_RELAXED);
}

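/*
 * Trace mode: fill a struct event with the flow tuple, PID, UID, timestamp
 * and task name, and push it through the perf buffer. trace_v6() below is
 * the IPv6 mirror.
 */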
static __always_inline void
trace_v4(struct pt_regs *ctx, pid_t pid, struct sock *sk, __u16 sport, __u16 dport)
{
	struct event event = {};

	event.af = AF_INET;
	event.pid = pid;
	event.uid = bpf_get_current_uid_gid();
	event.ts_us = bpf_ktime_get_ns() / 1000;
	BPF_CORE_READ_INTO(&event.saddr_v4, sk, __sk_common.skc_rcv_saddr);
	BPF_CORE_READ_INTO(&event.daddr_v4, sk, __sk_common.skc_daddr);
	event.sport = sport;
	event.dport = dport;
	bpf_get_current_comm(event.task, sizeof(event.task));

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &event, sizeof(event));
}

static __always_inline void
trace_v6(struct pt_regs *ctx, pid_t pid, struct sock *sk, __u16 sport, __u16 dport)
{
	struct event event = {};

	event.af = AF_INET6;
	event.pid = pid;
	event.uid = bpf_get_current_uid_gid();
	event.ts_us = bpf_ktime_get_ns() / 1000;
	BPF_CORE_READ_INTO(&event.saddr_v6, sk,
			   __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
	BPF_CORE_READ_INTO(&event.daddr_v6, sk,
			   __sk_common.skc_v6_daddr.in6_u.u6_addr32);
	event.sport = sport;
	event.dport = dport;
	bpf_get_current_comm(event.task, sizeof(event.task));

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &event, sizeof(event));
}

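/*
 * Runs at tcp_v4_connect()/tcp_v6_connect() return. A missing sockets entry
 * means the entry probe filtered this thread out; a non-zero return value
 * means the connect attempt failed, so only cleanup remains.
 */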
static __always_inline int
exit_tcp_connect(struct pt_regs *ctx, int ret, int ip_ver)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 pid = pid_tgid >> 32;
	__u32 tid = pid_tgid;
	struct sock **skpp;
	struct sock *sk;
	__u16 sport = 0;
	__u16 dport;

	skpp = bpf_map_lookup_elem(&sockets, &tid);
	if (!skpp)
		return 0;

	if (ret)
		goto end;

	sk = *skpp;

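	/*
	 * Note the asymmetry: skc_num is in host byte order while skc_dport
	 * is in network byte order, so filter_port() compares dport against
	 * values the loader presumably stores in network byte order as well.
	 */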
	if (source_port)
		BPF_CORE_READ_INTO(&sport, sk, __sk_common.skc_num);
	BPF_CORE_READ_INTO(&dport, sk, __sk_common.skc_dport);

	if (filter_port(dport))
		goto end;

	if (do_count) {
		if (ip_ver == 4)
			count_v4(sk, sport, dport);
		else
			count_v6(sk, sport, dport);
	} else {
		if (ip_ver == 4)
			trace_v4(ctx, pid, sk, sport, dport);
		else
			trace_v6(ctx, pid, sk, sport, dport);
	}

end:
	bpf_map_delete_elem(&sockets, &tid);
	return 0;
}

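/*
 * Attach points: one kprobe/kretprobe pair per address family, all funneling
 * into the shared enter/exit helpers above.
 */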
202 SEC("kprobe/tcp_v4_connect")
BPF_KPROBE(tcp_v4_connect,struct sock * sk)203 int BPF_KPROBE(tcp_v4_connect, struct sock *sk)
204 {
205 	return enter_tcp_connect(ctx, sk);
206 }
207 
208 SEC("kretprobe/tcp_v4_connect")
BPF_KRETPROBE(tcp_v4_connect_ret,int ret)209 int BPF_KRETPROBE(tcp_v4_connect_ret, int ret)
210 {
211 	return exit_tcp_connect(ctx, ret, 4);
212 }
213 
214 SEC("kprobe/tcp_v6_connect")
BPF_KPROBE(tcp_v6_connect,struct sock * sk)215 int BPF_KPROBE(tcp_v6_connect, struct sock *sk)
216 {
217 	return enter_tcp_connect(ctx, sk);
218 }
219 
220 SEC("kretprobe/tcp_v6_connect")
BPF_KRETPROBE(tcp_v6_connect_ret,int ret)221 int BPF_KRETPROBE(tcp_v6_connect_ret, int ret)
222 {
223 	return exit_tcp_connect(ctx, ret, 6);
224 }
225 
226 char LICENSE[] SEC("license") = "GPL";