// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#include "maps.bpf.h"
#include "memleak.h"
#include "core_fixes.bpf.h"

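/* Tunables; the user-space loader sets these read-only globals before the
 * program is loaded. */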
const volatile size_t min_size = 0;
const volatile size_t max_size = -1;
const volatile size_t page_size = 4096;
const volatile __u64 sample_rate = 1;
const volatile bool trace_all = false;
const volatile __u64 stack_flags = 0;
const volatile bool wa_missing_free = false;

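/* tid -> size requested by the allocation currently in flight on that thread */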
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, u64);
	__uint(max_entries, 10240);
} sizes SEC(".maps");

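/* outstanding allocations: address -> size, timestamp and stack id */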
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u64); /* address */
	__type(value, struct alloc_info);
	__uint(max_entries, ALLOCS_MAX_ENTRIES);
} allocs SEC(".maps");

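/* per-stack totals: stack id -> total outstanding size and allocation count */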
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u64); /* stack id */
	__type(value, union combined_alloc_info);
	__uint(max_entries, COMBINED_ALLOCS_MAX_ENTRIES);
} combined_allocs SEC(".maps");

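/* tid -> the memptr argument saved across a posix_memalign() call */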
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u64);
	__type(value, u64);
	__uint(max_entries, 10240);
} memptrs SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__type(key, u32);
} stack_traces SEC(".maps");

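/* zeroed template used to initialize new combined_allocs entries */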
static union combined_alloc_info initial_cinfo;

static void update_statistics_add(u64 stack_id, u64 sz)
{
	union combined_alloc_info *existing_cinfo;

	existing_cinfo = bpf_map_lookup_or_try_init(&combined_allocs, &stack_id, &initial_cinfo);
	if (!existing_cinfo)
		return;

	const union combined_alloc_info incremental_cinfo = {
		.total_size = sz,
		.number_of_allocs = 1
	};

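	/* total_size and number_of_allocs share one 64-bit word, so a single
	 * atomic add updates both counters at once. */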
	__sync_fetch_and_add(&existing_cinfo->bits, incremental_cinfo.bits);
}

static void update_statistics_del(u64 stack_id, u64 sz)
{
	union combined_alloc_info *existing_cinfo;

	existing_cinfo = bpf_map_lookup_elem(&combined_allocs, &stack_id);
	if (!existing_cinfo) {
		bpf_printk("failed to lookup combined allocs\n");

		return;
	}

	const union combined_alloc_info decremental_cinfo = {
		.total_size = sz,
		.number_of_allocs = 1
	};

	__sync_fetch_and_sub(&existing_cinfo->bits, decremental_cinfo.bits);
}

static int gen_alloc_enter(size_t size)
{
	if (size < min_size || size > max_size)
		return 0;

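	/* sample roughly one of every sample_rate allocations, keyed off the
	 * low bits of the current timestamp */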
	if (sample_rate > 1) {
		if (bpf_ktime_get_ns() % sample_rate != 0)
			return 0;
	}

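	/* the lower 32 bits of pid_tgid are the thread id */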
	const u32 tid = bpf_get_current_pid_tgid();
	bpf_map_update_elem(&sizes, &tid, &size, BPF_ANY);

	if (trace_all)
		bpf_printk("alloc entered, size = %lu\n", size);

	return 0;
}

static int gen_alloc_exit2(void *ctx, u64 address)
{
	const u32 tid = bpf_get_current_pid_tgid();
	struct alloc_info info;

	const u64 *size = bpf_map_lookup_elem(&sizes, &tid);
	if (!size)
		return 0; // missed alloc entry

	__builtin_memset(&info, 0, sizeof(info));

	info.size = *size;
	bpf_map_delete_elem(&sizes, &tid);

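	/* a zero address means the allocation failed; only record successes */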
	if (address != 0) {
		info.timestamp_ns = bpf_ktime_get_ns();

		info.stack_id = bpf_get_stackid(ctx, &stack_traces, stack_flags);

		bpf_map_update_elem(&allocs, &address, &info, BPF_ANY);

		update_statistics_add(info.stack_id, info.size);
	}

	if (trace_all) {
		bpf_printk("alloc exited, size = %lu, result = %lx\n",
				info.size, address);
	}

	return 0;
}

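/* PT_REGS_RC(ctx) is the probed function's return value, i.e. the address of
 * the new allocation */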
static int gen_alloc_exit(struct pt_regs *ctx)
{
	return gen_alloc_exit2(ctx, PT_REGS_RC(ctx));
}

static int gen_free_enter(const void *address)
{
	const u64 addr = (u64)address;

	const struct alloc_info *info = bpf_map_lookup_elem(&allocs, &addr);
	if (!info)
		return 0;

	bpf_map_delete_elem(&allocs, &addr);
	update_statistics_del(info->stack_id, info->size);

	if (trace_all) {
		bpf_printk("free entered, address = %lx, size = %lu\n",
				address, info->size);
	}

	return 0;
}

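/* User-mode tracing: the loader attaches these uprobes/uretprobes to the
 * allocator functions (typically in libc) of the traced process. */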
SEC("uprobe")
int BPF_KPROBE(malloc_enter, size_t size)
{
	return gen_alloc_enter(size);
}

SEC("uretprobe")
int BPF_KRETPROBE(malloc_exit)
{
	return gen_alloc_exit(ctx);
}

SEC("uprobe")
int BPF_KPROBE(free_enter, void *address)
{
	return gen_free_enter(address);
}

SEC("uprobe")
int BPF_KPROBE(calloc_enter, size_t nmemb, size_t size)
{
	return gen_alloc_enter(nmemb * size);
}

SEC("uretprobe")
int BPF_KRETPROBE(calloc_exit)
{
	return gen_alloc_exit(ctx);
}

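/* realloc releases the old block and returns a new one, so treat it as a
 * free followed by an allocation */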
SEC("uprobe")
int BPF_KPROBE(realloc_enter, void *ptr, size_t size)
{
	gen_free_enter(ptr);

	return gen_alloc_enter(size);
}

SEC("uretprobe")
int BPF_KRETPROBE(realloc_exit)
{
	return gen_alloc_exit(ctx);
}

SEC("uprobe")
int BPF_KPROBE(mmap_enter, void *address, size_t size)
{
	return gen_alloc_enter(size);
}

SEC("uretprobe")
int BPF_KRETPROBE(mmap_exit)
{
	return gen_alloc_exit(ctx);
}

SEC("uprobe")
int BPF_KPROBE(munmap_enter, void *address)
{
	return gen_free_enter(address);
}

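/* posix_memalign() returns an error code and writes the allocated address
 * through memptr, so stash memptr per thread here and read it back on exit */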
SEC("uprobe")
int BPF_KPROBE(posix_memalign_enter, void **memptr, size_t alignment, size_t size)
{
	const u64 memptr64 = (u64)(size_t)memptr;
	const u32 tid = bpf_get_current_pid_tgid();
	bpf_map_update_elem(&memptrs, &tid, &memptr64, BPF_ANY);

	return gen_alloc_enter(size);
}

SEC("uretprobe")
int BPF_KRETPROBE(posix_memalign_exit)
{
	u64 *memptr64;
	void *addr;
	const u32 tid = bpf_get_current_pid_tgid();

	memptr64 = bpf_map_lookup_elem(&memptrs, &tid);
	if (!memptr64)
		return 0;

	bpf_map_delete_elem(&memptrs, &tid);

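	/* read the address that posix_memalign() stored through the saved memptr */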
	if (bpf_probe_read_user(&addr, sizeof(void*), (void*)(size_t)*memptr64))
		return 0;

	const u64 addr64 = (u64)(size_t)addr;

	return gen_alloc_exit2(ctx, addr64);
}

SEC("uprobe")
int BPF_KPROBE(aligned_alloc_enter, size_t alignment, size_t size)
{
	return gen_alloc_enter(size);
}

SEC("uretprobe")
int BPF_KRETPROBE(aligned_alloc_exit)
{
	return gen_alloc_exit(ctx);
}

SEC("uprobe")
int BPF_KPROBE(valloc_enter, size_t size)
{
	return gen_alloc_enter(size);
}

SEC("uretprobe")
int BPF_KRETPROBE(valloc_exit)
{
	return gen_alloc_exit(ctx);
}

SEC("uprobe")
int BPF_KPROBE(memalign_enter, size_t alignment, size_t size)
{
	return gen_alloc_enter(size);
}

SEC("uretprobe")
int BPF_KRETPROBE(memalign_exit)
{
	return gen_alloc_exit(ctx);
}

SEC("uprobe")
int BPF_KPROBE(pvalloc_enter, size_t size)
{
	return gen_alloc_enter(size);
}

SEC("uretprobe")
int BPF_KRETPROBE(pvalloc_exit)
{
	return gen_alloc_exit(ctx);
}

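/* Kernel-mode tracing via kmem tracepoints. The trace_event_raw_*___x structs
 * and has_*() helpers come from core_fixes.bpf.h and use CO-RE to pick the
 * tracepoint layout that matches the running kernel. */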
SEC("tracepoint/kmem/kmalloc")
int memleak__kmalloc(void *ctx)
{
	const void *ptr;
	size_t bytes_alloc;

	if (has_kmem_alloc()) {
		struct trace_event_raw_kmem_alloc___x *args = ctx;
		ptr = BPF_CORE_READ(args, ptr);
		bytes_alloc = BPF_CORE_READ(args, bytes_alloc);
	} else {
		struct trace_event_raw_kmalloc___x *args = ctx;
		ptr = BPF_CORE_READ(args, ptr);
		bytes_alloc = BPF_CORE_READ(args, bytes_alloc);
	}

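	/* with the missing-free workaround enabled, drop any stale entry at this
	 * address so an unseen free is not reported as a leak */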
	if (wa_missing_free)
		gen_free_enter(ptr);

	gen_alloc_enter(bytes_alloc);

	return gen_alloc_exit2(ctx, (u64)ptr);
}

SEC("tracepoint/kmem/kmalloc_node")
int memleak__kmalloc_node(void *ctx)
{
	const void *ptr;
	size_t bytes_alloc;

	if (has_kmem_alloc_node()) {
		struct trace_event_raw_kmem_alloc_node___x *args = ctx;
		ptr = BPF_CORE_READ(args, ptr);
		bytes_alloc = BPF_CORE_READ(args, bytes_alloc);

		if (wa_missing_free)
			gen_free_enter(ptr);

		gen_alloc_enter(bytes_alloc);

		return gen_alloc_exit2(ctx, (u64)ptr);
	} else {
		/* the tracepoint does not exist on this kernel and is disabled
		 * at attach time; return to avoid a compile warning */
		return 0;
	}
}

SEC("tracepoint/kmem/kfree")
int memleak__kfree(void *ctx)
{
	const void *ptr;

	if (has_kfree()) {
		struct trace_event_raw_kfree___x *args = ctx;
		ptr = BPF_CORE_READ(args, ptr);
	} else {
		struct trace_event_raw_kmem_free___x *args = ctx;
		ptr = BPF_CORE_READ(args, ptr);
	}

	return gen_free_enter(ptr);
}

SEC("tracepoint/kmem/kmem_cache_alloc")
int memleak__kmem_cache_alloc(void *ctx)
{
	const void *ptr;
	size_t bytes_alloc;

	if (has_kmem_alloc()) {
		struct trace_event_raw_kmem_alloc___x *args = ctx;
		ptr = BPF_CORE_READ(args, ptr);
		bytes_alloc = BPF_CORE_READ(args, bytes_alloc);
	} else {
		struct trace_event_raw_kmem_cache_alloc___x *args = ctx;
		ptr = BPF_CORE_READ(args, ptr);
		bytes_alloc = BPF_CORE_READ(args, bytes_alloc);
	}

	if (wa_missing_free)
		gen_free_enter(ptr);

	gen_alloc_enter(bytes_alloc);

	return gen_alloc_exit2(ctx, (u64)ptr);
}

SEC("tracepoint/kmem/kmem_cache_alloc_node")
int memleak__kmem_cache_alloc_node(void *ctx)
{
	const void *ptr;
	size_t bytes_alloc;

	if (has_kmem_alloc_node()) {
		struct trace_event_raw_kmem_alloc_node___x *args = ctx;
		ptr = BPF_CORE_READ(args, ptr);
		bytes_alloc = BPF_CORE_READ(args, bytes_alloc);

		if (wa_missing_free)
			gen_free_enter(ptr);

		gen_alloc_enter(bytes_alloc);

		return gen_alloc_exit2(ctx, (u64)ptr);
	} else {
		/* the tracepoint does not exist on this kernel and is disabled
		 * at attach time; return to avoid a compile warning */
		return 0;
	}
}

SEC("tracepoint/kmem/kmem_cache_free")
int memleak__kmem_cache_free(void *ctx)
{
	const void *ptr;

	if (has_kmem_cache_free()) {
		struct trace_event_raw_kmem_cache_free___x *args = ctx;
		ptr = BPF_CORE_READ(args, ptr);
	} else {
		struct trace_event_raw_kmem_free___x *args = ctx;
		ptr = BPF_CORE_READ(args, ptr);
	}

	return gen_free_enter(ptr);
}

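/* Page allocations are keyed by page frame number (pfn), which mm_page_free
 * reports as well; the size is page_size shifted left by the allocation order. */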
SEC("tracepoint/kmem/mm_page_alloc")
int memleak__mm_page_alloc(struct trace_event_raw_mm_page_alloc *ctx)
{
	gen_alloc_enter(page_size << ctx->order);

	return gen_alloc_exit2(ctx, ctx->pfn);
}

SEC("tracepoint/kmem/mm_page_free")
int memleak__mm_page_free(struct trace_event_raw_mm_page_free *ctx)
{
	return gen_free_enter((void *)ctx->pfn);
}

SEC("tracepoint/percpu/percpu_alloc_percpu")
int memleak__percpu_alloc_percpu(struct trace_event_raw_percpu_alloc_percpu *ctx)
{
	gen_alloc_enter(ctx->bytes_alloc);

	return gen_alloc_exit2(ctx, (u64)(ctx->ptr));
}

SEC("tracepoint/percpu/percpu_free_percpu")
int memleak__percpu_free_percpu(struct trace_event_raw_percpu_free_percpu *ctx)
{
	return gen_free_enter(ctx->ptr);
}

char LICENSE[] SEC("license") = "GPL";