#include <stdio.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>

#include <linux/err.h>

#include "util/ftrace.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/bpf_counter.h"
#include "util/stat.h"

#include "util/bpf_skel/func_latency.skel.h"

/* BPF skeleton for the func_latency program, shared by all functions below */
static struct func_latency_bpf *skel;

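/*
 * Open and configure the func_latency skeleton, size and fill the
 * CPU/task filter maps, and attach entry/exit probes to the target
 * function.
 */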
int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
{
	int fd, err;
	int i, ncpus = 1, ntasks = 1;
	struct filter_entry *func;

	/* the latency tracer works on a single function at a time */
	if (!list_is_singular(&ftrace->filters)) {
		pr_err("ERROR: %s target function(s).\n",
		       list_empty(&ftrace->filters) ? "No" : "Too many");
		return -1;
	}

	func = list_first_entry(&ftrace->filters, struct filter_entry, list);

	skel = func_latency_bpf__open();
	if (!skel) {
		pr_err("Failed to open func latency skeleton\n");
		return -1;
	}

	/* histogram parameters are passed via read-only data before load */
	skel->rodata->bucket_range = ftrace->bucket_range;
	skel->rodata->min_latency = ftrace->min_latency;

	/* don't need to set cpu filter for system-wide mode */
	if (ftrace->target.cpu_list) {
		ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
		skel->rodata->has_cpu = 1;
	}

	/* likewise, no task filter is needed for system-wide mode */
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		ntasks = perf_thread_map__nr(ftrace->evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
		skel->rodata->has_task = 1;
	}

	skel->rodata->use_nsec = ftrace->use_nsec;

	/* bump RLIMIT_MEMLOCK so BPF map creation does not fail */
	set_max_rlimit();

	err = func_latency_bpf__load(skel);
	if (err) {
		pr_err("Failed to load func latency skeleton\n");
		goto out;
	}

	if (ftrace->target.cpu_list) {
		u32 cpu;
		u8 val = 1;

		/* mark each requested CPU in the filter map */
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		u32 pid;
		u8 val = 1;

		/* likewise, mark each target task in the filter map */
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(ftrace->evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	/* start high so the first sample always updates the minimum */
	skel->bss->min = INT64_MAX;

	/* attach entry (kprobe) and exit (kretprobe) programs to the function */
	skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
							    false, func->name);
	if (IS_ERR(skel->links.func_begin)) {
		pr_err("Failed to attach fentry program\n");
		err = PTR_ERR(skel->links.func_begin);
		goto out;
	}

	skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
							  true, func->name);
	if (IS_ERR(skel->links.func_end)) {
		pr_err("Failed to attach fexit program\n");
		err = PTR_ERR(skel->links.func_end);
		goto out;
	}

	/* XXX: we don't actually use this fd - just for poll() */
	return open("/dev/null", O_RDONLY);

out:
	return err;
}

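/* Start measurement by setting the 'enabled' flag read by the BPF programs */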
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 1;
	return 0;
}

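/* Stop measurement; the probes stay attached but stop accounting */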
int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 0;
	return 0;
}

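/*
 * Collect the results: fold the per-CPU histogram buckets into
 * 'buckets' and copy the aggregate statistics from the BPF .bss.
 */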
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
				  int buckets[], struct stats *stats)
{
	int i, fd, err;
	u32 idx;
	u64 *hist;
	int ncpus = cpu__max_cpu().cpu;

	fd = bpf_map__fd(skel->maps.latency);

	/* temporary buffer for the per-CPU values of a single bucket */
	hist = calloc(ncpus, sizeof(*hist));
	if (hist == NULL)
		return -ENOMEM;

	/* sum the per-CPU counts into each output bucket */
	for (idx = 0; idx < NUM_BUCKET; idx++) {
		err = bpf_map_lookup_elem(fd, &idx, hist);
		if (err) {
			buckets[idx] = 0;
			continue;
		}

		for (i = 0; i < ncpus; i++)
			buckets[idx] += hist[i];
	}

	if (skel->bss->count) {
		stats->mean = skel->bss->total / skel->bss->count;
		stats->n = skel->bss->count;
		stats->max = skel->bss->max;
		stats->min = skel->bss->min;
	}

	free(hist);
	return 0;
}

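/* Detach the probes and free the skeleton */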
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	func_latency_bpf__destroy(skel);
	return 0;
}