1 #ifndef PERF_UTIL_KWORK_H
2 #define PERF_UTIL_KWORK_H
3
4 #include "perf.h"
5 #include "util/tool.h"
6 #include "util/time-utils.h"
7
8 #include <linux/bitmap.h>
9 #include <linux/list.h>
10 #include <linux/rbtree.h>
11 #include <linux/types.h>
12
13 struct perf_sample;
14 struct perf_session;
15
/*
 * Kernel work classes that perf kwork can profile.
 * KWORK_CLASS_MAX must remain the last entry (class count sentinel).
 */
enum kwork_class_type {
	KWORK_CLASS_IRQ,	/* hardirq handlers */
	KWORK_CLASS_SOFTIRQ,	/* softirq handlers */
	KWORK_CLASS_WORKQUEUE,	/* workqueue work items */
	KWORK_CLASS_SCHED,	/* scheduler/task events */
	KWORK_CLASS_MAX,	/* keep last */
};
23
/*
 * Report mode the command is running in (stored in perf_kwork::report);
 * selects which trace_kwork_handler set is installed.
 */
enum kwork_report_type {
	KWORK_REPORT_RUNTIME,	/* aggregate execution time per work */
	KWORK_REPORT_LATENCY,	/* presumably raise-to-entry latency — see atom types below */
	KWORK_REPORT_TIMEHIST,	/* per-event time history */
	KWORK_REPORT_TOP,	/* top-style CPU usage view */
};
30
/*
 * Stages of one work occurrence as seen in the trace.  Also used as the
 * index into kwork_work::atom_list[], so KWORK_TRACE_MAX must stay last.
 */
enum kwork_trace_type {
	KWORK_TRACE_RAISE,	/* work raised/queued */
	KWORK_TRACE_ENTRY,	/* handler started executing */
	KWORK_TRACE_EXIT,	/* handler finished */
	KWORK_TRACE_MAX,	/* keep last: number of trace types */
};
37
38 /*
39 * data structure:
40 *
41 * +==================+ +============+ +======================+
42 * | class | | work | | atom |
43 * +==================+ +============+ +======================+
44 * +------------+ | +-----+ | | +------+ | | +-------+ +-----+ |
45 * | perf_kwork | +-> | irq | --------|+-> | eth0 | --+-> | raise | - | ... | --+ +-----------+
46 * +-----+------+ || +-----+ ||| +------+ ||| +-------+ +-----+ | | | |
47 * | || ||| ||| | +-> | atom_page |
48 * | || ||| ||| +-------+ +-----+ | | |
49 * | class_list ||| |+-> | entry | - | ... | ----> | |
50 * | || ||| ||| +-------+ +-----+ | | |
51 * | || ||| ||| | +-> | |
52 * | || ||| ||| +-------+ +-----+ | | | |
53 * | || ||| |+-> | exit | - | ... | --+ +-----+-----+
54 * | || ||| | | +-------+ +-----+ | |
55 * | || ||| | | | |
56 * | || ||| +-----+ | | | |
57 * | || |+-> | ... | | | | |
58 * | || | | +-----+ | | | |
59 * | || | | | | | |
60 * | || +---------+ | | +-----+ | | +-------+ +-----+ | |
61 * | +-> | softirq | -------> | RCU | ---+-> | raise | - | ... | --+ +-----+-----+
62 * | || +---------+ | | +-----+ ||| +-------+ +-----+ | | | |
63 * | || | | ||| | +-> | atom_page |
64 * | || | | ||| +-------+ +-----+ | | |
65 * | || | | |+-> | entry | - | ... | ----> | |
66 * | || | | ||| +-------+ +-----+ | | |
67 * | || | | ||| | +-> | |
68 * | || | | ||| +-------+ +-----+ | | | |
69 * | || | | |+-> | exit | - | ... | --+ +-----+-----+
70 * | || | | | | +-------+ +-----+ | |
71 * | || | | | | | |
72 * | || +-----------+ | | +-----+ | | | |
73 * | +-> | workqueue | -----> | ... | | | | |
74 * | | +-----------+ | | +-----+ | | | |
75 * | +==================+ +============+ +======================+ |
76 * | |
77 * +----> atom_page_list ---------------------------------------------------------+
78 *
79 */
80
/*
 * One traced event occurrence (raise/entry/exit) for a work.  Atoms are
 * carved out of kwork_atom_page slabs instead of individual allocations;
 * page_addr/bit_inpage locate the slot so it can be released.
 */
struct kwork_atom {
	struct list_head list;		/* node in kwork_work::atom_list[type] */
	u64 time;			/* event timestamp */
	struct kwork_atom *prev;	/* paired earlier-stage atom (e.g. raise for an entry) — TODO confirm */

	void *page_addr;		/* owning kwork_atom_page */
	unsigned long bit_inpage;	/* slot index within the page's bitmap */
};
89
#define NR_ATOM_PER_PAGE 128
/*
 * Slab of NR_ATOM_PER_PAGE atoms; per-slot allocation state is tracked in
 * @bitmap.  Pages are linked on perf_kwork::atom_page_list (see diagram).
 */
struct kwork_atom_page {
	struct list_head list;				/* node in perf_kwork::atom_page_list */
	struct kwork_atom atoms[NR_ATOM_PER_PAGE];	/* atom storage */
	DECLARE_BITMAP(bitmap, NR_ATOM_PER_PAGE);	/* set bit = slot in use — TODO confirm polarity */
};
96
struct perf_kwork;
struct kwork_class;

/*
 * One tracked work instance (a specific irq, softirq vector, workqueue
 * work, ...) together with the statistics accumulated for it by the
 * active report mode.
 */
struct kwork_work {
	/*
	 * class field
	 */
	struct rb_node node;		/* node in kwork_class::work_root */
	struct kwork_class *class;	/* owning class */

	/*
	 * work field
	 */
	u64 id;				/* class-specific identifier */
	int cpu;
	char *name;

	/*
	 * atom field
	 */
	u64 nr_atoms;
	struct list_head atom_list[KWORK_TRACE_MAX];	/* pending atoms, one list per trace type */

	/*
	 * runtime report
	 */
	u64 max_runtime;
	u64 max_runtime_start;		/* timestamps bounding the longest run */
	u64 max_runtime_end;
	u64 total_runtime;

	/*
	 * latency report
	 */
	u64 max_latency;
	u64 max_latency_start;		/* timestamps bounding the worst latency */
	u64 max_latency_end;
	u64 total_latency;

	/*
	 * top report
	 */
	u32 cpu_usage;			/* NOTE(review): units not visible here (ratio? percent*100?) — confirm */
	u32 tgid;			/* thread group id of the task */
	bool is_kthread;		/* task is a kernel thread */
};
142
/*
 * One kind of kernel work (irq, softirq, workqueue, sched): its
 * tracepoints and the tree of works observed for it.
 */
struct kwork_class {
	struct list_head list;		/* node in perf_kwork::class_list */
	const char *name;
	enum kwork_class_type type;

	unsigned int nr_tracepoints;	/* number of entries in tp_handlers */
	const struct evsel_str_handler *tp_handlers;	/* tracepoint-name -> handler table */

	struct rb_root_cached work_root;	/* kwork_work nodes of this class */

	/* set up class-specific state for @session; int return suggests 0/-errno — TODO confirm */
	int (*class_init)(struct kwork_class *class,
			  struct perf_session *session);

	/* fill @work's identity fields from a @sample of stage @src_type */
	void (*work_init)(struct perf_kwork *kwork,
			  struct kwork_class *class,
			  struct kwork_work *work,
			  enum kwork_trace_type src_type,
			  struct evsel *evsel,
			  struct perf_sample *sample,
			  struct machine *machine);

	/* format a human-readable name for @work into @buf (at most @len bytes) */
	void (*work_name)(struct kwork_work *work,
			  char *buf, int len);
};
167
/*
 * Per-report-mode callbacks, one per tracepoint kind; the active set is
 * installed in perf_kwork::tp_handler.  int returns suggest 0 on
 * success / negative on error — TODO confirm against callers.
 */
struct trace_kwork_handler {
	/* work was raised/queued */
	int (*raise_event)(struct perf_kwork *kwork,
			   struct kwork_class *class, struct evsel *evsel,
			   struct perf_sample *sample, struct machine *machine);

	/* handler began executing */
	int (*entry_event)(struct perf_kwork *kwork,
			   struct kwork_class *class, struct evsel *evsel,
			   struct perf_sample *sample, struct machine *machine);

	/* handler finished executing */
	int (*exit_event)(struct perf_kwork *kwork,
			  struct kwork_class *class, struct evsel *evsel,
			  struct perf_sample *sample, struct machine *machine);

	/* context switch (used by the sched class) */
	int (*sched_switch_event)(struct perf_kwork *kwork,
				  struct kwork_class *class, struct evsel *evsel,
				  struct perf_sample *sample, struct machine *machine);
};
185
/*
 * Per-CPU runtime breakdown accumulated for perf kwork top.
 * Field names suggest accumulated time per category — units (presumably
 * ns) not visible here, confirm at the accounting site.
 * NOTE(review): the leading "__" is in the C reserved-identifier
 * namespace; kept because external callers use this name.
 */
struct __top_cpus_runtime {
	u64 load;	/* task (non-idle) time */
	u64 idle;	/* idle time */
	u64 irq;	/* hardirq time */
	u64 softirq;	/* softirq time */
	u64 total;	/* total observed time */
};
193
/*
 * State for the perf kwork top view: which CPUs appeared in the trace
 * and their runtime counters (allocated elsewhere).
 */
struct kwork_top_stat {
	DECLARE_BITMAP(all_cpus_bitmap, MAX_NR_CPUS);	/* CPUs seen in the trace */
	struct __top_cpus_runtime *cpus_runtime;	/* per-CPU counters, presumably indexed by CPU — confirm */
};
198
/*
 * Top-level state of the "perf kwork" command: perf tool callbacks,
 * registered work classes, user filters, per-subcommand options and
 * aggregate statistics.
 */
struct perf_kwork {
	/*
	 * metadata
	 */
	struct perf_tool tool;			/* event-processing callbacks for the session */
	struct list_head class_list;		/* registered kwork_class instances */
	struct list_head atom_page_list;	/* allocated kwork_atom_page slabs */
	struct list_head sort_list, cmp_id;	/* active sort keys / comparison ids — TODO confirm element type */
	struct rb_root_cached sorted_work_root;	/* works re-sorted for report output */
	const struct trace_kwork_handler *tp_handler;	/* handler set for the active report mode */

	/*
	 * profile filters
	 */
	const char *profile_name;		/* only report works matching this name */

	const char *cpu_list;			/* CPU filter as user-supplied string */
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);	/* parsed form of cpu_list */

	const char *time_str;			/* time window as user-supplied string */
	struct perf_time_interval ptime;	/* parsed form of time_str */

	/*
	 * options for command
	 */
	bool force;				/* proceed despite data-file checks — TODO confirm */
	const char *event_list_str;		/* which classes to trace/report */
	enum kwork_report_type report;		/* active report mode */

	/*
	 * options for subcommand
	 */
	bool summary;				/* print a summary section */
	const char *sort_order;			/* user-supplied sort keys */
	bool show_callchain;			/* resolve and print callchains */
	unsigned int max_stack;			/* callchain depth limit */
	bool use_bpf;				/* collect via BPF instead of recorded trace */

	/*
	 * statistics
	 */
	u64 timestart;				/* bounds of the observed time range */
	u64 timeend;

	unsigned long nr_events;		/* samples processed */
	unsigned long nr_lost_chunks;		/* LOST records seen */
	unsigned long nr_lost_events;		/* total events reported lost */

	u64 all_runtime;			/* runtime summed over all works */
	u64 all_count;				/* event count summed over all works */
	/* skipped events per trace type; the extra slot presumably counts unmatched ones — confirm */
	u64 nr_skipped_events[KWORK_TRACE_MAX + 1];

	/*
	 * perf kwork top data
	 */
	struct kwork_top_stat top_stat;

	/* Add work callback: find-or-create the work matching @key in @class. */
	struct kwork_work *(*add_work)(struct perf_kwork *kwork,
				       struct kwork_class *class,
				       struct kwork_work *key);

};
262
#ifdef HAVE_BPF_SKEL

/* BPF-based collection for trace/report: prepare, read back results, clean up. */
int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork);
int perf_kwork__report_read_bpf(struct perf_kwork *kwork);
void perf_kwork__report_cleanup_bpf(void);

/* enable/disable BPF tracing around the profiled interval */
void perf_kwork__trace_start(void);
void perf_kwork__trace_finish(void);

/* same trio for the "top" subcommand */
int perf_kwork__top_prepare_bpf(struct perf_kwork *kwork);
int perf_kwork__top_read_bpf(struct perf_kwork *kwork);
void perf_kwork__top_cleanup_bpf(void);

void perf_kwork__top_start(void);
void perf_kwork__top_finish(void);
278
279 #else /* !HAVE_BPF_SKEL */
280
/* stub when BPF skeletons are not built in: always report failure */
static inline int
perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}
286
/* stub when BPF skeletons are not built in: always report failure */
static inline int
perf_kwork__report_read_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}
292
perf_kwork__report_cleanup_bpf(void)293 static inline void perf_kwork__report_cleanup_bpf(void) {}
294
/* no-op stubs: trace start/stop do nothing without BPF support */
static inline void perf_kwork__trace_start(void) {}
static inline void perf_kwork__trace_finish(void) {}
297
/* stub when BPF skeletons are not built in: always report failure */
static inline int
perf_kwork__top_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}
303
/* stub when BPF skeletons are not built in: always report failure */
static inline int
perf_kwork__top_read_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}
309
perf_kwork__top_cleanup_bpf(void)310 static inline void perf_kwork__top_cleanup_bpf(void) {}
311
/* no-op stubs: top start/stop do nothing without BPF support */
static inline void perf_kwork__top_start(void) {}
static inline void perf_kwork__top_finish(void) {}
314
315 #endif /* HAVE_BPF_SKEL */
316
317 #endif /* PERF_UTIL_KWORK_H */
318