1 // SPDX-License-Identifier: GPL-2.0
2 #include "callchain.h"
3 #include "debug.h"
4 #include "dso.h"
5 #include "build-id.h"
6 #include "hist.h"
7 #include "kvm-stat.h"
8 #include "map.h"
9 #include "map_symbol.h"
10 #include "branch.h"
11 #include "mem-events.h"
12 #include "mem-info.h"
13 #include "session.h"
14 #include "namespaces.h"
15 #include "cgroup.h"
16 #include "sort.h"
17 #include "units.h"
18 #include "evlist.h"
19 #include "evsel.h"
20 #include "annotate.h"
21 #include "srcline.h"
22 #include "symbol.h"
23 #include "thread.h"
24 #include "block-info.h"
25 #include "ui/progress.h"
26 #include <errno.h>
27 #include <math.h>
28 #include <inttypes.h>
29 #include <sys/param.h>
30 #include <linux/rbtree.h>
31 #include <linux/string.h>
32 #include <linux/time64.h>
33 #include <linux/zalloc.h>
34
35 static int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
36 static int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
37
38 static bool hists__filter_entry_by_dso(struct hists *hists,
39 struct hist_entry *he);
40 static bool hists__filter_entry_by_thread(struct hists *hists,
41 struct hist_entry *he);
42 static bool hists__filter_entry_by_symbol(struct hists *hists,
43 struct hist_entry *he);
44 static bool hists__filter_entry_by_socket(struct hists *hists,
45 struct hist_entry *he);
46
u16 hists__col_len(struct hists *hists, enum hist_column col)
48 {
49 return hists->col_len[col];
50 }
51
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
53 {
54 hists->col_len[col] = len;
55 }
56
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
58 {
59 if (len > hists__col_len(hists, col)) {
60 hists__set_col_len(hists, col, len);
61 return true;
62 }
63 return false;
64 }
65
void hists__reset_col_len(struct hists *hists)
67 {
68 enum hist_column col;
69
70 for (col = 0; col < HISTC_NR_COLS; ++col)
71 hists__set_col_len(hists, col, 0);
72 }
73
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
75 {
76 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
77
78 if (hists__col_len(hists, dso) < unresolved_col_width &&
79 !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
80 !symbol_conf.dso_list)
81 hists__set_col_len(hists, dso, unresolved_col_width);
82 }
83
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
85 {
86 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
87 int symlen;
88 u16 len;
89
90 if (h->block_info)
91 return;
92 /*
93 * +4 accounts for '[x] ' priv level info
94 * +2 accounts for 0x prefix on raw addresses
95 * +3 accounts for ' y ' symtab origin info
96 */
97 if (h->ms.sym) {
98 symlen = h->ms.sym->namelen + 4;
99 if (verbose > 0)
100 symlen += BITS_PER_LONG / 4 + 2 + 3;
101 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
102 } else {
103 symlen = unresolved_col_width + 4 + 2;
104 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
105 hists__set_unres_dso_col_len(hists, HISTC_DSO);
106 }
107
108 len = thread__comm_len(h->thread);
109 if (hists__new_col_len(hists, HISTC_COMM, len))
110 hists__set_col_len(hists, HISTC_THREAD, len + 8);
111
112 if (h->ms.map) {
113 len = dso__name_len(map__dso(h->ms.map));
114 hists__new_col_len(hists, HISTC_DSO, len);
115 }
116
117 if (h->parent)
118 hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
119
120 if (h->branch_info) {
121 if (h->branch_info->from.ms.sym) {
122 symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
123 if (verbose > 0)
124 symlen += BITS_PER_LONG / 4 + 2 + 3;
125 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
126
127 symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
128 hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
129 } else {
130 symlen = unresolved_col_width + 4 + 2;
131 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
132 hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
133 hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
134 }
135
136 if (h->branch_info->to.ms.sym) {
137 symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
138 if (verbose > 0)
139 symlen += BITS_PER_LONG / 4 + 2 + 3;
140 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
141
142 symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
143 hists__new_col_len(hists, HISTC_DSO_TO, symlen);
144 } else {
145 symlen = unresolved_col_width + 4 + 2;
146 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
147 hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
148 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
149 }
150
151 if (h->branch_info->srcline_from)
152 hists__new_col_len(hists, HISTC_SRCLINE_FROM,
153 strlen(h->branch_info->srcline_from));
154 if (h->branch_info->srcline_to)
155 hists__new_col_len(hists, HISTC_SRCLINE_TO,
156 strlen(h->branch_info->srcline_to));
157 }
158
159 if (h->mem_info) {
160 if (mem_info__daddr(h->mem_info)->ms.sym) {
161 symlen = (int)mem_info__daddr(h->mem_info)->ms.sym->namelen + 4
162 + unresolved_col_width + 2;
163 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
164 symlen);
165 hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
166 symlen + 1);
167 } else {
168 symlen = unresolved_col_width + 4 + 2;
169 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
170 symlen);
171 hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
172 symlen);
173 }
174
175 if (mem_info__iaddr(h->mem_info)->ms.sym) {
176 symlen = (int)mem_info__iaddr(h->mem_info)->ms.sym->namelen + 4
177 + unresolved_col_width + 2;
178 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
179 symlen);
180 } else {
181 symlen = unresolved_col_width + 4 + 2;
182 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
183 symlen);
184 }
185
186 if (mem_info__daddr(h->mem_info)->ms.map) {
187 symlen = dso__name_len(map__dso(mem_info__daddr(h->mem_info)->ms.map));
188 hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
189 symlen);
190 } else {
191 symlen = unresolved_col_width + 4 + 2;
192 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
193 }
194
195 hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
196 unresolved_col_width + 4 + 2);
197
198 hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
199 unresolved_col_width + 4 + 2);
200
201 } else {
202 symlen = unresolved_col_width + 4 + 2;
203 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
204 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
205 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
206 }
207
208 hists__new_col_len(hists, HISTC_CGROUP, 6);
209 hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
210 hists__new_col_len(hists, HISTC_CPU, 3);
211 hists__new_col_len(hists, HISTC_SOCKET, 6);
212 hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
213 hists__new_col_len(hists, HISTC_MEM_TLB, 22);
214 hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
215 hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
216 hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
217 hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
218 hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
219 hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
220 hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
221 hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
222 hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
223 hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);
224 hists__new_col_len(hists, HISTC_CALLCHAIN_BRANCH_PREDICTED, 9);
225 hists__new_col_len(hists, HISTC_CALLCHAIN_BRANCH_ABORT, 5);
226 hists__new_col_len(hists, HISTC_CALLCHAIN_BRANCH_CYCLES, 6);
227
228 if (symbol_conf.nanosecs)
229 hists__new_col_len(hists, HISTC_TIME, 16);
230 else
231 hists__new_col_len(hists, HISTC_TIME, 12);
232 hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);
233
234 if (h->srcline) {
235 len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
236 hists__new_col_len(hists, HISTC_SRCLINE, len);
237 }
238
239 if (h->srcfile)
240 hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
241
242 if (h->transaction)
243 hists__new_col_len(hists, HISTC_TRANSACTION,
244 hist_entry__transaction_len());
245
246 if (h->trace_output)
247 hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
248
249 if (h->cgroup) {
250 const char *cgrp_name = "unknown";
251 struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
252 h->cgroup);
253 if (cgrp != NULL)
254 cgrp_name = cgrp->name;
255
256 hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
257 }
258 }
259
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
261 {
262 struct rb_node *next = rb_first_cached(&hists->entries);
263 struct hist_entry *n;
264 int row = 0;
265
266 hists__reset_col_len(hists);
267
268 while (next && row++ < max_rows) {
269 n = rb_entry(next, struct hist_entry, rb_node);
270 if (!n->filtered)
271 hists__calc_col_len(hists, n);
272 next = rb_next(&n->rb_node);
273 }
274 }
275
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
277 unsigned int cpumode, u64 period)
278 {
279 switch (cpumode) {
280 case PERF_RECORD_MISC_KERNEL:
281 he_stat->period_sys += period;
282 break;
283 case PERF_RECORD_MISC_USER:
284 he_stat->period_us += period;
285 break;
286 case PERF_RECORD_MISC_GUEST_KERNEL:
287 he_stat->period_guest_sys += period;
288 break;
289 case PERF_RECORD_MISC_GUEST_USER:
290 he_stat->period_guest_us += period;
291 break;
292 default:
293 break;
294 }
295 }
296
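/*
 * Quantize the sample timestamp into buckets of symbol_conf.time_quantum,
 * e.g. with a 100ms quantum the timestamps 120ms and 199ms both map to the
 * 100ms bucket; a zero quantum leaves the timestamp unchanged.
 */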
static long hist_time(unsigned long htime)
298 {
299 unsigned long time_quantum = symbol_conf.time_quantum;
300 if (time_quantum)
301 return (htime / time_quantum) * time_quantum;
302 return htime;
303 }
304
static void he_stat__add_period(struct he_stat *he_stat, u64 period)
306 {
307 he_stat->period += period;
308 he_stat->nr_events += 1;
309 }
310
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
312 {
313 dest->period += src->period;
314 dest->period_sys += src->period_sys;
315 dest->period_us += src->period_us;
316 dest->period_guest_sys += src->period_guest_sys;
317 dest->period_guest_us += src->period_guest_us;
318 dest->weight1 += src->weight1;
319 dest->weight2 += src->weight2;
320 dest->weight3 += src->weight3;
321 dest->nr_events += src->nr_events;
322 }
323
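/*
 * Age the accumulated statistics by a factor of 7/8 (~12.5% decay per
 * pass); used when live modes such as 'perf top' periodically trim the
 * histogram via hists__decay_entries().
 */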
static void he_stat__decay(struct he_stat *he_stat)
325 {
326 he_stat->period = (he_stat->period * 7) / 8;
327 he_stat->nr_events = (he_stat->nr_events * 7) / 8;
328 he_stat->weight1 = (he_stat->weight1 * 7) / 8;
329 he_stat->weight2 = (he_stat->weight2 * 7) / 8;
330 he_stat->weight3 = (he_stat->weight3 * 7) / 8;
331 }
332
333 static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
334
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
336 {
337 u64 prev_period = he->stat.period;
338 u64 diff;
339
340 if (prev_period == 0)
341 return true;
342
343 he_stat__decay(&he->stat);
344 if (symbol_conf.cumulate_callchain)
345 he_stat__decay(he->stat_acc);
346 decay_callchain(he->callchain);
347
348 diff = prev_period - he->stat.period;
349
350 if (!he->depth) {
351 hists->stats.total_period -= diff;
352 if (!he->filtered)
353 hists->stats.total_non_filtered_period -= diff;
354 }
355
356 if (!he->leaf) {
357 struct hist_entry *child;
358 struct rb_node *node = rb_first_cached(&he->hroot_out);
359 while (node) {
360 child = rb_entry(node, struct hist_entry, rb_node);
361 node = rb_next(node);
362
363 if (hists__decay_entry(hists, child))
364 hists__delete_entry(hists, child);
365 }
366 }
367
368 return he->stat.period == 0;
369 }
370
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
372 {
373 struct rb_root_cached *root_in;
374 struct rb_root_cached *root_out;
375
376 if (he->parent_he) {
377 root_in = &he->parent_he->hroot_in;
378 root_out = &he->parent_he->hroot_out;
379 } else {
380 if (hists__has(hists, need_collapse))
381 root_in = &hists->entries_collapsed;
382 else
383 root_in = hists->entries_in;
384 root_out = &hists->entries;
385 }
386
387 rb_erase_cached(&he->rb_node_in, root_in);
388 rb_erase_cached(&he->rb_node, root_out);
389
390 --hists->nr_entries;
391 if (!he->filtered)
392 --hists->nr_non_filtered_entries;
393
394 hist_entry__delete(he);
395 }
396
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
398 {
399 struct rb_node *next = rb_first_cached(&hists->entries);
400 struct hist_entry *n;
401
402 while (next) {
403 n = rb_entry(next, struct hist_entry, rb_node);
404 next = rb_next(&n->rb_node);
405 if (((zap_user && n->level == '.') ||
406 (zap_kernel && n->level != '.') ||
407 hists__decay_entry(hists, n))) {
408 hists__delete_entry(hists, n);
409 }
410 }
411 }
412
void hists__delete_entries(struct hists *hists)
414 {
415 struct rb_node *next = rb_first_cached(&hists->entries);
416 struct hist_entry *n;
417
418 while (next) {
419 n = rb_entry(next, struct hist_entry, rb_node);
420 next = rb_next(&n->rb_node);
421
422 hists__delete_entry(hists, n);
423 }
424 }
425
struct hist_entry *hists__get_entry(struct hists *hists, int idx)
427 {
428 struct rb_node *next = rb_first_cached(&hists->entries);
429 struct hist_entry *n;
430 int i = 0;
431
432 while (next) {
433 n = rb_entry(next, struct hist_entry, rb_node);
434 if (i == idx)
435 return n;
436
437 next = rb_next(&n->rb_node);
438 i++;
439 }
440
441 return NULL;
442 }
443
444 /*
445 * histogram, sorted on item, collects periods
446 */
447
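/*
 * Deep-copy the on-stack template entry: duplicate the stat_acc,
 * branch_info, mem_info, raw_data and srcline buffers, allocate the
 * res_samples array, and take references on the maps, map and thread,
 * so the new entry owns everything it points to.
 */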
static int hist_entry__init(struct hist_entry *he,
449 struct hist_entry *template,
450 bool sample_self,
451 size_t callchain_size)
452 {
453 *he = *template;
454 he->callchain_size = callchain_size;
455
456 if (symbol_conf.cumulate_callchain) {
457 he->stat_acc = malloc(sizeof(he->stat));
458 if (he->stat_acc == NULL)
459 return -ENOMEM;
460 memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
461 if (!sample_self)
462 memset(&he->stat, 0, sizeof(he->stat));
463 }
464
465 he->ms.maps = maps__get(he->ms.maps);
466 he->ms.map = map__get(he->ms.map);
467
468 if (he->branch_info) {
/*
 * This branch info is (part of) an array allocated by
 * sample__resolve_bstack() and will be freed after the
 * new entries have been added, so we need to save a copy.
 */
474 he->branch_info = malloc(sizeof(*he->branch_info));
475 if (he->branch_info == NULL)
476 goto err;
477
478 memcpy(he->branch_info, template->branch_info,
479 sizeof(*he->branch_info));
480
481 he->branch_info->from.ms.maps = maps__get(he->branch_info->from.ms.maps);
482 he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
483 he->branch_info->to.ms.maps = maps__get(he->branch_info->to.ms.maps);
484 he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
485 }
486
487 if (he->mem_info) {
488 he->mem_info = mem_info__clone(template->mem_info);
489 if (he->mem_info == NULL)
490 goto err_infos;
491 }
492
493 if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
494 callchain_init(he->callchain);
495
496 if (he->raw_data) {
497 he->raw_data = memdup(he->raw_data, he->raw_size);
498 if (he->raw_data == NULL)
499 goto err_infos;
500 }
501
502 if (he->srcline && he->srcline != SRCLINE_UNKNOWN) {
503 he->srcline = strdup(he->srcline);
504 if (he->srcline == NULL)
505 goto err_rawdata;
506 }
507
508 if (symbol_conf.res_sample) {
509 he->res_samples = calloc(symbol_conf.res_sample,
510 sizeof(struct res_sample));
511 if (!he->res_samples)
512 goto err_srcline;
513 }
514
515 INIT_LIST_HEAD(&he->pairs.node);
516 he->thread = thread__get(he->thread);
517 he->hroot_in = RB_ROOT_CACHED;
518 he->hroot_out = RB_ROOT_CACHED;
519
520 if (!symbol_conf.report_hierarchy)
521 he->leaf = true;
522
523 return 0;
524
525 err_srcline:
526 zfree(&he->srcline);
527
528 err_rawdata:
529 zfree(&he->raw_data);
530
531 err_infos:
532 if (he->branch_info) {
533 map_symbol__exit(&he->branch_info->from.ms);
534 map_symbol__exit(&he->branch_info->to.ms);
535 zfree(&he->branch_info);
536 }
537 if (he->mem_info) {
538 map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
539 map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
540 }
541 err:
542 map_symbol__exit(&he->ms);
543 zfree(&he->stat_acc);
544 return -ENOMEM;
545 }
546
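/*
 * Default allocator: 'size' is the callchain size requested by
 * hist_entry__new(), allocated contiguously after struct hist_entry so
 * that the entry's callchain storage immediately follows it.
 */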
static void *hist_entry__zalloc(size_t size)
548 {
549 return zalloc(size + sizeof(struct hist_entry));
550 }
551
static void hist_entry__free(void *ptr)
553 {
554 free(ptr);
555 }
556
557 static struct hist_entry_ops default_ops = {
558 .new = hist_entry__zalloc,
559 .free = hist_entry__free,
560 };
561
static struct hist_entry *hist_entry__new(struct hist_entry *template,
563 bool sample_self)
564 {
565 struct hist_entry_ops *ops = template->ops;
566 size_t callchain_size = 0;
567 struct hist_entry *he;
568 int err = 0;
569
570 if (!ops)
571 ops = template->ops = &default_ops;
572
573 if (symbol_conf.use_callchain)
574 callchain_size = sizeof(struct callchain_root);
575
576 he = ops->new(callchain_size);
577 if (he) {
578 err = hist_entry__init(he, template, sample_self, callchain_size);
579 if (err) {
580 ops->free(he);
581 he = NULL;
582 }
583 }
584 return he;
585 }
586
static u8 symbol__parent_filter(const struct symbol *parent)
588 {
589 if (symbol_conf.exclude_other && parent == NULL)
590 return 1 << HIST_FILTER__PARENT;
591 return 0;
592 }
593
static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
595 {
596 if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
597 return;
598
599 he->hists->callchain_period += period;
600 if (!he->filtered)
601 he->hists->callchain_non_filtered_period += period;
602 }
603
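/*
 * Look up 'entry' in the current input tree: if an equal entry exists,
 * fold the new period into it (updating an out-of-date map if needed),
 * otherwise clone the template via hist_entry__new() and insert it at
 * the sorted position.
 */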
static struct hist_entry *hists__findnew_entry(struct hists *hists,
605 struct hist_entry *entry,
606 const struct addr_location *al,
607 bool sample_self)
608 {
609 struct rb_node **p;
610 struct rb_node *parent = NULL;
611 struct hist_entry *he;
612 int64_t cmp;
613 u64 period = entry->stat.period;
614 bool leftmost = true;
615
616 p = &hists->entries_in->rb_root.rb_node;
617
618 while (*p != NULL) {
619 parent = *p;
620 he = rb_entry(parent, struct hist_entry, rb_node_in);
621
/*
 * Make sure that it receives arguments in the same order as
 * hist_entry__collapse() so that we can use an appropriate
 * function when searching an entry regardless of which sort
 * keys were used.
 */
628 cmp = hist_entry__cmp(he, entry);
629 if (!cmp) {
630 if (sample_self) {
631 he_stat__add_stat(&he->stat, &entry->stat);
632 hist_entry__add_callchain_period(he, period);
633 }
634 if (symbol_conf.cumulate_callchain)
635 he_stat__add_period(he->stat_acc, period);
636
637 block_info__delete(entry->block_info);
638
639 kvm_info__zput(entry->kvm_info);
640
641 /* If the map of an existing hist_entry has
642 * become out-of-date due to an exec() or
643 * similar, update it. Otherwise we will
644 * mis-adjust symbol addresses when computing
645 * the history counter to increment.
646 */
647 if (hists__has(hists, sym) && he->ms.map != entry->ms.map) {
648 if (he->ms.sym) {
649 u64 addr = he->ms.sym->start;
650 he->ms.sym = map__find_symbol(entry->ms.map, addr);
651 }
652
653 map__put(he->ms.map);
654 he->ms.map = map__get(entry->ms.map);
655 }
656 goto out;
657 }
658
659 if (cmp < 0)
660 p = &(*p)->rb_left;
661 else {
662 p = &(*p)->rb_right;
663 leftmost = false;
664 }
665 }
666
667 he = hist_entry__new(entry, sample_self);
668 if (!he)
669 return NULL;
670
671 if (sample_self)
672 hist_entry__add_callchain_period(he, period);
673 hists->nr_entries++;
674
675 rb_link_node(&he->rb_node_in, parent, p);
676 rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
677 out:
678 if (sample_self)
679 he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
680 if (symbol_conf.cumulate_callchain)
681 he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
682 return he;
683 }
684
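/*
 * Pick a uniformly distributed value in [0, high): draws below 'thresh'
 * are rejected before taking the modulus, the usual trick to avoid
 * modulo bias.
 */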
static unsigned random_max(unsigned high)
686 {
687 unsigned thresh = -high % high;
688 for (;;) {
689 unsigned r = random();
690 if (r >= thresh)
691 return r % high;
692 }
693 }
694
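/*
 * Remember this sample's time/cpu/tid so it can be revisited later
 * (e.g. via res_sample in the report browser): fill the res_samples
 * array up to symbol_conf.res_sample entries, then overwrite a random
 * slot once it is full.
 */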
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
696 {
697 struct res_sample *r;
698 int j;
699
700 if (he->num_res < symbol_conf.res_sample) {
701 j = he->num_res++;
702 } else {
703 j = random_max(symbol_conf.res_sample);
704 }
705 r = &he->res_samples[j];
706 r->time = sample->time;
707 r->cpu = sample->cpu;
708 r->tid = sample->tid;
709 }
710
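/*
 * Build an on-stack hist_entry describing this sample and let
 * hists__findnew_entry() either merge it into an existing entry or
 * clone it into a newly allocated one.
 */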
711 static struct hist_entry*
__hists__add_entry(struct hists *hists,
713 struct addr_location *al,
714 struct symbol *sym_parent,
715 struct branch_info *bi,
716 struct mem_info *mi,
717 struct kvm_info *ki,
718 struct block_info *block_info,
719 struct perf_sample *sample,
720 bool sample_self,
721 struct hist_entry_ops *ops)
722 {
723 struct namespaces *ns = thread__namespaces(al->thread);
724 struct hist_entry entry = {
725 .thread = al->thread,
726 .comm = thread__comm(al->thread),
727 .cgroup_id = {
728 .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
729 .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
730 },
731 .cgroup = sample->cgroup,
732 .ms = {
733 .maps = al->maps,
734 .map = al->map,
735 .sym = al->sym,
736 },
737 .srcline = (char *) al->srcline,
738 .socket = al->socket,
739 .cpu = al->cpu,
740 .cpumode = al->cpumode,
741 .ip = al->addr,
742 .level = al->level,
743 .code_page_size = sample->code_page_size,
744 .stat = {
745 .nr_events = 1,
746 .period = sample->period,
747 .weight1 = sample->weight,
748 .weight2 = sample->ins_lat,
749 .weight3 = sample->p_stage_cyc,
750 },
751 .parent = sym_parent,
752 .filtered = symbol__parent_filter(sym_parent) | al->filtered,
753 .hists = hists,
754 .branch_info = bi,
755 .mem_info = mi,
756 .kvm_info = ki,
757 .block_info = block_info,
758 .transaction = sample->transaction,
759 .raw_data = sample->raw_data,
760 .raw_size = sample->raw_size,
761 .ops = ops,
762 .time = hist_time(sample->time),
763 .weight = sample->weight,
764 .ins_lat = sample->ins_lat,
765 .p_stage_cyc = sample->p_stage_cyc,
766 .simd_flags = sample->simd_flags,
767 }, *he = hists__findnew_entry(hists, &entry, al, sample_self);
768
769 if (!hists->has_callchains && he && he->callchain_size != 0)
770 hists->has_callchains = true;
771 if (he && symbol_conf.res_sample)
772 hists__res_sample(he, sample);
773 return he;
774 }
775
struct hist_entry *hists__add_entry(struct hists *hists,
777 struct addr_location *al,
778 struct symbol *sym_parent,
779 struct branch_info *bi,
780 struct mem_info *mi,
781 struct kvm_info *ki,
782 struct perf_sample *sample,
783 bool sample_self)
784 {
785 return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
786 sample, sample_self, NULL);
787 }
788
struct hist_entry *hists__add_entry_ops(struct hists *hists,
790 struct hist_entry_ops *ops,
791 struct addr_location *al,
792 struct symbol *sym_parent,
793 struct branch_info *bi,
794 struct mem_info *mi,
795 struct kvm_info *ki,
796 struct perf_sample *sample,
797 bool sample_self)
798 {
799 return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
800 sample, sample_self, ops);
801 }
802
struct hist_entry *hists__add_entry_block(struct hists *hists,
804 struct addr_location *al,
805 struct block_info *block_info)
806 {
807 struct hist_entry entry = {
808 .block_info = block_info,
809 .hists = hists,
810 .ms = {
811 .maps = al->maps,
812 .map = al->map,
813 .sym = al->sym,
814 },
815 }, *he = hists__findnew_entry(hists, &entry, al, false);
816
817 return he;
818 }
819
820 static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
822 struct addr_location *al __maybe_unused)
823 {
824 return 0;
825 }
826
827 static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
829 struct addr_location *al __maybe_unused)
830 {
831 return 0;
832 }
833
834 static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
836 {
837 struct perf_sample *sample = iter->sample;
838 struct mem_info *mi;
839
840 mi = sample__resolve_mem(sample, al);
841 if (mi == NULL)
842 return -ENOMEM;
843
844 iter->mi = mi;
845 return 0;
846 }
847
848 static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
850 {
851 u64 cost;
852 struct mem_info *mi = iter->mi;
853 struct hists *hists = evsel__hists(iter->evsel);
854 struct perf_sample *sample = iter->sample;
855 struct hist_entry *he;
856
857 if (mi == NULL)
858 return -EINVAL;
859
860 cost = sample->weight;
861 if (!cost)
862 cost = 1;
863
/*
 * We must pass period=weight in order to get the correct
 * sorting from hists__collapse_resort(), which is based solely
 * on periods. We want the sorting to be done on nr_events * weight,
 * and this is achieved indirectly by passing period=weight here
 * and via the he_stat__add_period() function.
 */
871 sample->period = cost;
872
873 he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
874 sample, true);
875 if (!he)
876 return -ENOMEM;
877
878 iter->he = he;
879 return 0;
880 }
881
882 static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
884 struct addr_location *al __maybe_unused)
885 {
886 struct evsel *evsel = iter->evsel;
887 struct hists *hists = evsel__hists(evsel);
888 struct hist_entry *he = iter->he;
889 int err = -EINVAL;
890
891 if (he == NULL)
892 goto out;
893
894 hists__inc_nr_samples(hists, he->filtered);
895
896 err = hist_entry__append_callchain(he, iter->sample);
897
898 out:
899 mem_info__zput(iter->mi);
900
901 iter->he = NULL;
902 return err;
903 }
904
905 static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
907 {
908 struct branch_info *bi;
909 struct perf_sample *sample = iter->sample;
910
911 bi = sample__resolve_bstack(sample, al);
912 if (!bi)
913 return -ENOMEM;
914
915 iter->curr = 0;
916 iter->total = sample->branch_stack->nr;
917
918 iter->bi = bi;
919 return 0;
920 }
921
922 static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
924 struct addr_location *al __maybe_unused)
925 {
926 return 0;
927 }
928
929 static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
931 {
932 struct branch_info *bi = iter->bi;
933 int i = iter->curr;
934
935 if (bi == NULL)
936 return 0;
937
938 if (iter->curr >= iter->total)
939 return 0;
940
941 maps__put(al->maps);
942 al->maps = maps__get(bi[i].to.ms.maps);
943 map__put(al->map);
944 al->map = map__get(bi[i].to.ms.map);
945 al->sym = bi[i].to.ms.sym;
946 al->addr = bi[i].to.addr;
947 return 1;
948 }
949
950 static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
952 {
953 struct branch_info *bi;
954 struct evsel *evsel = iter->evsel;
955 struct hists *hists = evsel__hists(evsel);
956 struct perf_sample *sample = iter->sample;
957 struct hist_entry *he = NULL;
958 int i = iter->curr;
959 int err = 0;
960
961 bi = iter->bi;
962
963 if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
964 goto out;
965
966 /*
967 * The report shows the percentage of total branches captured
968 * and not events sampled. Thus we use a pseudo period of 1.
969 */
970 sample->period = 1;
971 sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
972
973 he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
974 sample, true);
975 if (he == NULL)
976 return -ENOMEM;
977
978 hists__inc_nr_samples(hists, he->filtered);
979
980 out:
981 iter->he = he;
982 iter->curr++;
983 return err;
984 }
985
static void branch_info__exit(struct branch_info *bi)
987 {
988 map_symbol__exit(&bi->from.ms);
989 map_symbol__exit(&bi->to.ms);
990 zfree_srcline(&bi->srcline_from);
991 zfree_srcline(&bi->srcline_to);
992 }
993
994 static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
996 struct addr_location *al __maybe_unused)
997 {
998 for (int i = 0; i < iter->total; i++)
999 branch_info__exit(&iter->bi[i]);
1000
1001 zfree(&iter->bi);
1002 iter->he = NULL;
1003
1004 return iter->curr >= iter->total ? 0 : -1;
1005 }
1006
1007 static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
1009 struct addr_location *al __maybe_unused)
1010 {
1011 return 0;
1012 }
1013
1014 static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
1016 {
1017 struct evsel *evsel = iter->evsel;
1018 struct perf_sample *sample = iter->sample;
1019 struct hist_entry *he;
1020
1021 he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1022 NULL, sample, true);
1023 if (he == NULL)
1024 return -ENOMEM;
1025
1026 iter->he = he;
1027 return 0;
1028 }
1029
1030 static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
1032 struct addr_location *al __maybe_unused)
1033 {
1034 struct hist_entry *he = iter->he;
1035 struct evsel *evsel = iter->evsel;
1036 struct perf_sample *sample = iter->sample;
1037
1038 if (he == NULL)
1039 return 0;
1040
1041 iter->he = NULL;
1042
1043 hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
1044
1045 return hist_entry__append_callchain(he, sample);
1046 }
1047
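/*
 * Cumulative ("children") mode: besides the entry for the sampled
 * location itself, an extra entry is accumulated for every caller on the
 * callchain so that cost propagates up the call graph. The he_cache
 * array detects cycles/recursion so a function appearing several times
 * in one chain is only accumulated once.
 */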
1048 static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
1050 struct addr_location *al __maybe_unused)
1051 {
1052 struct hist_entry **he_cache;
1053 struct callchain_cursor *cursor = get_tls_callchain_cursor();
1054
1055 if (cursor == NULL)
1056 return -ENOMEM;
1057
1058 callchain_cursor_commit(cursor);
1059
/*
 * This is for detecting cycles or recursion so that they're
 * accumulated only once, preventing entries from exceeding
 * 100% overhead.
 */
1065 he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
1066 if (he_cache == NULL)
1067 return -ENOMEM;
1068
1069 iter->he_cache = he_cache;
1070 iter->curr = 0;
1071
1072 return 0;
1073 }
1074
1075 static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
1077 struct addr_location *al)
1078 {
1079 struct evsel *evsel = iter->evsel;
1080 struct hists *hists = evsel__hists(evsel);
1081 struct perf_sample *sample = iter->sample;
1082 struct hist_entry **he_cache = iter->he_cache;
1083 struct hist_entry *he;
1084 int err = 0;
1085
1086 he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
1087 sample, true);
1088 if (he == NULL)
1089 return -ENOMEM;
1090
1091 iter->he = he;
1092 he_cache[iter->curr++] = he;
1093
1094 hist_entry__append_callchain(he, sample);
1095
1096 /*
1097 * We need to re-initialize the cursor since callchain_append()
1098 * advanced the cursor to the end.
1099 */
1100 callchain_cursor_commit(get_tls_callchain_cursor());
1101
1102 hists__inc_nr_samples(hists, he->filtered);
1103
1104 return err;
1105 }
1106
1107 static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
1109 struct addr_location *al)
1110 {
1111 struct callchain_cursor_node *node;
1112
1113 node = callchain_cursor_current(get_tls_callchain_cursor());
1114 if (node == NULL)
1115 return 0;
1116
1117 return fill_callchain_info(al, node, iter->hide_unresolved);
1118 }
1119
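/*
 * Cheap inequality test used while scanning he_cache: if both symbols
 * are missing compare the raw IPs, otherwise compare the symbols,
 * avoiding a full hist_entry__cmp() for the common non-matching case.
 */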
1120 static bool
hist_entry__fast__sym_diff(struct hist_entry *left,
1122 struct hist_entry *right)
1123 {
1124 struct symbol *sym_l = left->ms.sym;
1125 struct symbol *sym_r = right->ms.sym;
1126
1127 if (!sym_l && !sym_r)
1128 return left->ip != right->ip;
1129
1130 return !!_sort__sym_cmp(sym_l, sym_r);
1131 }
1132
1133
1134 static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
1136 struct addr_location *al)
1137 {
1138 struct evsel *evsel = iter->evsel;
1139 struct perf_sample *sample = iter->sample;
1140 struct hist_entry **he_cache = iter->he_cache;
1141 struct hist_entry *he;
1142 struct hist_entry he_tmp = {
1143 .hists = evsel__hists(evsel),
1144 .cpu = al->cpu,
1145 .thread = al->thread,
1146 .comm = thread__comm(al->thread),
1147 .ip = al->addr,
1148 .ms = {
1149 .maps = al->maps,
1150 .map = al->map,
1151 .sym = al->sym,
1152 },
1153 .srcline = (char *) al->srcline,
1154 .parent = iter->parent,
1155 .raw_data = sample->raw_data,
1156 .raw_size = sample->raw_size,
1157 };
1158 int i;
1159 struct callchain_cursor cursor, *tls_cursor = get_tls_callchain_cursor();
1160 bool fast = hists__has(he_tmp.hists, sym);
1161
1162 if (tls_cursor == NULL)
1163 return -ENOMEM;
1164
1165 callchain_cursor_snapshot(&cursor, tls_cursor);
1166
1167 callchain_cursor_advance(tls_cursor);
1168
/*
 * Check if there are duplicate entries in the callchain.
 * It's possible that it has cycles or recursive calls.
 */
1173 for (i = 0; i < iter->curr; i++) {
/*
 * For most cases, there are no duplicate entries in the callchain.
 * The symbols are usually different. Do a quick check for
 * the symbols first.
 */
1179 if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
1180 continue;
1181
1182 if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
1183 /* to avoid calling callback function */
1184 iter->he = NULL;
1185 return 0;
1186 }
1187 }
1188
1189 he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1190 NULL, sample, false);
1191 if (he == NULL)
1192 return -ENOMEM;
1193
1194 iter->he = he;
1195 he_cache[iter->curr++] = he;
1196
1197 if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
1198 callchain_append(he->callchain, &cursor, sample->period);
1199 return 0;
1200 }
1201
1202 static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
1204 struct addr_location *al __maybe_unused)
1205 {
1206 mem_info__zput(iter->mi);
1207 zfree(&iter->bi);
1208 zfree(&iter->he_cache);
1209 iter->he = NULL;
1210
1211 return 0;
1212 }
1213
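/*
 * Per-sample-type iterator flavours (mem, branch, normal, cumulative).
 * hist_entry_iter__add() drives them as: prepare_entry, add_single_entry,
 * then next_entry/add_next_entry in a loop, and finally finish_entry.
 */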
1214 const struct hist_iter_ops hist_iter_mem = {
1215 .prepare_entry = iter_prepare_mem_entry,
1216 .add_single_entry = iter_add_single_mem_entry,
1217 .next_entry = iter_next_nop_entry,
1218 .add_next_entry = iter_add_next_nop_entry,
1219 .finish_entry = iter_finish_mem_entry,
1220 };
1221
1222 const struct hist_iter_ops hist_iter_branch = {
1223 .prepare_entry = iter_prepare_branch_entry,
1224 .add_single_entry = iter_add_single_branch_entry,
1225 .next_entry = iter_next_branch_entry,
1226 .add_next_entry = iter_add_next_branch_entry,
1227 .finish_entry = iter_finish_branch_entry,
1228 };
1229
1230 const struct hist_iter_ops hist_iter_normal = {
1231 .prepare_entry = iter_prepare_normal_entry,
1232 .add_single_entry = iter_add_single_normal_entry,
1233 .next_entry = iter_next_nop_entry,
1234 .add_next_entry = iter_add_next_nop_entry,
1235 .finish_entry = iter_finish_normal_entry,
1236 };
1237
1238 const struct hist_iter_ops hist_iter_cumulative = {
1239 .prepare_entry = iter_prepare_cumulative_entry,
1240 .add_single_entry = iter_add_single_cumulative_entry,
1241 .next_entry = iter_next_cumulative_entry,
1242 .add_next_entry = iter_add_next_cumulative_entry,
1243 .finish_entry = iter_finish_cumulative_entry,
1244 };
1245
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1247 int max_stack_depth, void *arg)
1248 {
1249 int err, err2;
1250 struct map *alm = NULL;
1251
1252 if (al)
1253 alm = map__get(al->map);
1254
1255 err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
1256 iter->evsel, al, max_stack_depth);
1257 if (err) {
1258 map__put(alm);
1259 return err;
1260 }
1261
1262 err = iter->ops->prepare_entry(iter, al);
1263 if (err)
1264 goto out;
1265
1266 err = iter->ops->add_single_entry(iter, al);
1267 if (err)
1268 goto out;
1269
1270 if (iter->he && iter->add_entry_cb) {
1271 err = iter->add_entry_cb(iter, al, true, arg);
1272 if (err)
1273 goto out;
1274 }
1275
1276 while (iter->ops->next_entry(iter, al)) {
1277 err = iter->ops->add_next_entry(iter, al);
1278 if (err)
1279 break;
1280
1281 if (iter->he && iter->add_entry_cb) {
1282 err = iter->add_entry_cb(iter, al, false, arg);
1283 if (err)
1284 goto out;
1285 }
1286 }
1287
1288 out:
1289 err2 = iter->ops->finish_entry(iter, al);
1290 if (!err)
1291 err = err2;
1292
1293 map__put(alm);
1294
1295 return err;
1296 }
1297
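/*
 * Common comparison walker: 'fn_offset' is the byte offset of the
 * cmp/collapse/sort callback within struct perf_hpp_fmt, so the same
 * loop over the sort list implements all three comparison flavours.
 */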
1298 static int64_t
hist_entry__cmp_impl(struct perf_hpp_list *hpp_list, struct hist_entry *left,
1300 struct hist_entry *right, unsigned long fn_offset,
1301 bool ignore_dynamic, bool ignore_skipped)
1302 {
1303 struct hists *hists = left->hists;
1304 struct perf_hpp_fmt *fmt;
1305 perf_hpp_fmt_cmp_t *fn;
1306 int64_t cmp;
1307
1308 /*
1309 * Never collapse filtered and non-filtered entries.
1310 * Note this is not the same as having an extra (invisible) fmt
1311 * that corresponds to the filtered status.
1312 */
1313 cmp = (int64_t)!!left->filtered - (int64_t)!!right->filtered;
1314 if (cmp)
1315 return cmp;
1316
1317 perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1318 if (ignore_dynamic && perf_hpp__is_dynamic_entry(fmt) &&
1319 !perf_hpp__defined_dynamic_entry(fmt, hists))
1320 continue;
1321
1322 if (ignore_skipped && perf_hpp__should_skip(fmt, hists))
1323 continue;
1324
1325 fn = (void *)fmt + fn_offset;
1326 cmp = (*fn)(fmt, left, right);
1327 if (cmp)
1328 break;
1329 }
1330
1331 return cmp;
1332 }
1333
1334 int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1336 {
1337 return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
1338 offsetof(struct perf_hpp_fmt, cmp), true, false);
1339 }
1340
1341 static int64_t
hist_entry__sort(struct hist_entry *left, struct hist_entry *right)
1343 {
1344 return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
1345 offsetof(struct perf_hpp_fmt, sort), false, true);
1346 }
1347
1348 int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1350 {
1351 return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
1352 offsetof(struct perf_hpp_fmt, collapse), true, false);
1353 }
1354
1355 static int64_t
hist_entry__collapse_hierarchy(struct perf_hpp_list *hpp_list,
1357 struct hist_entry *left,
1358 struct hist_entry *right)
1359 {
1360 return hist_entry__cmp_impl(hpp_list, left, right,
1361 offsetof(struct perf_hpp_fmt, collapse), false, false);
1362 }
1363
void hist_entry__delete(struct hist_entry *he)
1365 {
1366 struct hist_entry_ops *ops = he->ops;
1367
1368 thread__zput(he->thread);
1369 map_symbol__exit(&he->ms);
1370
1371 if (he->branch_info) {
1372 branch_info__exit(he->branch_info);
1373 zfree(&he->branch_info);
1374 }
1375
1376 if (he->mem_info) {
1377 map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
1378 map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
1379 mem_info__zput(he->mem_info);
1380 }
1381
1382 if (he->block_info)
1383 block_info__delete(he->block_info);
1384
1385 if (he->kvm_info)
1386 kvm_info__zput(he->kvm_info);
1387
1388 zfree(&he->res_samples);
1389 zfree(&he->stat_acc);
1390 zfree_srcline(&he->srcline);
1391 if (he->srcfile && he->srcfile[0])
1392 zfree(&he->srcfile);
1393 free_callchain(he->callchain);
1394 zfree(&he->trace_output);
1395 zfree(&he->raw_data);
1396 ops->free(he);
1397 }
1398
/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces, because that would break viewing this with, for instance, 'less':
 * it would show tons of trailing spaces when a long demangled C++ method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1407 struct perf_hpp_fmt *fmt, int printed)
1408 {
1409 if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1410 const int width = fmt->width(fmt, hpp, he->hists);
1411 if (printed < width) {
1412 advance_hpp(hpp, printed);
1413 printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1414 }
1415 }
1416
1417 return printed;
1418 }
1419
1420 /*
1421 * collapse the histogram
1422 */
1423
1424 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1425 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1426 enum hist_filter type);
1427
1428 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1429
static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1431 {
1432 return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1433 }
1434
static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1436 enum hist_filter type,
1437 fmt_chk_fn check)
1438 {
1439 struct perf_hpp_fmt *fmt;
1440 bool type_match = false;
1441 struct hist_entry *parent = he->parent_he;
1442
1443 switch (type) {
1444 case HIST_FILTER__THREAD:
1445 if (symbol_conf.comm_list == NULL &&
1446 symbol_conf.pid_list == NULL &&
1447 symbol_conf.tid_list == NULL)
1448 return;
1449 break;
1450 case HIST_FILTER__DSO:
1451 if (symbol_conf.dso_list == NULL)
1452 return;
1453 break;
1454 case HIST_FILTER__SYMBOL:
1455 if (symbol_conf.sym_list == NULL)
1456 return;
1457 break;
1458 case HIST_FILTER__PARENT:
1459 case HIST_FILTER__GUEST:
1460 case HIST_FILTER__HOST:
1461 case HIST_FILTER__SOCKET:
1462 case HIST_FILTER__C2C:
1463 default:
1464 return;
1465 }
1466
/* if it's filtered by its own fmt, it has to have filter bits */
1468 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1469 if (check(fmt)) {
1470 type_match = true;
1471 break;
1472 }
1473 }
1474
1475 if (type_match) {
/*
 * If the filter is for the current level entry, propagate
 * the filter marker to the parents. The marker bit was
 * already set by default, so it only needs to be cleared
 * on the parents of non-filtered entries.
 */
1482 if (!(he->filtered & (1 << type))) {
1483 while (parent) {
1484 parent->filtered &= ~(1 << type);
1485 parent = parent->parent_he;
1486 }
1487 }
1488 } else {
/*
 * If the current entry doesn't have matching formats, set
 * the filter marker for upper level entries. It will be
 * cleared if one of its lower level entries is not filtered.
 *
 * A lower-level entry inherits its parent's filter bit so
 * that lower level entries of a non-filtered entry won't
 * set the filter marker.
 */
1498 if (parent == NULL)
1499 he->filtered |= (1 << type);
1500 else
1501 he->filtered |= (parent->filtered & (1 << type));
1502 }
1503 }
1504
static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1506 {
1507 hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1508 check_thread_entry);
1509
1510 hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1511 perf_hpp__is_dso_entry);
1512
1513 hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1514 perf_hpp__is_sym_entry);
1515
1516 hists__apply_filters(he->hists, he);
1517 }
1518
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1520 struct rb_root_cached *root,
1521 struct hist_entry *he,
1522 struct hist_entry *parent_he,
1523 struct perf_hpp_list *hpp_list)
1524 {
1525 struct rb_node **p = &root->rb_root.rb_node;
1526 struct rb_node *parent = NULL;
1527 struct hist_entry *iter, *new;
1528 struct perf_hpp_fmt *fmt;
1529 int64_t cmp;
1530 bool leftmost = true;
1531
1532 while (*p != NULL) {
1533 parent = *p;
1534 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1535 cmp = hist_entry__collapse_hierarchy(hpp_list, iter, he);
1536 if (!cmp) {
1537 he_stat__add_stat(&iter->stat, &he->stat);
1538 return iter;
1539 }
1540
1541 if (cmp < 0)
1542 p = &parent->rb_left;
1543 else {
1544 p = &parent->rb_right;
1545 leftmost = false;
1546 }
1547 }
1548
1549 new = hist_entry__new(he, true);
1550 if (new == NULL)
1551 return NULL;
1552
1553 hists->nr_entries++;
1554
1555 /* save related format list for output */
1556 new->hpp_list = hpp_list;
1557 new->parent_he = parent_he;
1558
1559 hist_entry__apply_hierarchy_filters(new);
1560
1561 /* some fields are now passed to 'new' */
1562 perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1563 if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1564 he->trace_output = NULL;
1565 else
1566 new->trace_output = NULL;
1567
1568 if (perf_hpp__is_srcline_entry(fmt))
1569 he->srcline = NULL;
1570 else
1571 new->srcline = NULL;
1572
1573 if (perf_hpp__is_srcfile_entry(fmt))
1574 he->srcfile = NULL;
1575 else
1576 new->srcfile = NULL;
1577 }
1578
1579 rb_link_node(&new->rb_node_in, parent, p);
1580 rb_insert_color_cached(&new->rb_node_in, root, leftmost);
1581 return new;
1582 }
1583
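/*
 * In hierarchy mode an entry is split into one node per sort level:
 * insert a copy of 'he' for each format list, chaining the copies via
 * parent_he, and mark the deepest copy as the leaf that carries the
 * callchain.
 */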
static int hists__hierarchy_insert_entry(struct hists *hists,
1585 struct rb_root_cached *root,
1586 struct hist_entry *he)
1587 {
1588 struct perf_hpp_list_node *node;
1589 struct hist_entry *new_he = NULL;
1590 struct hist_entry *parent = NULL;
1591 int depth = 0;
1592 int ret = 0;
1593
1594 list_for_each_entry(node, &hists->hpp_formats, list) {
1595 /* skip period (overhead) and elided columns */
1596 if (node->level == 0 || node->skip)
1597 continue;
1598
1599 /* insert copy of 'he' for each fmt into the hierarchy */
1600 new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1601 if (new_he == NULL) {
1602 ret = -1;
1603 break;
1604 }
1605
1606 root = &new_he->hroot_in;
1607 new_he->depth = depth++;
1608 parent = new_he;
1609 }
1610
1611 if (new_he) {
1612 new_he->leaf = true;
1613
1614 if (hist_entry__has_callchains(new_he) &&
1615 symbol_conf.use_callchain) {
1616 struct callchain_cursor *cursor = get_tls_callchain_cursor();
1617
1618 if (cursor == NULL)
1619 return -1;
1620
1621 callchain_cursor_reset(cursor);
1622 if (callchain_merge(cursor,
1623 new_he->callchain,
1624 he->callchain) < 0)
1625 ret = -1;
1626 }
1627 }
1628
1629 /* 'he' is no longer used */
1630 hist_entry__delete(he);
1631
1632 /* return 0 (or -1) since it already applied filters */
1633 return ret;
1634 }
1635
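/*
 * Insert 'he' into the collapsed tree: entries that compare equal under
 * the collapse keys are merged (stats added, callchains merged) and the
 * duplicate is freed. Returns 1 when a new node was linked, 0 when the
 * entry was merged (or handled by the hierarchy code) and a negative
 * value on error.
 */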
static int hists__collapse_insert_entry(struct hists *hists,
1637 struct rb_root_cached *root,
1638 struct hist_entry *he)
1639 {
1640 struct rb_node **p = &root->rb_root.rb_node;
1641 struct rb_node *parent = NULL;
1642 struct hist_entry *iter;
1643 int64_t cmp;
1644 bool leftmost = true;
1645
1646 if (symbol_conf.report_hierarchy)
1647 return hists__hierarchy_insert_entry(hists, root, he);
1648
1649 while (*p != NULL) {
1650 parent = *p;
1651 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1652
1653 cmp = hist_entry__collapse(iter, he);
1654
1655 if (!cmp) {
1656 int ret = 0;
1657
1658 he_stat__add_stat(&iter->stat, &he->stat);
1659 if (symbol_conf.cumulate_callchain)
1660 he_stat__add_stat(iter->stat_acc, he->stat_acc);
1661
1662 if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1663 struct callchain_cursor *cursor = get_tls_callchain_cursor();
1664
1665 if (cursor != NULL) {
1666 callchain_cursor_reset(cursor);
1667 if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
1668 ret = -1;
1669 } else {
1670 ret = 0;
1671 }
1672 }
1673 hist_entry__delete(he);
1674 return ret;
1675 }
1676
1677 if (cmp < 0)
1678 p = &(*p)->rb_left;
1679 else {
1680 p = &(*p)->rb_right;
1681 leftmost = false;
1682 }
1683 }
1684 hists->nr_entries++;
1685
1686 rb_link_node(&he->rb_node_in, parent, p);
1687 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1688 return 1;
1689 }
1690
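/*
 * Under hists->lock, swap the double-buffered input trees so that new
 * samples keep landing in the other element of entries_in_array while
 * the returned tree is being collapsed.
 */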
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1692 {
1693 struct rb_root_cached *root;
1694
1695 mutex_lock(&hists->lock);
1696
1697 root = hists->entries_in;
1698 if (++hists->entries_in > &hists->entries_in_array[1])
1699 hists->entries_in = &hists->entries_in_array[0];
1700
1701 mutex_unlock(&hists->lock);
1702
1703 return root;
1704 }
1705
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1707 {
1708 hists__filter_entry_by_dso(hists, he);
1709 hists__filter_entry_by_thread(hists, he);
1710 hists__filter_entry_by_symbol(hists, he);
1711 hists__filter_entry_by_socket(hists, he);
1712 }
1713
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1715 {
1716 struct rb_root_cached *root;
1717 struct rb_node *next;
1718 struct hist_entry *n;
1719 int ret;
1720
1721 if (!hists__has(hists, need_collapse))
1722 return 0;
1723
1724 hists->nr_entries = 0;
1725
1726 root = hists__get_rotate_entries_in(hists);
1727
1728 next = rb_first_cached(root);
1729
1730 while (next) {
1731 if (session_done())
1732 break;
1733 n = rb_entry(next, struct hist_entry, rb_node_in);
1734 next = rb_next(&n->rb_node_in);
1735
1736 rb_erase_cached(&n->rb_node_in, root);
1737 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1738 if (ret < 0)
1739 return -1;
1740
1741 if (ret) {
1742 /*
1743 * If it wasn't combined with one of the entries already
1744 * collapsed, we need to apply the filters that may have
1745 * been set by, say, the hist_browser.
1746 */
1747 hists__apply_filters(hists, n);
1748 }
1749 if (prog)
1750 ui_progress__update(prog, 1);
1751 }
1752 return 0;
1753 }
1754
static void hists__reset_filter_stats(struct hists *hists)
1756 {
1757 hists->nr_non_filtered_entries = 0;
1758 hists->stats.total_non_filtered_period = 0;
1759 }
1760
void hists__reset_stats(struct hists *hists)
1762 {
1763 hists->nr_entries = 0;
1764 hists->stats.total_period = 0;
1765
1766 hists__reset_filter_stats(hists);
1767 }
1768
static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1770 {
1771 hists->nr_non_filtered_entries++;
1772 hists->stats.total_non_filtered_period += h->stat.period;
1773 }
1774
void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1776 {
1777 if (!h->filtered)
1778 hists__inc_filter_stats(hists, h);
1779
1780 hists->nr_entries++;
1781 hists->stats.total_period += h->stat.period;
1782 }
1783
static void hierarchy_recalc_total_periods(struct hists *hists)
1785 {
1786 struct rb_node *node;
1787 struct hist_entry *he;
1788
1789 node = rb_first_cached(&hists->entries);
1790
1791 hists->stats.total_period = 0;
1792 hists->stats.total_non_filtered_period = 0;
1793
/*
 * Recalculate the total period using top-level entries only,
 * since lower level entries only see non-filtered entries
 * while upper level entries have the sum of both.
 */
1799 while (node) {
1800 he = rb_entry(node, struct hist_entry, rb_node);
1801 node = rb_next(node);
1802
1803 hists->stats.total_period += he->stat.period;
1804 if (!he->filtered)
1805 hists->stats.total_non_filtered_period += he->stat.period;
1806 }
1807 }
1808
static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1810 struct hist_entry *he)
1811 {
1812 struct rb_node **p = &root->rb_root.rb_node;
1813 struct rb_node *parent = NULL;
1814 struct hist_entry *iter;
1815 struct perf_hpp_fmt *fmt;
1816 bool leftmost = true;
1817
1818 while (*p != NULL) {
1819 parent = *p;
1820 iter = rb_entry(parent, struct hist_entry, rb_node);
1821
1822 if (hist_entry__sort(he, iter) > 0)
1823 p = &parent->rb_left;
1824 else {
1825 p = &parent->rb_right;
1826 leftmost = false;
1827 }
1828 }
1829
1830 rb_link_node(&he->rb_node, parent, p);
1831 rb_insert_color_cached(&he->rb_node, root, leftmost);
1832
1833 /* update column width of dynamic entry */
1834 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1835 if (fmt->init)
1836 fmt->init(fmt, he);
1837 }
1838 }
1839
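/*
 * Build the output tree (root_out) from the collapsed hierarchy tree
 * (root_in): each entry is inserted in output sort order, entry counts and
 * column widths are updated, and leaf entries get their callchains sorted
 * when callchains are in use.  Non-leaf entries recurse into their children.
 */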
1840 static void hists__hierarchy_output_resort(struct hists *hists,
1841 struct ui_progress *prog,
1842 struct rb_root_cached *root_in,
1843 struct rb_root_cached *root_out,
1844 u64 min_callchain_hits,
1845 bool use_callchain)
1846 {
1847 struct rb_node *node;
1848 struct hist_entry *he;
1849
1850 *root_out = RB_ROOT_CACHED;
1851 node = rb_first_cached(root_in);
1852
1853 while (node) {
1854 he = rb_entry(node, struct hist_entry, rb_node_in);
1855 node = rb_next(node);
1856
1857 hierarchy_insert_output_entry(root_out, he);
1858
1859 if (prog)
1860 ui_progress__update(prog, 1);
1861
1862 hists->nr_entries++;
1863 if (!he->filtered) {
1864 hists->nr_non_filtered_entries++;
1865 hists__calc_col_len(hists, he);
1866 }
1867
1868 if (!he->leaf) {
1869 hists__hierarchy_output_resort(hists, prog,
1870 &he->hroot_in,
1871 &he->hroot_out,
1872 min_callchain_hits,
1873 use_callchain);
1874 continue;
1875 }
1876
1877 if (!use_callchain)
1878 continue;
1879
1880 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1881 u64 total = he->stat.period;
1882
1883 if (symbol_conf.cumulate_callchain)
1884 total = he->stat_acc->period;
1885
1886 min_callchain_hits = total * (callchain_param.min_percent / 100);
1887 }
1888
1889 callchain_param.sort(&he->sorted_chain, he->callchain,
1890 min_callchain_hits, &callchain_param);
1891 }
1892 }
1893
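/*
 * Insert a single entry into the flat output tree, ordered by
 * hist_entry__sort().  If callchains are used, the entry's callchain is
 * sorted first, using a per-entry threshold in CHAIN_GRAPH_REL mode.
 */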
1894 static void __hists__insert_output_entry(struct rb_root_cached *entries,
1895 struct hist_entry *he,
1896 u64 min_callchain_hits,
1897 bool use_callchain)
1898 {
1899 struct rb_node **p = &entries->rb_root.rb_node;
1900 struct rb_node *parent = NULL;
1901 struct hist_entry *iter;
1902 struct perf_hpp_fmt *fmt;
1903 bool leftmost = true;
1904
1905 if (use_callchain) {
1906 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1907 u64 total = he->stat.period;
1908
1909 if (symbol_conf.cumulate_callchain)
1910 total = he->stat_acc->period;
1911
1912 min_callchain_hits = total * (callchain_param.min_percent / 100);
1913 }
1914 callchain_param.sort(&he->sorted_chain, he->callchain,
1915 min_callchain_hits, &callchain_param);
1916 }
1917
1918 while (*p != NULL) {
1919 parent = *p;
1920 iter = rb_entry(parent, struct hist_entry, rb_node);
1921
1922 if (hist_entry__sort(he, iter) > 0)
1923 p = &(*p)->rb_left;
1924 else {
1925 p = &(*p)->rb_right;
1926 leftmost = false;
1927 }
1928 }
1929
1930 rb_link_node(&he->rb_node, parent, p);
1931 rb_insert_color_cached(&he->rb_node, entries, leftmost);
1932
1933 /* update column width of dynamic entries */
1934 perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1935 if (fmt->init)
1936 fmt->init(fmt, he);
1937 }
1938 }
1939
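/*
 * Common driver for the final (output) sort: reset stats and column widths,
 * then rebuild hists->entries either via the hierarchy path or by walking the
 * collapsed/input tree.  Entries for which the optional callback returns
 * non-zero are skipped.
 */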
1940 static void output_resort(struct hists *hists, struct ui_progress *prog,
1941 bool use_callchain, hists__resort_cb_t cb,
1942 void *cb_arg)
1943 {
1944 struct rb_root_cached *root;
1945 struct rb_node *next;
1946 struct hist_entry *n;
1947 u64 callchain_total;
1948 u64 min_callchain_hits;
1949
1950 callchain_total = hists->callchain_period;
1951 if (symbol_conf.filter_relative)
1952 callchain_total = hists->callchain_non_filtered_period;
1953
1954 min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1955
1956 hists__reset_stats(hists);
1957 hists__reset_col_len(hists);
1958
1959 if (symbol_conf.report_hierarchy) {
1960 hists__hierarchy_output_resort(hists, prog,
1961 &hists->entries_collapsed,
1962 &hists->entries,
1963 min_callchain_hits,
1964 use_callchain);
1965 hierarchy_recalc_total_periods(hists);
1966 return;
1967 }
1968
1969 if (hists__has(hists, need_collapse))
1970 root = &hists->entries_collapsed;
1971 else
1972 root = hists->entries_in;
1973
1974 next = rb_first_cached(root);
1975 hists->entries = RB_ROOT_CACHED;
1976
1977 while (next) {
1978 n = rb_entry(next, struct hist_entry, rb_node_in);
1979 next = rb_next(&n->rb_node_in);
1980
1981 if (cb && cb(n, cb_arg))
1982 continue;
1983
1984 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1985 hists__inc_stats(hists, n);
1986
1987 if (!n->filtered)
1988 hists__calc_col_len(hists, n);
1989
1990 if (prog)
1991 ui_progress__update(prog, 1);
1992 }
1993 }
1994
1995 void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
1996 hists__resort_cb_t cb, void *cb_arg)
1997 {
1998 bool use_callchain;
1999
2000 if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
2001 use_callchain = evsel__has_callchain(evsel);
2002 else
2003 use_callchain = symbol_conf.use_callchain;
2004
2005 use_callchain |= symbol_conf.show_branchflag_count;
2006
2007 output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
2008 }
2009
2010 void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
2011 {
2012 return evsel__output_resort_cb(evsel, prog, NULL, NULL);
2013 }
2014
2015 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
2016 {
2017 output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
2018 }
2019
2020 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
2021 hists__resort_cb_t cb)
2022 {
2023 output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
2024 }
2025
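/*
 * Decide whether a hierarchy walk may descend into an entry's children:
 * never for leaves or when forced to stay at sibling level, always when
 * forced to go down, otherwise only if the entry is unfolded.
 */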
2026 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
2027 {
2028 if (he->leaf || hmd == HMD_FORCE_SIBLING)
2029 return false;
2030
2031 if (he->unfolded || hmd == HMD_FORCE_CHILD)
2032 return true;
2033
2034 return false;
2035 }
2036
2037 struct rb_node *rb_hierarchy_last(struct rb_node *node)
2038 {
2039 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2040
2041 while (can_goto_child(he, HMD_NORMAL)) {
2042 node = rb_last(&he->hroot_out.rb_root);
2043 he = rb_entry(node, struct hist_entry, rb_node);
2044 }
2045 return node;
2046 }
2047
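/*
 * Pre-order step through the hierarchy output tree: go to the first child
 * when allowed, otherwise to the next sibling, climbing back up through
 * parents when a subtree is exhausted.
 */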
2048 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
2049 {
2050 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2051
2052 if (can_goto_child(he, hmd))
2053 node = rb_first_cached(&he->hroot_out);
2054 else
2055 node = rb_next(node);
2056
2057 while (node == NULL) {
2058 he = he->parent_he;
2059 if (he == NULL)
2060 break;
2061
2062 node = rb_next(&he->rb_node);
2063 }
2064 return node;
2065 }
2066
2067 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
2068 {
2069 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2070
2071 node = rb_prev(node);
2072 if (node)
2073 return rb_hierarchy_last(node);
2074
2075 he = he->parent_he;
2076 if (he == NULL)
2077 return NULL;
2078
2079 return &he->rb_node;
2080 }
2081
2082 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
2083 {
2084 struct rb_node *node;
2085 struct hist_entry *child;
2086 float percent;
2087
2088 if (he->leaf)
2089 return false;
2090
2091 node = rb_first_cached(&he->hroot_out);
2092 child = rb_entry(node, struct hist_entry, rb_node);
2093
2094 while (node && child->filtered) {
2095 node = rb_next(node);
2096 child = rb_entry(node, struct hist_entry, rb_node);
2097 }
2098
2099 if (node)
2100 percent = hist_entry__get_percent_limit(child);
2101 else
2102 percent = 0;
2103
2104 return node && percent >= limit;
2105 }
2106
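/*
 * Clear the given filter bit on an entry (and, in hierarchy mode, on its
 * parents, adding the entry's period back to them).  Once an entry is no
 * longer filtered at all it is folded again and re-accounted in the
 * non-filtered stats and column widths.
 */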
2107 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
2108 enum hist_filter filter)
2109 {
2110 h->filtered &= ~(1 << filter);
2111
2112 if (symbol_conf.report_hierarchy) {
2113 struct hist_entry *parent = h->parent_he;
2114
2115 while (parent) {
2116 he_stat__add_stat(&parent->stat, &h->stat);
2117
2118 parent->filtered &= ~(1 << filter);
2119
2120 if (parent->filtered)
2121 goto next;
2122
2123 /* force fold unfiltered entry for simplicity */
2124 parent->unfolded = false;
2125 parent->has_no_entry = false;
2126 parent->row_offset = 0;
2127 parent->nr_rows = 0;
2128 next:
2129 parent = parent->parent_he;
2130 }
2131 }
2132
2133 if (h->filtered)
2134 return;
2135
2136 /* force fold unfiltered entry for simplicity */
2137 h->unfolded = false;
2138 h->has_no_entry = false;
2139 h->row_offset = 0;
2140 h->nr_rows = 0;
2141
2142 hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2143
2144 hists__inc_filter_stats(hists, h);
2145 hists__calc_col_len(hists, h);
2146 }
2147
2148
2149 static bool hists__filter_entry_by_dso(struct hists *hists,
2150 struct hist_entry *he)
2151 {
2152 if (hists->dso_filter != NULL &&
2153 (he->ms.map == NULL || !RC_CHK_EQUAL(map__dso(he->ms.map), hists->dso_filter))) {
2154 he->filtered |= (1 << HIST_FILTER__DSO);
2155 return true;
2156 }
2157
2158 return false;
2159 }
2160
2161 static bool hists__filter_entry_by_thread(struct hists *hists,
2162 struct hist_entry *he)
2163 {
2164 if (hists->thread_filter != NULL &&
2165 !RC_CHK_EQUAL(he->thread, hists->thread_filter)) {
2166 he->filtered |= (1 << HIST_FILTER__THREAD);
2167 return true;
2168 }
2169
2170 return false;
2171 }
2172
2173 static bool hists__filter_entry_by_symbol(struct hists *hists,
2174 struct hist_entry *he)
2175 {
2176 if (hists->symbol_filter_str != NULL &&
2177 (!he->ms.sym || strstr(he->ms.sym->name,
2178 hists->symbol_filter_str) == NULL)) {
2179 he->filtered |= (1 << HIST_FILTER__SYMBOL);
2180 return true;
2181 }
2182
2183 return false;
2184 }
2185
2186 static bool hists__filter_entry_by_socket(struct hists *hists,
2187 struct hist_entry *he)
2188 {
2189 if ((hists->socket_filter > -1) &&
2190 (he->socket != hists->socket_filter)) {
2191 he->filtered |= (1 << HIST_FILTER__SOCKET);
2192 return true;
2193 }
2194
2195 return false;
2196 }
2197
2198 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2199
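/*
 * Apply a filter predicate to every entry in the flat output tree: reset the
 * non-filtered stats first, then clear the filter bit (and re-account) for
 * each entry the predicate does not filter out.
 */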
2200 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2201 {
2202 struct rb_node *nd;
2203
2204 hists->stats.nr_non_filtered_samples = 0;
2205
2206 hists__reset_filter_stats(hists);
2207 hists__reset_col_len(hists);
2208
2209 for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2210 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2211
2212 if (filter(hists, h))
2213 continue;
2214
2215 hists__remove_entry_filter(hists, h, type);
2216 }
2217 }
2218
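/*
 * Re-insert an entry into a freshly built output tree in output sort order
 * and, for unfiltered non-leaf entries, rebuild the children's subtree the
 * same way.
 */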
2219 static void resort_filtered_entry(struct rb_root_cached *root,
2220 struct hist_entry *he)
2221 {
2222 struct rb_node **p = &root->rb_root.rb_node;
2223 struct rb_node *parent = NULL;
2224 struct hist_entry *iter;
2225 struct rb_root_cached new_root = RB_ROOT_CACHED;
2226 struct rb_node *nd;
2227 bool leftmost = true;
2228
2229 while (*p != NULL) {
2230 parent = *p;
2231 iter = rb_entry(parent, struct hist_entry, rb_node);
2232
2233 if (hist_entry__sort(he, iter) > 0)
2234 p = &(*p)->rb_left;
2235 else {
2236 p = &(*p)->rb_right;
2237 leftmost = false;
2238 }
2239 }
2240
2241 rb_link_node(&he->rb_node, parent, p);
2242 rb_insert_color_cached(&he->rb_node, root, leftmost);
2243
2244 if (he->leaf || he->filtered)
2245 return;
2246
2247 nd = rb_first_cached(&he->hroot_out);
2248 while (nd) {
2249 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2250
2251 nd = rb_next(nd);
2252 rb_erase_cached(&h->rb_node, &he->hroot_out);
2253
2254 resort_filtered_entry(&new_root, h);
2255 }
2256
2257 he->hroot_out = new_root;
2258 }
2259
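/*
 * Hierarchy-mode filtering: walk the output tree applying
 * hist_entry__filter() to each entry (see the three cases below), then
 * recalculate the totals and resort the output tree, since filtering at a
 * lower level can change the periods of upper-level entries.
 */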
2260 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2261 {
2262 struct rb_node *nd;
2263 struct rb_root_cached new_root = RB_ROOT_CACHED;
2264
2265 hists->stats.nr_non_filtered_samples = 0;
2266
2267 hists__reset_filter_stats(hists);
2268 hists__reset_col_len(hists);
2269
2270 nd = rb_first_cached(&hists->entries);
2271 while (nd) {
2272 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2273 int ret;
2274
2275 ret = hist_entry__filter(h, type, arg);
2276
2277 /*
2278 * case 1. non-matching type
2279 * zero out the period, set filter marker and move to child
2280 */
2281 if (ret < 0) {
2282 memset(&h->stat, 0, sizeof(h->stat));
2283 h->filtered |= (1 << type);
2284
2285 nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2286 }
2287 /*
2288 * case 2. matched type (filter out)
2289 * set filter marker and move to next
2290 */
2291 else if (ret == 1) {
2292 h->filtered |= (1 << type);
2293
2294 nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2295 }
2296 /*
2297 * case 3. ok (not filtered)
2298 * add period to hists and parents, erase the filter marker
2299 * and move to next sibling
2300 */
2301 else {
2302 hists__remove_entry_filter(hists, h, type);
2303
2304 nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2305 }
2306 }
2307
2308 hierarchy_recalc_total_periods(hists);
2309
2310 /*
2311 * resort the output after applying a new filter, since a filter at a
2312 * lower hierarchy level can change periods at an upper level.
2313 */
2314 nd = rb_first_cached(&hists->entries);
2315 while (nd) {
2316 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2317
2318 nd = rb_next(nd);
2319 rb_erase_cached(&h->rb_node, &hists->entries);
2320
2321 resort_filtered_entry(&new_root, h);
2322 }
2323
2324 hists->entries = new_root;
2325 }
2326
2327 void hists__filter_by_thread(struct hists *hists)
2328 {
2329 if (symbol_conf.report_hierarchy)
2330 hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2331 hists->thread_filter);
2332 else
2333 hists__filter_by_type(hists, HIST_FILTER__THREAD,
2334 hists__filter_entry_by_thread);
2335 }
2336
2337 void hists__filter_by_dso(struct hists *hists)
2338 {
2339 if (symbol_conf.report_hierarchy)
2340 hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2341 hists->dso_filter);
2342 else
2343 hists__filter_by_type(hists, HIST_FILTER__DSO,
2344 hists__filter_entry_by_dso);
2345 }
2346
2347 void hists__filter_by_symbol(struct hists *hists)
2348 {
2349 if (symbol_conf.report_hierarchy)
2350 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2351 hists->symbol_filter_str);
2352 else
2353 hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2354 hists__filter_entry_by_symbol);
2355 }
2356
2357 void hists__filter_by_socket(struct hists *hists)
2358 {
2359 if (symbol_conf.report_hierarchy)
2360 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2361 &hists->socket_filter);
2362 else
2363 hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2364 hists__filter_entry_by_socket);
2365 }
2366
2367 void events_stats__inc(struct events_stats *stats, u32 type)
2368 {
2369 ++stats->nr_events[0];
2370 ++stats->nr_events[type];
2371 }
2372
2373 static void hists_stats__inc(struct hists_stats *stats)
2374 {
2375 ++stats->nr_samples;
2376 }
2377
2378 void hists__inc_nr_events(struct hists *hists)
2379 {
2380 hists_stats__inc(&hists->stats);
2381 }
2382
2383 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2384 {
2385 hists_stats__inc(&hists->stats);
2386 if (!filtered)
2387 hists->stats.nr_non_filtered_samples++;
2388 }
2389
2390 void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
2391 {
2392 hists->stats.nr_lost_samples += lost;
2393 }
2394
2395 void hists__inc_nr_dropped_samples(struct hists *hists, u32 lost)
2396 {
2397 hists->stats.nr_dropped_samples += lost;
2398 }
2399
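/*
 * Add a zero-stat "dummy" entry matching 'pair' to the collapsed (or input)
 * tree of 'hists', so that hists__link() can pair entries that exist only
 * in the other hists.
 */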
2400 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2401 struct hist_entry *pair)
2402 {
2403 struct rb_root_cached *root;
2404 struct rb_node **p;
2405 struct rb_node *parent = NULL;
2406 struct hist_entry *he;
2407 int64_t cmp;
2408 bool leftmost = true;
2409
2410 if (hists__has(hists, need_collapse))
2411 root = &hists->entries_collapsed;
2412 else
2413 root = hists->entries_in;
2414
2415 p = &root->rb_root.rb_node;
2416
2417 while (*p != NULL) {
2418 parent = *p;
2419 he = rb_entry(parent, struct hist_entry, rb_node_in);
2420
2421 cmp = hist_entry__collapse(he, pair);
2422
2423 if (!cmp)
2424 goto out;
2425
2426 if (cmp < 0)
2427 p = &(*p)->rb_left;
2428 else {
2429 p = &(*p)->rb_right;
2430 leftmost = false;
2431 }
2432 }
2433
2434 he = hist_entry__new(pair, true);
2435 if (he) {
2436 memset(&he->stat, 0, sizeof(he->stat));
2437 he->hists = hists;
2438 if (symbol_conf.cumulate_callchain)
2439 memset(he->stat_acc, 0, sizeof(he->stat));
2440 rb_link_node(&he->rb_node_in, parent, p);
2441 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2442 hists__inc_stats(hists, he);
2443 he->dummy = true;
2444 }
2445 out:
2446 return he;
2447 }
2448
2449 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2450 struct rb_root_cached *root,
2451 struct hist_entry *pair)
2452 {
2453 struct rb_node **p;
2454 struct rb_node *parent = NULL;
2455 struct hist_entry *he;
2456 bool leftmost = true;
2457
2458 p = &root->rb_root.rb_node;
2459 while (*p != NULL) {
2460 int64_t cmp;
2461
2462 parent = *p;
2463 he = rb_entry(parent, struct hist_entry, rb_node_in);
2464 cmp = hist_entry__collapse_hierarchy(he->hpp_list, he, pair);
2465 if (!cmp)
2466 goto out;
2467
2468 if (cmp < 0)
2469 p = &parent->rb_left;
2470 else {
2471 p = &parent->rb_right;
2472 leftmost = false;
2473 }
2474 }
2475
2476 he = hist_entry__new(pair, true);
2477 if (he) {
2478 rb_link_node(&he->rb_node_in, parent, p);
2479 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2480
2481 he->dummy = true;
2482 he->hists = hists;
2483 memset(&he->stat, 0, sizeof(he->stat));
2484 hists__inc_stats(hists, he);
2485 }
2486 out:
2487 return he;
2488 }
2489
2490 static struct hist_entry *hists__find_entry(struct hists *hists,
2491 struct hist_entry *he)
2492 {
2493 struct rb_node *n;
2494
2495 if (hists__has(hists, need_collapse))
2496 n = hists->entries_collapsed.rb_root.rb_node;
2497 else
2498 n = hists->entries_in->rb_root.rb_node;
2499
2500 while (n) {
2501 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2502 int64_t cmp = hist_entry__collapse(iter, he);
2503
2504 if (cmp < 0)
2505 n = n->rb_left;
2506 else if (cmp > 0)
2507 n = n->rb_right;
2508 else
2509 return iter;
2510 }
2511
2512 return NULL;
2513 }
2514
2515 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2516 struct hist_entry *he)
2517 {
2518 struct rb_node *n = root->rb_root.rb_node;
2519
2520 while (n) {
2521 struct hist_entry *iter;
2522 int64_t cmp;
2523
2524 iter = rb_entry(n, struct hist_entry, rb_node_in);
2525 cmp = hist_entry__collapse_hierarchy(he->hpp_list, iter, he);
2526 if (cmp < 0)
2527 n = n->rb_left;
2528 else if (cmp > 0)
2529 n = n->rb_right;
2530 else
2531 return iter;
2532 }
2533
2534 return NULL;
2535 }
2536
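/*
 * Hierarchy-mode counterpart of hists__match(): pair up matching entries
 * level by level, recursing into children whenever a pair is found.
 */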
2537 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2538 struct rb_root_cached *other_root)
2539 {
2540 struct rb_node *nd;
2541 struct hist_entry *pos, *pair;
2542
2543 for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2544 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2545 pair = hists__find_hierarchy_entry(other_root, pos);
2546
2547 if (pair) {
2548 hist_entry__add_pair(pair, pos);
2549 hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2550 }
2551 }
2552 }
2553
2554 /*
2555 * Look for pairs to link to the leader buckets (hist_entries):
2556 */
2557 void hists__match(struct hists *leader, struct hists *other)
2558 {
2559 struct rb_root_cached *root;
2560 struct rb_node *nd;
2561 struct hist_entry *pos, *pair;
2562
2563 if (symbol_conf.report_hierarchy) {
2564 /* hierarchy report always collapses entries */
2565 return hists__match_hierarchy(&leader->entries_collapsed,
2566 &other->entries_collapsed);
2567 }
2568
2569 if (hists__has(leader, need_collapse))
2570 root = &leader->entries_collapsed;
2571 else
2572 root = leader->entries_in;
2573
2574 for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2575 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2576 pair = hists__find_entry(other, pos);
2577
2578 if (pair)
2579 hist_entry__add_pair(pair, pos);
2580 }
2581 }
2582
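/*
 * Hierarchy-mode counterpart of hists__link(): for every entry in the other
 * hists that has no pair in the leader, add a dummy leader entry (keeping
 * its parent pointer inside the leader hierarchy) and recurse into children.
 */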
2583 static int hists__link_hierarchy(struct hists *leader_hists,
2584 struct hist_entry *parent,
2585 struct rb_root_cached *leader_root,
2586 struct rb_root_cached *other_root)
2587 {
2588 struct rb_node *nd;
2589 struct hist_entry *pos, *leader;
2590
2591 for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2592 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2593
2594 if (hist_entry__has_pairs(pos)) {
2595 bool found = false;
2596
2597 list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2598 if (leader->hists == leader_hists) {
2599 found = true;
2600 break;
2601 }
2602 }
2603 if (!found)
2604 return -1;
2605 } else {
2606 leader = add_dummy_hierarchy_entry(leader_hists,
2607 leader_root, pos);
2608 if (leader == NULL)
2609 return -1;
2610
2611 /* the parent must point into the leader hierarchy, not into 'pos' */
2612 leader->parent_he = parent;
2613
2614 hist_entry__add_pair(pos, leader);
2615 }
2616
2617 if (!pos->leaf) {
2618 if (hists__link_hierarchy(leader_hists, leader,
2619 &leader->hroot_in,
2620 &pos->hroot_in) < 0)
2621 return -1;
2622 }
2623 }
2624 return 0;
2625 }
2626
2627 /*
2628 * Look for entries in the other hists that are not present in the leader; if
2629 * we find any, just add a dummy entry on the leader hists, with period=0 and
2630 * nr_events=0, to serve as the list header.
2631 */
2632 int hists__link(struct hists *leader, struct hists *other)
2633 {
2634 struct rb_root_cached *root;
2635 struct rb_node *nd;
2636 struct hist_entry *pos, *pair;
2637
2638 if (symbol_conf.report_hierarchy) {
2639 /* hierarchy report always collapses entries */
2640 return hists__link_hierarchy(leader, NULL,
2641 &leader->entries_collapsed,
2642 &other->entries_collapsed);
2643 }
2644
2645 if (hists__has(other, need_collapse))
2646 root = &other->entries_collapsed;
2647 else
2648 root = other->entries_in;
2649
2650 for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2651 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2652
2653 if (!hist_entry__has_pairs(pos)) {
2654 pair = hists__add_dummy_entry(leader, pos);
2655 if (pair == NULL)
2656 return -1;
2657 hist_entry__add_pair(pos, pair);
2658 }
2659 }
2660
2661 return 0;
2662 }
2663
2664 int hists__unlink(struct hists *hists)
2665 {
2666 struct rb_root_cached *root;
2667 struct rb_node *nd;
2668 struct hist_entry *pos;
2669
2670 if (hists__has(hists, need_collapse))
2671 root = &hists->entries_collapsed;
2672 else
2673 root = hists->entries_in;
2674
2675 for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2676 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2677 list_del_init(&pos->pairs.node);
2678 }
2679
2680 return 0;
2681 }
2682
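/*
 * Walk the sample's branch stack (in program order, i.e. from the last
 * recorded entry backwards) and account each branch's cycle count via
 * addr_map_symbol__account_cycles(), optionally accumulating the total
 * cycles for the caller.  Only done when the stack carries cycle counts.
 */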
2683 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2684 struct perf_sample *sample, bool nonany_branch_mode,
2685 u64 *total_cycles, struct evsel *evsel)
2686 {
2687 struct branch_info *bi;
2688 struct branch_entry *entries = perf_sample__branch_entries(sample);
2689
2690 /* If we have branch cycles, always annotate them. */
2691 if (bs && bs->nr && entries[0].flags.cycles) {
2692 bi = sample__resolve_bstack(sample, al);
2693 if (bi) {
2694 struct addr_map_symbol *prev = NULL;
2695
2696 /*
2697 * Ignore errors, still want to process the
2698 * other entries.
2699 *
2700 * For non-standard branch modes always
2701 * force no IPC (prev == NULL).
2702 *
2703 * Note that perf stores branches reversed from
2704 * program order!
2705 */
2706 for (int i = bs->nr - 1; i >= 0; i--) {
2707 addr_map_symbol__account_cycles(&bi[i].from,
2708 nonany_branch_mode ? NULL : prev,
2709 bi[i].flags.cycles, evsel,
2710 bi[i].branch_stack_cntr);
2711 prev = &bi[i].to;
2712
2713 if (total_cycles)
2714 *total_cycles += bi[i].flags.cycles;
2715 }
2716 for (unsigned int i = 0; i < bs->nr; i++) {
2717 map_symbol__exit(&bi[i].to.ms);
2718 map_symbol__exit(&bi[i].from.ms);
2719 }
2720 free(bi);
2721 }
2722 }
2723 }
2724
2725 size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
2726 {
2727 struct evsel *pos;
2728 size_t ret = 0;
2729
2730 evlist__for_each_entry(evlist, pos) {
2731 struct hists *hists = evsel__hists(pos);
2732 u64 total_samples = hists->stats.nr_samples;
2733
2734 total_samples += hists->stats.nr_lost_samples;
2735 total_samples += hists->stats.nr_dropped_samples;
2736
2737 if (symbol_conf.skip_empty && total_samples == 0)
2738 continue;
2739
2740 ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
2741 if (hists->stats.nr_samples)
2742 ret += fprintf(fp, "%20s events: %10d\n",
2743 "SAMPLE", hists->stats.nr_samples);
2744 if (hists->stats.nr_lost_samples)
2745 ret += fprintf(fp, "%20s events: %10d\n",
2746 "LOST_SAMPLES", hists->stats.nr_lost_samples);
2747 if (hists->stats.nr_dropped_samples)
2748 ret += fprintf(fp, "%20s events: %10d\n",
2749 "LOST_SAMPLES (BPF)", hists->stats.nr_dropped_samples);
2750 }
2751
2752 return ret;
2753 }
2754
2755
2756 u64 hists__total_period(struct hists *hists)
2757 {
2758 return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2759 hists->stats.total_period;
2760 }
2761
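/*
 * Format the report/browser title line: sample count, event name (or group
 * description), optional sampling frequency and total period, plus any
 * active UID, thread, DSO or processor-socket filters.
 */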
2762 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2763 {
2764 char unit;
2765 int printed;
2766 const struct dso *dso = hists->dso_filter;
2767 struct thread *thread = hists->thread_filter;
2768 int socket_id = hists->socket_filter;
2769 unsigned long nr_samples = hists->stats.nr_samples;
2770 u64 nr_events = hists->stats.total_period;
2771 struct evsel *evsel = hists_to_evsel(hists);
2772 const char *ev_name = evsel__name(evsel);
2773 char buf[512], sample_freq_str[64] = "";
2774 size_t buflen = sizeof(buf);
2775 char ref[30] = " show reference callgraph, ";
2776 bool enable_ref = false;
2777
2778 if (symbol_conf.filter_relative) {
2779 nr_samples = hists->stats.nr_non_filtered_samples;
2780 nr_events = hists->stats.total_non_filtered_period;
2781 }
2782
2783 if (evsel__is_group_event(evsel)) {
2784 struct evsel *pos;
2785
2786 evsel__group_desc(evsel, buf, buflen);
2787 ev_name = buf;
2788
2789 for_each_group_member(pos, evsel) {
2790 struct hists *pos_hists = evsel__hists(pos);
2791
2792 if (symbol_conf.filter_relative) {
2793 nr_samples += pos_hists->stats.nr_non_filtered_samples;
2794 nr_events += pos_hists->stats.total_non_filtered_period;
2795 } else {
2796 nr_samples += pos_hists->stats.nr_samples;
2797 nr_events += pos_hists->stats.total_period;
2798 }
2799 }
2800 }
2801
2802 if (symbol_conf.show_ref_callgraph &&
2803 strstr(ev_name, "call-graph=no"))
2804 enable_ref = true;
2805
2806 if (show_freq)
2807 scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);
2808
2809 nr_samples = convert_unit(nr_samples, &unit);
2810 printed = scnprintf(bf, size,
2811 "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2812 nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2813 ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2814
2815
2816 if (hists->uid_filter_str)
2817 printed += snprintf(bf + printed, size - printed,
2818 ", UID: %s", hists->uid_filter_str);
2819 if (thread) {
2820 if (hists__has(hists, thread)) {
2821 printed += scnprintf(bf + printed, size - printed,
2822 ", Thread: %s(%d)",
2823 (thread__comm_set(thread) ? thread__comm_str(thread) : ""),
2824 thread__tid(thread));
2825 } else {
2826 printed += scnprintf(bf + printed, size - printed,
2827 ", Thread: %s",
2828 (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
2829 }
2830 }
2831 if (dso)
2832 printed += scnprintf(bf + printed, size - printed,
2833 ", DSO: %s", dso__short_name(dso));
2834 if (socket_id > -1)
2835 printed += scnprintf(bf + printed, size - printed,
2836 ", Processor Socket: %d", socket_id);
2837
2838 return printed;
2839 }
2840
2841 int parse_filter_percentage(const struct option *opt __maybe_unused,
2842 const char *arg, int unset __maybe_unused)
2843 {
2844 if (!strcmp(arg, "relative"))
2845 symbol_conf.filter_relative = true;
2846 else if (!strcmp(arg, "absolute"))
2847 symbol_conf.filter_relative = false;
2848 else {
2849 pr_debug("Invalid percentage: %s\n", arg);
2850 return -1;
2851 }
2852
2853 return 0;
2854 }
2855
2856 int perf_hist_config(const char *var, const char *value)
2857 {
2858 if (!strcmp(var, "hist.percentage"))
2859 return parse_filter_percentage(NULL, value, 0);
2860
2861 return 0;
2862 }
2863
2864 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2865 {
2866 memset(hists, 0, sizeof(*hists));
2867 hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
2868 hists->entries_in = &hists->entries_in_array[0];
2869 hists->entries_collapsed = RB_ROOT_CACHED;
2870 hists->entries = RB_ROOT_CACHED;
2871 mutex_init(&hists->lock);
2872 hists->socket_filter = -1;
2873 hists->hpp_list = hpp_list;
2874 INIT_LIST_HEAD(&hists->hpp_formats);
2875 return 0;
2876 }
2877
2878 static void hists__delete_remaining_entries(struct rb_root_cached *root)
2879 {
2880 struct rb_node *node;
2881 struct hist_entry *he;
2882
2883 while (!RB_EMPTY_ROOT(&root->rb_root)) {
2884 node = rb_first_cached(root);
2885 rb_erase_cached(node, root);
2886
2887 he = rb_entry(node, struct hist_entry, rb_node_in);
2888 hist_entry__delete(he);
2889 }
2890 }
2891
2892 static void hists__delete_all_entries(struct hists *hists)
2893 {
2894 hists__delete_entries(hists);
2895 hists__delete_remaining_entries(&hists->entries_in_array[0]);
2896 hists__delete_remaining_entries(&hists->entries_in_array[1]);
2897 hists__delete_remaining_entries(&hists->entries_collapsed);
2898 }
2899
2900 static void hists_evsel__exit(struct evsel *evsel)
2901 {
2902 struct hists *hists = evsel__hists(evsel);
2903 struct perf_hpp_fmt *fmt, *pos;
2904 struct perf_hpp_list_node *node, *tmp;
2905
2906 hists__delete_all_entries(hists);
2907
2908 list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2909 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2910 list_del_init(&fmt->list);
2911 free(fmt);
2912 }
2913 list_del_init(&node->list);
2914 free(node);
2915 }
2916 }
2917
2918 static int hists_evsel__init(struct evsel *evsel)
2919 {
2920 struct hists *hists = evsel__hists(evsel);
2921
2922 __hists__init(hists, &perf_hpp_list);
2923 return 0;
2924 }
2925
2926 /*
2927 * XXX We probably need a hists_evsel__exit() to free the hist_entries
2928 * stored in the rbtree...
2929 */
2930
2931 int hists__init(void)
2932 {
2933 int err = evsel__object_config(sizeof(struct hists_evsel),
2934 hists_evsel__init, hists_evsel__exit);
2935 if (err)
2936 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2937
2938 return err;
2939 }
2940
2941 void perf_hpp_list__init(struct perf_hpp_list *list)
2942 {
2943 INIT_LIST_HEAD(&list->fields);
2944 INIT_LIST_HEAD(&list->sorts);
2945 }
2946