// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf.h"

#include "util/build-id.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/mmap.h"
#include "util/term.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/intlist.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/debug.h"
#include "util/tool.h"
#include "util/stat.h"
#include "util/synthetic-events.h"
#include "util/top.h"
#include "util/data.h"
#include "util/ordered-events.h"
#include "util/kvm-stat.h"
#include "util/util.h"
#include "ui/browsers/hists.h"
#include "ui/progress.h"
#include "ui/ui.h"
#include "util/string2.h"

#include <sys/prctl.h>
#ifdef HAVE_TIMERFD_SUPPORT
#include <sys/timerfd.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <termios.h>
#include <semaphore.h>
#include <signal.h>
#include <math.h>
#include <perf/mmap.h>

#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
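/*
 * GET_EVENT_KEY() generates a get_event_<func>() accessor: with vcpu == -1
 * it returns the aggregate value across all vcpus, otherwise the per-vcpu
 * value (or 0 for a vcpu this event has never seen).  COMPARE_EVENT_KEY()
 * additionally emits a cmp_event_<func>() helper used for sorting.
 */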
#define GET_EVENT_KEY(func, field)					\
static u64 get_event_ ##func(struct kvm_event *event, int vcpu)		\
{									\
	if (vcpu == -1)							\
		return event->total.field;				\
									\
	if (vcpu >= event->max_vcpu)					\
		return 0;						\
									\
	return event->vcpu[vcpu].field;					\
}

#define COMPARE_EVENT_KEY(func, field)					\
GET_EVENT_KEY(func, field)						\
static int64_t cmp_event_ ## func(struct kvm_event *one,		\
			      struct kvm_event *two, int vcpu)		\
{									\
	return get_event_ ##func(one, vcpu) -				\
	       get_event_ ##func(two, vcpu);				\
}

COMPARE_EVENT_KEY(time, time);
COMPARE_EVENT_KEY(max, stats.max);
COMPARE_EVENT_KEY(min, stats.min);
COMPARE_EVENT_KEY(count, stats.n);
COMPARE_EVENT_KEY(mean, stats.mean);

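/*
 * A single global kvm_hists instance backs both the stdio and TUI output:
 * 'hists' collects one hist_entry per KVM event and 'list' holds the
 * output/sort columns built from the kvm_dimension table below.
 */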
struct kvm_hists {
	struct hists		hists;
	struct perf_hpp_list	list;
};

struct kvm_dimension {
	const char *name;
	const char *header;
	int width;
	int64_t (*cmp)(struct perf_hpp_fmt *fmt, struct hist_entry *left,
		       struct hist_entry *right);
	int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		     struct hist_entry *he);
};

struct kvm_fmt {
	struct perf_hpp_fmt	fmt;
	struct kvm_dimension	*dim;
};

static struct kvm_hists kvm_hists;

static int64_t ev_name_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			   struct hist_entry *left,
			   struct hist_entry *right)
{
	/* Negate the strcmp() result so entries sort in alphabetical order */
	return -strcmp(left->kvm_info->name, right->kvm_info->name);
}

static int fmt_width(struct perf_hpp_fmt *fmt,
		     struct perf_hpp *hpp __maybe_unused,
		     struct hists *hists __maybe_unused);

static int ev_name_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			 struct hist_entry *he)
{
	int width = fmt_width(fmt, hpp, he->hists);

	return scnprintf(hpp->buf, hpp->size, "%*s", width, he->kvm_info->name);
}

static struct kvm_dimension dim_event = {
	.header		= "Event name",
	.name		= "ev_name",
	.cmp		= ev_name_cmp,
	.entry		= ev_name_entry,
	.width		= 40,
};

#define EV_METRIC_CMP(metric)						\
static int64_t ev_cmp_##metric(struct perf_hpp_fmt *fmt __maybe_unused,	\
			       struct hist_entry *left,			\
			       struct hist_entry *right)		\
{									\
	struct kvm_event *event_left;					\
	struct kvm_event *event_right;					\
	struct perf_kvm_stat *perf_kvm;					\
									\
	event_left  = container_of(left, struct kvm_event, he);		\
	event_right = container_of(right, struct kvm_event, he);	\
									\
	perf_kvm = event_left->perf_kvm;				\
	return cmp_event_##metric(event_left, event_right,		\
				  perf_kvm->trace_vcpu);		\
}

EV_METRIC_CMP(time)
EV_METRIC_CMP(count)
EV_METRIC_CMP(max)
EV_METRIC_CMP(min)
EV_METRIC_CMP(mean)

#define EV_METRIC_ENTRY(metric)						\
static int ev_entry_##metric(struct perf_hpp_fmt *fmt,			\
			     struct perf_hpp *hpp,			\
			     struct hist_entry *he)			\
{									\
	struct kvm_event *event;					\
	int width = fmt_width(fmt, hpp, he->hists);			\
	struct perf_kvm_stat *perf_kvm;					\
									\
	event = container_of(he, struct kvm_event, he);			\
	perf_kvm = event->perf_kvm;					\
	return scnprintf(hpp->buf, hpp->size, "%*lu", width,		\
		get_event_##metric(event, perf_kvm->trace_vcpu));	\
}

EV_METRIC_ENTRY(time)
EV_METRIC_ENTRY(count)
EV_METRIC_ENTRY(max)
EV_METRIC_ENTRY(min)

static struct kvm_dimension dim_time = {
	.header		= "Time (ns)",
	.name		= "time",
	.cmp		= ev_cmp_time,
	.entry		= ev_entry_time,
	.width		= 12,
};

static struct kvm_dimension dim_count = {
	.header		= "Samples",
	.name		= "sample",
	.cmp		= ev_cmp_count,
	.entry		= ev_entry_count,
	.width		= 12,
};

static struct kvm_dimension dim_max_time = {
	.header		= "Max Time (ns)",
	.name		= "max_t",
	.cmp		= ev_cmp_max,
	.entry		= ev_entry_max,
	.width		= 14,
};

static struct kvm_dimension dim_min_time = {
	.header		= "Min Time (ns)",
	.name		= "min_t",
	.cmp		= ev_cmp_min,
	.entry		= ev_entry_min,
	.width		= 14,
};

static int ev_entry_mean(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			 struct hist_entry *he)
{
	struct kvm_event *event;
	int width = fmt_width(fmt, hpp, he->hists);
	struct perf_kvm_stat *perf_kvm;

	event = container_of(he, struct kvm_event, he);
	perf_kvm = event->perf_kvm;
	return scnprintf(hpp->buf, hpp->size, "%*lu", width,
			 get_event_mean(event, perf_kvm->trace_vcpu));
}

static struct kvm_dimension dim_mean_time = {
	.header		= "Mean Time (ns)",
	.name		= "mean_t",
	.cmp		= ev_cmp_mean,
	.entry		= ev_entry_mean,
	.width		= 14,
};

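/*
 * Helpers for the Time%/Sample% columns: percent() guards against a zero
 * total and PERC_STR() formats the value as "NN.NN%" into a caller
 * provided buffer.
 */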
#define PERC_STR(__s, __v)				\
({							\
	scnprintf(__s, sizeof(__s), "%.2F%%", __v);	\
	__s;						\
})

static double percent(u64 st, u64 tot)
{
	return tot ? 100. * (double) st / (double) tot : 0;
}

#define EV_METRIC_PERCENT(metric)					\
static double ev_percent_##metric(struct hist_entry *he)		\
{									\
	struct kvm_event *event;					\
	struct perf_kvm_stat *perf_kvm;					\
									\
	event = container_of(he, struct kvm_event, he);			\
	perf_kvm = event->perf_kvm;					\
									\
	return percent(get_event_##metric(event, perf_kvm->trace_vcpu),	\
		       perf_kvm->total_##metric);			\
}

EV_METRIC_PERCENT(time)
EV_METRIC_PERCENT(count)

static int ev_entry_time_percent(struct perf_hpp_fmt *fmt,
				 struct perf_hpp *hpp,
				 struct hist_entry *he)
{
	int width = fmt_width(fmt, hpp, he->hists);
	double per;
	char buf[10];

	per = ev_percent_time(he);
	return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}

static int64_t
ev_cmp_time_percent(struct perf_hpp_fmt *fmt __maybe_unused,
		    struct hist_entry *left, struct hist_entry *right)
{
	double per_left;
	double per_right;

	per_left  = ev_percent_time(left);
	per_right = ev_percent_time(right);

	return per_left - per_right;
}

static struct kvm_dimension dim_time_percent = {
	.header		= "Time%",
	.name		= "percent_time",
	.cmp		= ev_cmp_time_percent,
	.entry		= ev_entry_time_percent,
	.width		= 12,
};

static int ev_entry_count_percent(struct perf_hpp_fmt *fmt,
				  struct perf_hpp *hpp,
				  struct hist_entry *he)
{
	int width = fmt_width(fmt, hpp, he->hists);
	double per;
	char buf[10];

	per = ev_percent_count(he);
	return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}

static int64_t
ev_cmp_count_percent(struct perf_hpp_fmt *fmt __maybe_unused,
		     struct hist_entry *left, struct hist_entry *right)
{
	double per_left;
	double per_right;

	per_left  = ev_percent_count(left);
	per_right = ev_percent_count(right);

	return per_left - per_right;
}

static struct kvm_dimension dim_count_percent = {
	.header		= "Sample%",
	.name		= "percent_sample",
	.cmp		= ev_cmp_count_percent,
	.entry		= ev_entry_count_percent,
	.width		= 12,
};

static struct kvm_dimension *dimensions[] = {
	&dim_event,
	&dim_time,
	&dim_time_percent,
	&dim_count,
	&dim_count_percent,
	&dim_max_time,
	&dim_min_time,
	&dim_mean_time,
	NULL,
};

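/*
 * Glue between the generic perf_hpp_fmt callbacks and the kvm_dimension
 * table: every registered column is a kvm_fmt wrapper, so the callbacks
 * recover the dimension with container_of() and delegate to it.
 */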
static int fmt_width(struct perf_hpp_fmt *fmt,
		     struct perf_hpp *hpp __maybe_unused,
		     struct hists *hists __maybe_unused)
{
	struct kvm_fmt *kvm_fmt;

	kvm_fmt = container_of(fmt, struct kvm_fmt, fmt);
	return kvm_fmt->dim->width;
}

static int fmt_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		      struct hists *hists, int line __maybe_unused,
		      int *span __maybe_unused)
{
	struct kvm_fmt *kvm_fmt;
	struct kvm_dimension *dim;
	int width = fmt_width(fmt, hpp, hists);

	kvm_fmt = container_of(fmt, struct kvm_fmt, fmt);
	dim = kvm_fmt->dim;

	return scnprintf(hpp->buf, hpp->size, "%*s", width, dim->header);
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct kvm_fmt *kvm_fmt_a = container_of(a, struct kvm_fmt, fmt);
	struct kvm_fmt *kvm_fmt_b = container_of(b, struct kvm_fmt, fmt);

	return kvm_fmt_a->dim == kvm_fmt_b->dim;
}

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	struct kvm_fmt *kvm_fmt;

	kvm_fmt = container_of(fmt, struct kvm_fmt, fmt);
	free(kvm_fmt);
}

static struct kvm_dimension *get_dimension(const char *name)
{
	unsigned int i;

	for (i = 0; dimensions[i] != NULL; i++) {
		if (!strcmp(dimensions[i]->name, name))
			return dimensions[i];
	}

	return NULL;
}

static struct kvm_fmt *get_format(const char *name)
{
	struct kvm_dimension *dim = get_dimension(name);
	struct kvm_fmt *kvm_fmt;
	struct perf_hpp_fmt *fmt;

	if (!dim)
		return NULL;

	kvm_fmt = zalloc(sizeof(*kvm_fmt));
	if (!kvm_fmt)
		return NULL;

	kvm_fmt->dim = dim;

	fmt = &kvm_fmt->fmt;
	INIT_LIST_HEAD(&fmt->list);
	INIT_LIST_HEAD(&fmt->sort_list);
	fmt->cmp	= dim->cmp;
	fmt->sort	= dim->cmp;
	fmt->color	= NULL;
	fmt->entry	= dim->entry;
	fmt->header	= fmt_header;
	fmt->width	= fmt_width;
	fmt->collapse	= dim->cmp;
	fmt->equal	= fmt_equal;
	fmt->free	= fmt_free;

	return kvm_fmt;
}

static int kvm_hists__init_output(struct perf_hpp_list *hpp_list, char *name)
{
	struct kvm_fmt *kvm_fmt = get_format(name);

	if (!kvm_fmt) {
		pr_warning("Failed to find format for output field %s.\n", name);
		return -EINVAL;
	}

	perf_hpp_list__column_register(hpp_list, &kvm_fmt->fmt);
	return 0;
}

static int kvm_hists__init_sort(struct perf_hpp_list *hpp_list, char *name)
{
	struct kvm_fmt *kvm_fmt = get_format(name);

	if (!kvm_fmt) {
		pr_warning("Failed to find format for sort key %s.\n", name);
		return -EINVAL;
	}

	perf_hpp_list__register_sort_field(hpp_list, &kvm_fmt->fmt);
	return 0;
}

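/*
 * Walk a comma (or space) separated list of field keys and register each
 * one through 'fn', which is either the output-column or the sort-key
 * initializer above.
 */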
static int kvm_hpp_list__init(char *list,
			      struct perf_hpp_list *hpp_list,
			      int (*fn)(struct perf_hpp_list *hpp_list,
					char *name))
{
	char *tmp, *tok;
	int ret = 0;

	if (!list || !fn)
		return 0;

	for (tok = strtok_r(list, ", ", &tmp); tok;
	     tok = strtok_r(NULL, ", ", &tmp)) {
		ret = fn(hpp_list, tok);
		if (!ret)
			continue;

		/* Handle errors */
		if (ret == -EINVAL)
			pr_err("Invalid field key: '%s'", tok);
		else if (ret == -ESRCH)
			pr_err("Unknown field key: '%s'", tok);
		else
			pr_err("Failed to initialize field key: '%s'", tok);

		break;
	}

	return ret;
}

static int kvm_hpp_list__parse(struct perf_hpp_list *hpp_list,
			       const char *output_, const char *sort_)
{
	char *output = output_ ? strdup(output_) : NULL;
	char *sort = sort_ ? strdup(sort_) : NULL;
	int ret;

	ret = kvm_hpp_list__init(output, hpp_list, kvm_hists__init_output);
	if (ret)
		goto out;

	ret = kvm_hpp_list__init(sort, hpp_list, kvm_hists__init_sort);
	if (ret)
		goto out;

	/* Copy sort keys to output fields */
	perf_hpp__setup_output_field(hpp_list);

	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(hpp_list);
out:
	free(output);
	free(sort);
	return ret;
}

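/*
 * The initial view sorts by event name only; sort_result() later rebuilds
 * the column list with the full set of metric columns before printing.
 */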
static int kvm_hists__init(void)
{
	kvm_hists.list.nr_header_lines = 1;
	__hists__init(&kvm_hists.hists, &kvm_hists.list);
	perf_hpp_list__init(&kvm_hists.list);
	return kvm_hpp_list__parse(&kvm_hists.list, NULL, "ev_name");
}

static int kvm_hists__reinit(const char *output, const char *sort)
{
	perf_hpp__reset_output_field(&kvm_hists.list);
	return kvm_hpp_list__parse(&kvm_hists.list, output, sort);
}

static void print_result(struct perf_kvm_stat *kvm);

#ifdef HAVE_SLANG_SUPPORT
static void kvm_browser__update_nr_entries(struct hist_browser *hb)
{
	struct rb_node *nd = rb_first_cached(&hb->hists->entries);
	u64 nr_entries = 0;

	for (; nd; nd = rb_next(nd)) {
		struct hist_entry *he = rb_entry(nd, struct hist_entry,
						 rb_node);

		if (!he->filtered)
			nr_entries++;
	}

	hb->nr_non_filtered_entries = nr_entries;
}

static int kvm_browser__title(struct hist_browser *browser,
			      char *buf, size_t size)
{
	scnprintf(buf, size, "KVM event statistics (%lu entries)",
		  browser->nr_non_filtered_entries);
	return 0;
}

static struct hist_browser*
perf_kvm_browser__new(struct hists *hists)
{
	struct hist_browser *browser = hist_browser__new(hists);

	if (browser)
		browser->title = kvm_browser__title;

	return browser;
}

static int kvm__hists_browse(struct hists *hists)
{
	struct hist_browser *browser;
	int key = -1;

	browser = perf_kvm_browser__new(hists);
	if (browser == NULL)
		return -1;

	/* reset abort key so that it can get Ctrl-C as a key */
	SLang_reset_tty();
	SLang_init_tty(0, 0, 0);

	kvm_browser__update_nr_entries(browser);

	while (1) {
		key = hist_browser__run(browser, "? - help", true, 0);

		switch (key) {
		case 'q':
			goto out;
		default:
			break;
		}
	}

out:
	hist_browser__delete(browser);
	return 0;
}

static void kvm_display(struct perf_kvm_stat *kvm)
{
	if (!use_browser)
		print_result(kvm);
	else
		kvm__hists_browse(&kvm_hists.hists);
}

#else

static void kvm_display(struct perf_kvm_stat *kvm)
{
	use_browser = 0;
	print_result(kvm);
}

#endif /* HAVE_SLANG_SUPPORT */

#endif // defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)

static const char *get_filename_for_perf_kvm(void)
{
	const char *filename;

	if (perf_host && !perf_guest)
		filename = strdup("perf.data.host");
	else if (!perf_host && perf_guest)
		filename = strdup("perf.data.guest");
	else
		filename = strdup("perf.data.kvm");

	return filename;
}

#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)

static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
{
	struct kvm_reg_events_ops *events_ops = kvm_reg_events_ops;

	for (events_ops = kvm_reg_events_ops; events_ops->name; events_ops++) {
		if (!strcmp(events_ops->name, kvm->report_event)) {
			kvm->events_ops = events_ops->ops;
			return true;
		}
	}

	return false;
}

struct vcpu_event_record {
	int vcpu_id;
	u64 start_time;
	struct kvm_event *last_event;
};

#ifdef HAVE_TIMERFD_SUPPORT
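/*
 * In live mode the accumulated per-event statistics are reset after each
 * display interval: walk every hist entry and zero both the aggregate and
 * the per-vcpu counters.
 */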
static void clear_events_cache_stats(void)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct kvm_event *event;
	int i;

	if (hists__has(&kvm_hists.hists, need_collapse))
		root = &kvm_hists.hists.entries_collapsed;
	else
		root = kvm_hists.hists.entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		struct hist_entry *he;

		he = rb_entry(nd, struct hist_entry, rb_node_in);
		event = container_of(he, struct kvm_event, he);

		/* reset stats for event */
		event->total.time = 0;
		init_stats(&event->total.stats);

		for (i = 0; i < event->max_vcpu; ++i) {
			event->vcpu[i].time = 0;
			init_stats(&event->vcpu[i].stats);
		}
	}
}
#endif

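/*
 * Grow the per-vcpu stats array on demand, in DEFAULT_VCPU_NUM sized
 * steps, so an event seen on a new vcpu id always has a slot to record
 * into.  Newly added slots are zero initialized.
 */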
static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
{
	int old_max_vcpu = event->max_vcpu;
	void *prev;

	if (vcpu_id < event->max_vcpu)
		return true;

	while (event->max_vcpu <= vcpu_id)
		event->max_vcpu += DEFAULT_VCPU_NUM;

	prev = event->vcpu;
	event->vcpu = realloc(event->vcpu,
			      event->max_vcpu * sizeof(*event->vcpu));
	if (!event->vcpu) {
		free(prev);
		pr_err("Not enough memory\n");
		return false;
	}

	memset(event->vcpu + old_max_vcpu, 0,
	       (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
	return true;
}

static void *kvm_he_zalloc(size_t size)
{
	struct kvm_event *kvm_ev;

	kvm_ev = zalloc(size + sizeof(*kvm_ev));
	if (!kvm_ev)
		return NULL;

	init_stats(&kvm_ev->total.stats);
	hists__inc_nr_samples(&kvm_hists.hists, 0);
	return &kvm_ev->he;
}

static void kvm_he_free(void *he)
{
	struct kvm_event *kvm_ev;

	kvm_ev = container_of(he, struct kvm_event, he);
	free(kvm_ev);
}

static struct hist_entry_ops kvm_ev_entry_ops = {
	.new	= kvm_he_zalloc,
	.free	= kvm_he_free,
};

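/*
 * Look up (or create) the hist entry for a decoded event key.  The entry
 * is allocated through kvm_ev_entry_ops, so a kvm_event wraps every
 * hist_entry and carries the statistics updated below.
 */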
static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
					       struct event_key *key,
					       struct perf_sample *sample)
{
	struct kvm_event *event;
	struct hist_entry *he;
	struct kvm_info *ki;

	BUG_ON(key->key == INVALID_KEY);

	ki = kvm_info__new();
	if (!ki) {
		pr_err("Failed to allocate kvm info\n");
		return NULL;
	}

	kvm->events_ops->decode_key(kvm, key, ki->name);
	he = hists__add_entry_ops(&kvm_hists.hists, &kvm_ev_entry_ops,
				  &kvm->al, NULL, NULL, NULL, ki, sample, true);
	if (he == NULL) {
		pr_err("Failed to allocate hist entry\n");
		free(ki);
		return NULL;
	}

	event = container_of(he, struct kvm_event, he);
	if (!event->perf_kvm) {
		event->perf_kvm = kvm;
		event->key = *key;
	}

	return event;
}

static bool handle_begin_event(struct perf_kvm_stat *kvm,
			       struct vcpu_event_record *vcpu_record,
			       struct event_key *key,
			       struct perf_sample *sample)
{
	struct kvm_event *event = NULL;

	if (key->key != INVALID_KEY)
		event = find_create_kvm_event(kvm, key, sample);

	vcpu_record->last_event = event;
	vcpu_record->start_time = sample->time;
	return true;
}

static void
kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff)
{
	kvm_stats->time += time_diff;
	update_stats(&kvm_stats->stats, time_diff);
}

static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
{
	struct kvm_event_stats *kvm_stats = &event->total;

	if (vcpu_id != -1)
		kvm_stats = &event->vcpu[vcpu_id];

	return rel_stddev_stats(stddev_stats(&kvm_stats->stats),
				avg_stats(&kvm_stats->stats));
}

static bool update_kvm_event(struct perf_kvm_stat *kvm,
			     struct kvm_event *event, int vcpu_id,
			     u64 time_diff)
{
	/* Update overall statistics */
	kvm->total_count++;
	kvm->total_time += time_diff;

	if (vcpu_id == -1) {
		kvm_update_event_stats(&event->total, time_diff);
		return true;
	}

	if (!kvm_event_expand(event, vcpu_id))
		return false;

	kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff);
	return true;
}

static bool is_child_event(struct perf_kvm_stat *kvm,
			   struct evsel *evsel,
			   struct perf_sample *sample,
			   struct event_key *key)
{
	struct child_event_ops *child_ops;

	child_ops = kvm->events_ops->child_ops;

	if (!child_ops)
		return false;

	for (; child_ops->name; child_ops++) {
		if (evsel__name_is(evsel, child_ops->name)) {
			child_ops->get_key(evsel, sample, key);
			return true;
		}
	}

	return false;
}

static bool handle_child_event(struct perf_kvm_stat *kvm,
			       struct vcpu_event_record *vcpu_record,
			       struct event_key *key,
			       struct perf_sample *sample)
{
	struct kvm_event *event = NULL;

	if (key->key != INVALID_KEY)
		event = find_create_kvm_event(kvm, key, sample);

	vcpu_record->last_event = event;

	return true;
}

static bool skip_event(const char *event)
{
	const char * const *skip_events;

	for (skip_events = kvm_skip_events; *skip_events; skip_events++)
		if (!strcmp(event, *skip_events))
			return true;

	return false;
}

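/*
 * Pair an end event with the begin event recorded for the same vcpu: the
 * elapsed time feeds the per-event statistics, and events exceeding the
 * --duration threshold are reported immediately.
 */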
static bool handle_end_event(struct perf_kvm_stat *kvm,
			     struct vcpu_event_record *vcpu_record,
			     struct event_key *key,
			     struct perf_sample *sample)
{
	struct kvm_event *event;
	u64 time_begin, time_diff;
	int vcpu;

	if (kvm->trace_vcpu == -1)
		vcpu = -1;
	else
		vcpu = vcpu_record->vcpu_id;

	event = vcpu_record->last_event;
	time_begin = vcpu_record->start_time;

	/* The begin event is not caught. */
	if (!time_begin)
		return true;

	/*
	 * In some case, the 'begin event' only records the start timestamp,
	 * the actual event is recognized in the 'end event' (e.g. mmio-event).
	 */

	/* Both begin and end events did not get the key. */
	if (!event && key->key == INVALID_KEY)
		return true;

	if (!event)
		event = find_create_kvm_event(kvm, key, sample);

	if (!event)
		return false;

	vcpu_record->last_event = NULL;
	vcpu_record->start_time = 0;

	/* seems to happen once in a while during live mode */
	if (sample->time < time_begin) {
		pr_debug("End time before begin time; skipping event.\n");
		return true;
	}

	time_diff = sample->time - time_begin;

	if (kvm->duration && time_diff > kvm->duration) {
		char decode[KVM_EVENT_NAME_LEN];

		kvm->events_ops->decode_key(kvm, &event->key, decode);
		if (!skip_event(decode)) {
			pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n",
				 sample->time, sample->pid, vcpu_record->vcpu_id,
				 decode, time_diff / NSEC_PER_USEC);
		}
	}

	return update_kvm_event(kvm, event, vcpu, time_diff);
}

static
struct vcpu_event_record *per_vcpu_record(struct thread *thread,
					  struct evsel *evsel,
					  struct perf_sample *sample)
{
	/* Only kvm_entry records vcpu id. */
	if (!thread__priv(thread) && kvm_entry_event(evsel)) {
		struct vcpu_event_record *vcpu_record;

		vcpu_record = zalloc(sizeof(*vcpu_record));
		if (!vcpu_record) {
			pr_err("%s: Not enough memory\n", __func__);
			return NULL;
		}

		vcpu_record->vcpu_id = evsel__intval(evsel, sample, vcpu_id_str);
		thread__set_priv(thread, vcpu_record);
	}

	return thread__priv(thread);
}

static bool handle_kvm_event(struct perf_kvm_stat *kvm,
			     struct thread *thread,
			     struct evsel *evsel,
			     struct perf_sample *sample)
{
	struct vcpu_event_record *vcpu_record;
	struct event_key key = { .key = INVALID_KEY,
				 .exit_reasons = kvm->exit_reasons };

	vcpu_record = per_vcpu_record(thread, evsel, sample);
	if (!vcpu_record)
		return true;

	/* only process events for vcpus user cares about */
	if ((kvm->trace_vcpu != -1) &&
	    (kvm->trace_vcpu != vcpu_record->vcpu_id))
		return true;

	if (kvm->events_ops->is_begin_event(evsel, sample, &key))
		return handle_begin_event(kvm, vcpu_record, &key, sample);

	if (is_child_event(kvm, evsel, sample, &key))
		return handle_child_event(kvm, vcpu_record, &key, sample);

	if (kvm->events_ops->is_end_event(evsel, sample, &key))
		return handle_end_event(kvm, vcpu_record, &key, sample);

	return true;
}

static bool is_valid_key(struct perf_kvm_stat *kvm)
{
	static const char *key_array[] = {
		"ev_name", "sample", "time", "max_t", "min_t", "mean_t",
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(key_array); i++)
		if (!strcmp(key_array[i], kvm->sort_key))
			return true;

	pr_err("Unsupported sort key: %s\n", kvm->sort_key);
	return false;
}

static bool event_is_valid(struct kvm_event *event, int vcpu)
{
	return !!get_event_count(event, vcpu);
}

static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
{
	struct kvm_event *event;
	struct perf_kvm_stat *perf_kvm;

	event = container_of(he, struct kvm_event, he);
	perf_kvm = event->perf_kvm;
	if (!event_is_valid(event, perf_kvm->trace_vcpu))
		he->filtered = 1;
	else
		he->filtered = 0;
	return 0;
}

static void sort_result(struct perf_kvm_stat *kvm)
{
	struct ui_progress prog;
	const char *output_columns = "ev_name,sample,percent_sample,"
				     "time,percent_time,max_t,min_t,mean_t";

	kvm_hists__reinit(output_columns, kvm->sort_key);
	ui_progress__init(&prog, kvm_hists.hists.nr_entries, "Sorting...");
	hists__collapse_resort(&kvm_hists.hists, NULL);
	hists__output_resort_cb(&kvm_hists.hists, NULL, filter_cb);
	ui_progress__finish();
}

static void print_vcpu_info(struct perf_kvm_stat *kvm)
{
	int vcpu = kvm->trace_vcpu;

	pr_info("Analyze events for ");

	if (kvm->opts.target.system_wide)
		pr_info("all VMs, ");
	else if (kvm->opts.target.pid)
		pr_info("pid(s) %s, ", kvm->opts.target.pid);
	else
		pr_info("dazed and confused on what is monitored, ");

	if (vcpu == -1)
		pr_info("all VCPUs:\n\n");
	else
		pr_info("VCPU %d:\n\n", vcpu);
}

static void show_timeofday(void)
{
	char date[64];
	struct timeval tv;
	struct tm ltime;

	gettimeofday(&tv, NULL);
	if (localtime_r(&tv.tv_sec, &ltime)) {
		strftime(date, sizeof(date), "%H:%M:%S", &ltime);
		pr_info("%s.%06ld", date, tv.tv_usec);
	} else
		pr_info("00:00:00.000000");

	return;
}

static void print_result(struct perf_kvm_stat *kvm)
{
	char decode[KVM_EVENT_NAME_LEN];
	struct kvm_event *event;
	int vcpu = kvm->trace_vcpu;
	struct rb_node *nd;

	if (kvm->live) {
		puts(CONSOLE_CLEAR);
		show_timeofday();
	}

	pr_info("\n\n");
	print_vcpu_info(kvm);
	pr_info("%*s ", KVM_EVENT_NAME_LEN, kvm->events_ops->name);
	pr_info("%10s ", "Samples");
	pr_info("%9s ", "Samples%");

	pr_info("%9s ", "Time%");
	pr_info("%11s ", "Min Time");
	pr_info("%11s ", "Max Time");
	pr_info("%16s ", "Avg time");
	pr_info("\n\n");

	for (nd = rb_first_cached(&kvm_hists.hists.entries); nd; nd = rb_next(nd)) {
		struct hist_entry *he;
		u64 ecount, etime, max, min;

		he = rb_entry(nd, struct hist_entry, rb_node);
		if (he->filtered)
			continue;

		event = container_of(he, struct kvm_event, he);
		ecount = get_event_count(event, vcpu);
		etime = get_event_time(event, vcpu);
		max = get_event_max(event, vcpu);
		min = get_event_min(event, vcpu);

		kvm->events_ops->decode_key(kvm, &event->key, decode);
		pr_info("%*s ", KVM_EVENT_NAME_LEN, decode);
		pr_info("%10llu ", (unsigned long long)ecount);
		pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
		pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
		pr_info("%9.2fus ", (double)min / NSEC_PER_USEC);
		pr_info("%9.2fus ", (double)max / NSEC_PER_USEC);
		pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount / NSEC_PER_USEC,
			kvm_event_rel_stddev(vcpu, event));
		pr_info("\n");
	}

	pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n",
		kvm->total_count, kvm->total_time / (double)NSEC_PER_USEC);

	if (kvm->lost_events)
		pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events);
}

#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
static int process_lost_event(const struct perf_tool *tool,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat, tool);

	kvm->lost_events++;
	return 0;
}
#endif

static bool skip_sample(struct perf_kvm_stat *kvm,
			struct perf_sample *sample)
{
	if (kvm->pid_list && intlist__find(kvm->pid_list, sample->pid) == NULL)
		return true;

	return false;
}

static int process_sample_event(const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	int err = 0;
	struct thread *thread;
	struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
						 tool);

	if (skip_sample(kvm, sample))
		return 0;

	if (machine__resolve(machine, &kvm->al, sample) < 0) {
		pr_warning("Failed to resolve address location, skipping sample.\n");
		return 0;
	}

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	if (!handle_kvm_event(kvm, thread, evsel, sample))
		err = -1;

	thread__put(thread);
	return err;
}

static int cpu_isa_config(struct perf_kvm_stat *kvm)
{
	char buf[128], *cpuid;
	int err;

	if (kvm->live) {
		struct perf_cpu cpu = {-1};

		err = get_cpuid(buf, sizeof(buf), cpu);
		if (err != 0) {
			pr_err("Failed to look up CPU type: %s\n",
			       str_error_r(err, buf, sizeof(buf)));
			return -err;
		}
		cpuid = buf;
	} else
		cpuid = kvm->session->header.env.cpuid;

	if (!cpuid) {
		pr_err("Failed to look up CPU type\n");
		return -EINVAL;
	}

	err = cpu_isa_init(kvm, cpuid);
	if (err == -ENOTSUP)
		pr_err("CPU %s is not supported.\n", cpuid);

	return err;
}

static bool verify_vcpu(int vcpu)
{
	if (vcpu != -1 && vcpu < 0) {
		pr_err("Invalid vcpu:%d.\n", vcpu);
		return false;
	}

	return true;
}

#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
/* keeping the max events to a modest level to keep
 * the processing of samples per mmap smooth.
 */
#define PERF_KVM__MAX_EVENTS_PER_MMAP  25

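/*
 * Drain one ring buffer: parse each event's timestamp and queue it into
 * the session's ordered-events machinery.  At most
 * PERF_KVM__MAX_EVENTS_PER_MMAP events are taken per call so a busy mmap
 * cannot starve the others; *mmap_time reports the first (smallest)
 * timestamp seen, which bounds the flush in the caller.
 */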
static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
				   u64 *mmap_time)
{
	struct evlist *evlist = kvm->evlist;
	union perf_event *event;
	struct mmap *md;
	u64 timestamp;
	s64 n = 0;
	int err;

	*mmap_time = ULLONG_MAX;
	md = &evlist->mmap[idx];
	err = perf_mmap__read_init(&md->core);
	if (err < 0)
		return (err == -EAGAIN) ? 0 : -1;

	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		err = evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (err) {
			perf_mmap__consume(&md->core);
			pr_err("Failed to parse sample\n");
			return -1;
		}

		err = perf_session__queue_event(kvm->session, event, timestamp, 0, NULL);
		/*
		 * FIXME: Here we can't consume the event, as perf_session__queue_event will
		 *        point to it, and it'll get possibly overwritten by the kernel.
		 */
		perf_mmap__consume(&md->core);

		if (err) {
			pr_err("Failed to enqueue sample: %d\n", err);
			return -1;
		}

		/* save time stamp of our first sample for this mmap */
		if (n == 0)
			*mmap_time = timestamp;

		/* limit events per mmap handled all at once */
		n++;
		if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
			break;
	}

	perf_mmap__read_done(&md->core);
	return n;
}

static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
{
	int i, err, throttled = 0;
	s64 n, ntotal = 0;
	u64 flush_time = ULLONG_MAX, mmap_time;

	for (i = 0; i < kvm->evlist->core.nr_mmaps; i++) {
		n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time);
		if (n < 0)
			return -1;

		/* flush time is going to be the minimum of all the individual
		 * mmap times. Essentially, we flush all the samples queued up
		 * from the last pass under our minimal start time -- that leaves
		 * a very small race for samples to come in with a lower timestamp.
		 * The ioctl to return the perf_clock timestamp should close the
		 * race entirely.
		 */
		if (mmap_time < flush_time)
			flush_time = mmap_time;

		ntotal += n;
		if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
			throttled = 1;
	}

	/* flush queue after each round in which we processed events */
	if (ntotal) {
		struct ordered_events *oe = &kvm->session->ordered_events;

		oe->next_flush = flush_time;
		err = ordered_events__flush(oe, OE_FLUSH__ROUND);
		if (err) {
			if (kvm->lost_events)
				pr_info("\nLost events: %" PRIu64 "\n\n",
					kvm->lost_events);
			return err;
		}
	}

	return throttled;
}

static volatile int done;

static void sig_handler(int sig __maybe_unused)
{
	done = 1;
}

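/*
 * Arm a non-blocking periodic timerfd that fires every 'display_time'
 * seconds; the live loop polls it alongside the event mmaps and redraws
 * the statistics whenever it expires.
 */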
static int perf_kvm__timerfd_create(struct perf_kvm_stat *kvm)
{
	struct itimerspec new_value;
	int rc = -1;

	kvm->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
	if (kvm->timerfd < 0) {
		pr_err("timerfd_create failed\n");
		goto out;
	}

	new_value.it_value.tv_sec = kvm->display_time;
	new_value.it_value.tv_nsec = 0;
	new_value.it_interval.tv_sec = kvm->display_time;
	new_value.it_interval.tv_nsec = 0;

	if (timerfd_settime(kvm->timerfd, 0, &new_value, NULL) != 0) {
		pr_err("timerfd_settime failed: %d\n", errno);
		close(kvm->timerfd);
		goto out;
	}

	rc = 0;
out:
	return rc;
}

static int perf_kvm__handle_timerfd(struct perf_kvm_stat *kvm)
{
	uint64_t c;
	int rc;

	rc = read(kvm->timerfd, &c, sizeof(uint64_t));
	if (rc < 0) {
		if (errno == EAGAIN)
			return 0;

		pr_err("Failed to read timer fd: %d\n", errno);
		return -1;
	}

	if (rc != sizeof(uint64_t)) {
		pr_err("Error reading timer fd - invalid size returned\n");
		return -1;
	}

	if (c != 1)
		pr_debug("Missed timer beats: %" PRIu64 "\n", c-1);

	/* update display */
	sort_result(kvm);
	print_result(kvm);

	/* Reset sort list to "ev_name" */
	kvm_hists__reinit(NULL, "ev_name");

	/* reset counts */
	clear_events_cache_stats();
	kvm->total_count = 0;
	kvm->total_time = 0;
	kvm->lost_events = 0;

	return 0;
}

static int fd_set_nonblock(int fd)
{
	long arg = 0;

	arg = fcntl(fd, F_GETFL);
	if (arg < 0) {
		pr_err("Failed to get current flags for fd %d\n", fd);
		return -1;
	}

	if (fcntl(fd, F_SETFL, arg | O_NONBLOCK) < 0) {
		pr_err("Failed to set non-block option on fd %d\n", fd);
		return -1;
	}

	return 0;
}

static int perf_kvm__handle_stdin(void)
{
	int c;

	c = getc(stdin);
	if (c == 'q')
		return 1;

	return 0;
}

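/*
 * Main loop for 'perf kvm stat live': read samples from the mmaps, let
 * the timerfd drive periodic display refreshes, and watch stdin so 'q'
 * (or SIGINT/SIGTERM) ends the session with a final report.
 */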
static int kvm_events_live_report(struct perf_kvm_stat *kvm)
{
	int nr_stdin, ret, err = -EINVAL;
	struct termios save;

	/* live flag must be set first */
	kvm->live = true;

	ret = cpu_isa_config(kvm);
	if (ret < 0)
		return ret;

	if (!verify_vcpu(kvm->trace_vcpu) ||
	    !is_valid_key(kvm) ||
	    !register_kvm_events_ops(kvm)) {
		goto out;
	}

	set_term_quiet_input(&save);

	kvm_hists__init();

	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	/* add timer fd */
	if (perf_kvm__timerfd_create(kvm) < 0) {
		err = -1;
		goto out;
	}

	if (evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0)
		goto out;

	nr_stdin = evlist__add_pollfd(kvm->evlist, fileno(stdin));
	if (nr_stdin < 0)
		goto out;

	if (fd_set_nonblock(fileno(stdin)) != 0)
		goto out;

	/* everything is good - enable the events and process */
	evlist__enable(kvm->evlist);

	while (!done) {
		struct fdarray *fda = &kvm->evlist->core.pollfd;
		int rc;

		rc = perf_kvm__mmap_read(kvm);
		if (rc < 0)
			break;

		err = perf_kvm__handle_timerfd(kvm);
		if (err)
			goto out;

		if (fda->entries[nr_stdin].revents & POLLIN)
			done = perf_kvm__handle_stdin();

		if (!rc && !done)
			err = evlist__poll(kvm->evlist, 100);
	}

	evlist__disable(kvm->evlist);

	if (err == 0) {
		sort_result(kvm);
		print_result(kvm);
	}

out:
	hists__delete_entries(&kvm_hists.hists);

	if (kvm->timerfd >= 0)
		close(kvm->timerfd);

	tcsetattr(0, TCSAFLUSH, &save);
	return err;
}

static int kvm_live_open_events(struct perf_kvm_stat *kvm)
{
	int err, rc = -1;
	struct evsel *pos;
	struct evlist *evlist = kvm->evlist;
	char sbuf[STRERR_BUFSIZE];

	evlist__config(evlist, &kvm->opts, NULL);

	/*
	 * Note: exclude_{guest,host} do not apply here.
	 *       This command processes KVM tracepoints from host only
	 */
	evlist__for_each_entry(evlist, pos) {
		struct perf_event_attr *attr = &pos->core.attr;

		/* make sure these *are* set */
		evsel__set_sample_bit(pos, TID);
		evsel__set_sample_bit(pos, TIME);
		evsel__set_sample_bit(pos, CPU);
		evsel__set_sample_bit(pos, RAW);
		/* make sure these are *not*; want as small a sample as possible */
		evsel__reset_sample_bit(pos, PERIOD);
		evsel__reset_sample_bit(pos, IP);
		evsel__reset_sample_bit(pos, CALLCHAIN);
		evsel__reset_sample_bit(pos, ADDR);
		evsel__reset_sample_bit(pos, READ);
		attr->mmap = 0;
		attr->comm = 0;
		attr->task = 0;

		attr->sample_period = 1;

		attr->watermark = 0;
		attr->wakeup_events = 1000;

		/* will enable all once we are ready */
		attr->disabled = 1;
	}

	err = evlist__open(evlist);
	if (err < 0) {
		printf("Couldn't create the events: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out;
	}

	if (evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
		ui__error("Failed to mmap the events: %s\n",
			  str_error_r(errno, sbuf, sizeof(sbuf)));
		evlist__close(evlist);
		goto out;
	}

	rc = 0;

out:
	return rc;
}
#endif

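/*
 * Offline mode ('perf kvm stat report'): open the perf.data file written
 * by 'perf kvm stat record' and feed its samples through the same sample
 * handler as the live path.
 */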
static int read_events(struct perf_kvm_stat *kvm)
{
	int ret;

	struct perf_data file = {
		.path  = kvm->file_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = kvm->force,
	};

	perf_tool__init(&kvm->tool, /*ordered_events=*/true);
	kvm->tool.sample	= process_sample_event;
	kvm->tool.comm		= perf_event__process_comm;
	kvm->tool.namespaces	= perf_event__process_namespaces;

	kvm->session = perf_session__new(&file, &kvm->tool);
	if (IS_ERR(kvm->session)) {
		pr_err("Initializing perf session failed\n");
		return PTR_ERR(kvm->session);
	}

	symbol__init(&kvm->session->header.env);

	if (!perf_session__has_traces(kvm->session, "kvm record")) {
		ret = -EINVAL;
		goto out_delete;
	}

	/*
	 * Do not use 'isa' recorded in kvm_exit tracepoint since it is not
	 * traced in the old kernel.
	 */
	ret = cpu_isa_config(kvm);
	if (ret < 0)
		goto out_delete;

	ret = perf_session__process_events(kvm->session);

out_delete:
	perf_session__delete(kvm->session);
	return ret;
}

static int parse_target_str(struct perf_kvm_stat *kvm)
{
	if (kvm->opts.target.pid) {
		kvm->pid_list = intlist__new(kvm->opts.target.pid);
		if (kvm->pid_list == NULL) {
			pr_err("Error parsing process id string\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
{
	int ret = -EINVAL;
	int vcpu = kvm->trace_vcpu;

	if (parse_target_str(kvm) != 0)
		goto exit;

	if (!verify_vcpu(vcpu))
		goto exit;

	if (!is_valid_key(kvm))
		goto exit;

	if (!register_kvm_events_ops(kvm))
		goto exit;

	if (kvm->use_stdio) {
		use_browser = 0;
		setup_pager();
	} else {
		use_browser = 1;
	}

	setup_browser(false);

	kvm_hists__init();

	ret = read_events(kvm);
	if (ret)
		goto exit;

	sort_result(kvm);
	kvm_display(kvm);

exit:
	hists__delete_entries(&kvm_hists.hists);
	return ret;
}

#define STRDUP_FAIL_EXIT(s)		\
	({	char *_p;		\
		_p = strdup(s);		\
		if (!_p)		\
			return -ENOMEM;	\
		_p;			\
	})

int __weak setup_kvm_events_tp(struct perf_kvm_stat *kvm __maybe_unused)
{
	return 0;
}

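/*
 * 'perf kvm stat record' is a thin wrapper around 'perf record': build an
 * argv with the KVM tracepoints and fixed record arguments, hide or
 * disable options that make no sense here, then hand over to cmd_record().
 */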
static int
kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
	unsigned int rec_argc, i, j, events_tp_size;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-R",
		"-m", "1024",
		"-c", "1",
	};
	const char * const kvm_stat_record_usage[] = {
		"perf kvm stat record [<options>]",
		NULL
	};
	const char * const *events_tp;
	int ret;

	events_tp_size = 0;
	ret = setup_kvm_events_tp(kvm);
	if (ret < 0) {
		pr_err("Unable to setup the kvm tracepoints\n");
		return ret;
	}

	for (events_tp = kvm_events_tp; *events_tp; events_tp++)
		events_tp_size++;

	rec_argc = ARRAY_SIZE(record_args) + argc + 2 +
		   2 * events_tp_size;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);

	for (j = 0; j < events_tp_size; j++) {
		rec_argv[i++] = "-e";
		rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]);
	}

	rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
	rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	set_option_flag(record_options, 'e', "event", PARSE_OPT_HIDDEN);
	set_option_flag(record_options, 0, "filter", PARSE_OPT_HIDDEN);
	set_option_flag(record_options, 'R', "raw-samples", PARSE_OPT_HIDDEN);

	set_option_flag(record_options, 'F', "freq", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 0, "group", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'g', NULL, PARSE_OPT_DISABLED);
	set_option_flag(record_options, 0, "call-graph", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'd', "data", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'T', "timestamp", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'P', "period", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'n', "no-samples", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'N', "no-buildid-cache", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'B', "no-buildid", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'G', "cgroup", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'b', "branch-any", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'j', "branch-filter", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'W', "weight", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 0, "transaction", PARSE_OPT_DISABLED);

	record_usage = kvm_stat_record_usage;
	return cmd_record(i, rec_argv);
}

static int
kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
	const struct option kvm_events_report_options[] = {
		OPT_STRING(0, "event", &kvm->report_event, "report event",
			   "event for reporting: vmexit, "
			   "mmio (x86 only), ioport (x86 only)"),
		OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
			    "vcpu id to report"),
		OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
			    "key for sorting: sample (sort by samples number)"
			    " time (sort by avg time)"),
		OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
			   "analyze events only for given process id(s)"),
		OPT_BOOLEAN('f', "force", &kvm->force, "don't complain, do it"),
		OPT_BOOLEAN(0, "stdio", &kvm->use_stdio, "use the stdio interface"),
		OPT_END()
	};

	const char * const kvm_events_report_usage[] = {
		"perf kvm stat report [<options>]",
		NULL
	};

	if (argc) {
		argc = parse_options(argc, argv,
				     kvm_events_report_options,
				     kvm_events_report_usage, 0);
		if (argc)
			usage_with_options(kvm_events_report_usage,
					   kvm_events_report_options);
	}

#ifndef HAVE_SLANG_SUPPORT
	kvm->use_stdio = true;
#endif

	if (!kvm->opts.target.pid)
		kvm->opts.target.system_wide = true;

	return kvm_events_report_vcpu(kvm);
}

#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
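/*
 * Build the evlist for live mode from kvm_events_tp: each entry is a
 * "subsystem:name" tracepoint string that is split in place and added
 * with evlist__add_newtp().
 */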
kvm_live_event_list(void)1769 static struct evlist *kvm_live_event_list(void)
1770 {
1771 	struct evlist *evlist;
1772 	char *tp, *name, *sys;
1773 	int err = -1;
1774 	const char * const *events_tp;
1775 
1776 	evlist = evlist__new();
1777 	if (evlist == NULL)
1778 		return NULL;
1779 
1780 	for (events_tp = kvm_events_tp; *events_tp; events_tp++) {
1781 
1782 		tp = strdup(*events_tp);
1783 		if (tp == NULL)
1784 			goto out;
1785 
1786 		/* split tracepoint into subsystem and name */
1787 		sys = tp;
1788 		name = strchr(tp, ':');
1789 		if (name == NULL) {
1790 			pr_err("Error parsing %s tracepoint: subsystem delimiter not found\n",
1791 			       *events_tp);
1792 			free(tp);
1793 			goto out;
1794 		}
1795 		*name = '\0';
1796 		name++;
1797 
1798 		if (evlist__add_newtp(evlist, sys, name, NULL)) {
1799 			pr_err("Failed to add %s tracepoint to the list\n", *events_tp);
1800 			free(tp);
1801 			goto out;
1802 		}
1803 
1804 		free(tp);
1805 	}
1806 
1807 	err = 0;
1808 
1809 out:
1810 	if (err) {
1811 		evlist__delete(evlist);
1812 		evlist = NULL;
1813 	}
1814 
1815 	return evlist;
1816 }
1817 
kvm_events_live(struct perf_kvm_stat * kvm,int argc,const char ** argv)1818 static int kvm_events_live(struct perf_kvm_stat *kvm,
1819 			   int argc, const char **argv)
1820 {
1821 	char errbuf[BUFSIZ];
1822 	int err;
1823 
1824 	const struct option live_options[] = {
1825 		OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
1826 			"record events on existing process id"),
1827 		OPT_CALLBACK('m', "mmap-pages", &kvm->opts.mmap_pages, "pages",
1828 			"number of mmap data pages", evlist__parse_mmap_pages),
1829 		OPT_INCR('v', "verbose", &verbose,
1830 			"be more verbose (show counter open errors, etc)"),
1831 		OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide,
1832 			"system-wide collection from all CPUs"),
1833 		OPT_UINTEGER('d', "display", &kvm->display_time,
1834 			"time in seconds between display updates"),
1835 		OPT_STRING(0, "event", &kvm->report_event, "report event",
1836 			"event for reporting: "
1837 			"vmexit, mmio (x86 only), ioport (x86 only)"),
1838 		OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
1839 			"vcpu id to report"),
1840 		OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
1841 			"key for sorting: sample(sort by samples number)"
1842 			" time (sort by avg time)"),
1843 		OPT_U64(0, "duration", &kvm->duration,
1844 			"show events other than"
1845 			" HLT (x86 only) or Wait state (s390 only)"
1846 			" that take longer than duration usecs"),
1847 		OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
1848 				"per thread proc mmap processing timeout in ms"),
1849 		OPT_END()
1850 	};
1851 	const char * const live_usage[] = {
1852 		"perf kvm stat live [<options>]",
1853 		NULL
1854 	};
1855 	struct perf_data data = {
1856 		.mode = PERF_DATA_MODE_WRITE,
1857 	};
1858 
1859 
	/* event handling */
	perf_tool__init(&kvm->tool, /*ordered_events=*/true);
	kvm->tool.sample = process_sample_event;
	kvm->tool.comm   = perf_event__process_comm;
	kvm->tool.exit   = perf_event__process_exit;
	kvm->tool.fork   = perf_event__process_fork;
	kvm->tool.lost   = process_lost_event;
	kvm->tool.namespaces  = perf_event__process_namespaces;

	/* set defaults */
	kvm->display_time = 1;
	kvm->opts.user_interval = 1;
	kvm->opts.mmap_pages = 512;
	kvm->opts.target.uses_mmap = false;
	kvm->opts.target.uid_str = NULL;
	kvm->opts.target.uid = UINT_MAX;

	symbol__init(NULL);
	disable_buildid_cache();

	use_browser = 0;

	if (argc) {
		argc = parse_options(argc, argv, live_options,
				     live_usage, 0);
		if (argc)
			usage_with_options(live_usage, live_options);
	}

	kvm->duration *= NSEC_PER_USEC;   /* convert usec to nsec */

	/* target-related setup */
	err = target__validate(&kvm->opts.target);
	if (err) {
		target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	if (target__none(&kvm->opts.target))
		kvm->opts.target.system_wide = true;

	/* generate the event list */
	err = setup_kvm_events_tp(kvm);
	if (err < 0) {
		pr_err("Unable to set up the kvm tracepoints\n");
		return err;
	}

	kvm->evlist = kvm_live_event_list();
	if (kvm->evlist == NULL) {
		err = -1;
		goto out;
	}

	if (evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
		usage_with_options(live_usage, live_options);

	/* perf session */
	kvm->session = perf_session__new(&data, &kvm->tool);
	if (IS_ERR(kvm->session)) {
		err = PTR_ERR(kvm->session);
		goto out;
	}
	kvm->session->evlist = kvm->evlist;
	perf_session__set_id_hdr_size(kvm->session);
	ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
	machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
				    kvm->evlist->core.threads, true, false, 1);
	err = kvm_live_open_events(kvm);
	if (err)
		goto out;

	err = kvm_events_live_report(kvm);

out:
	perf_session__delete(kvm->session);
	kvm->session = NULL;
	evlist__delete(kvm->evlist);

	return err;
}
#endif

static void print_kvm_stat_usage(void)
{
	printf("Usage: perf kvm stat <command>\n\n");

	printf("# Available commands:\n");
	printf("\trecord: record kvm events\n");
	printf("\treport: report statistical data of kvm events\n");
	printf("\tlive:   live reporting of statistical data of kvm events\n");

	printf("\nOtherwise, it is an alias for 'perf stat':\n");
}

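/*
 * Dispatch 'perf kvm stat <command>'.  "record" and "report" accept
 * any prefix of at least three characters, "live" is matched on its
 * first four characters, and anything else falls through to plain
 * 'perf stat'.
 */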
static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
{
	struct perf_kvm_stat kvm = {
		.file_name	= file_name,
		.trace_vcpu	= -1,
		.report_event	= "vmexit",
		.sort_key	= "sample",
	};

	if (argc == 1) {
		print_kvm_stat_usage();
		goto perf_stat;
	}

	if (strlen(argv[1]) > 2 && strstarts("record", argv[1]))
		return kvm_events_record(&kvm, argc - 1, argv + 1);

	if (strlen(argv[1]) > 2 && strstarts("report", argv[1]))
		return kvm_events_report(&kvm, argc - 1, argv + 1);

#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
	if (!strncmp(argv[1], "live", 4))
		return kvm_events_live(&kvm, argc - 1, argv + 1);
#endif

perf_stat:
	return cmd_stat(argc, argv);
}
#endif /* HAVE_KVM_STAT_SUPPORT */

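/*
 * Weak stub overridable by the architectures: gives them a chance to
 * add their default events to the 'perf kvm stat record' command line
 * before the record argv is rebuilt.  The generic version adds
 * nothing and reports success.
 */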
int __weak kvm_add_default_arch_event(int *argc __maybe_unused,
				      const char **argv __maybe_unused)
{
	return 0;
}

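/*
 * Rewrite 'perf kvm ... record <args>' into a 'perf record' invocation
 * writing to file_name, i.e. an argv of the form:
 *
 *	{ "record", "-o", file_name, <args>... }
 */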
static int __cmd_record(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j, ret;
	const char **rec_argv;

	ret = kvm_add_default_arch_event(&argc, argv);
	if (ret)
		return -EINVAL;

	rec_argc = argc + 2;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;

	rec_argv[i++] = strdup("record");
	rec_argv[i++] = strdup("-o");
	rec_argv[i++] = strdup(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv);
}

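/*
 * Rewrite 'perf kvm ... report <args>' into 'perf report -i file_name
 * <args>' and hand it to the report builtin.
 */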
static int __cmd_report(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j;
	const char **rec_argv;

	rec_argc = argc + 2;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;

	rec_argv[i++] = strdup("report");
	rec_argv[i++] = strdup("-i");
	rec_argv[i++] = strdup(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_report(i, rec_argv);
}

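/*
 * Rewrite 'perf kvm ... buildid-list <args>' into 'perf buildid-list
 * -i file_name <args>' and hand it to the buildid-list builtin.
 */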
static int
__cmd_buildid_list(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j;
	const char **rec_argv;

	rec_argc = argc + 2;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;

	rec_argv[i++] = strdup("buildid-list");
	rec_argv[i++] = strdup("-i");
	rec_argv[i++] = strdup(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_buildid_list(i, rec_argv);
}

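/*
 * 'perf kvm' entry point.  Collection defaults to guest-only data
 * (perf_host cleared, perf_guest set); -i/-o point the subcommands at
 * a common perf.data file.  Subcommands may be abbreviated to any
 * prefix longer than two characters, except "top", which must match
 * exactly.
 */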
int cmd_kvm(int argc, const char **argv)
{
	const char *file_name = NULL;
	const struct option kvm_options[] = {
		OPT_STRING('i', "input", &file_name, "file",
			   "Input file name"),
		OPT_STRING('o', "output", &file_name, "file",
			   "Output file name"),
		OPT_BOOLEAN(0, "guest", &perf_guest,
			    "Collect guest OS data"),
		OPT_BOOLEAN(0, "host", &perf_host,
			    "Collect host OS data"),
		OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
			   "guest mount directory under which every guest OS"
			   " instance has a subdir"),
		OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
			   "file", "file saving guest OS vmlinux"),
		OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
			   "file", "file saving guest OS /proc/kallsyms"),
		OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
			   "file", "file saving guest OS /proc/modules"),
		OPT_BOOLEAN(0, "guest-code", &symbol_conf.guest_code,
			    "Guest code can be found in hypervisor process"),
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show counter open errors, etc)"),
		OPT_END()
	};

	const char *const kvm_subcommands[] = { "top", "record", "report", "diff",
						"buildid-list", "stat", NULL };
	const char *kvm_usage[] = { NULL, NULL };

	exclude_GH_default = true;
	perf_host  = 0;
	perf_guest = 1;

	argc = parse_options_subcommand(argc, argv, kvm_options, kvm_subcommands, kvm_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(kvm_usage, kvm_options);

	if (!perf_host)
		perf_guest = 1;

	if (!file_name) {
		file_name = get_filename_for_perf_kvm();

		if (!file_name) {
			pr_err("Failed to allocate memory for filename\n");
			return -ENOMEM;
		}
	}

	if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
		return __cmd_record(file_name, argc, argv);
	else if (strlen(argv[0]) > 2 && strstarts("report", argv[0]))
		return __cmd_report(file_name, argc, argv);
	else if (strlen(argv[0]) > 2 && strstarts("diff", argv[0]))
		return cmd_diff(argc, argv);
	else if (!strcmp(argv[0], "top"))
		return cmd_top(argc, argv);
	else if (strlen(argv[0]) > 2 && strstarts("buildid-list", argv[0]))
		return __cmd_buildid_list(file_name, argc, argv);
#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
	else if (strlen(argv[0]) > 2 && strstarts("stat", argv[0]))
		return kvm_cmd_stat(file_name, argc, argv);
#endif
	else
		usage_with_options(kvm_usage, kvm_options);

	/* free usage string allocated by parse_options_subcommand */
	free((void *)kvm_usage[0]);

	return 0;
}