1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * CTF writing support via babeltrace.
4 *
5 * Copyright (C) 2014, Jiri Olsa <[email protected]>
6 * Copyright (C) 2014, Sebastian Andrzej Siewior <[email protected]>
7 */
8
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <linux/compiler.h>
12 #include <linux/kernel.h>
13 #include <linux/zalloc.h>
14 #include <babeltrace/ctf-writer/writer.h>
15 #include <babeltrace/ctf-writer/clock.h>
16 #include <babeltrace/ctf-writer/stream.h>
17 #include <babeltrace/ctf-writer/event.h>
18 #include <babeltrace/ctf-writer/event-types.h>
19 #include <babeltrace/ctf-writer/event-fields.h>
20 #include <babeltrace/ctf-ir/utils.h>
21 #include <babeltrace/ctf/events.h>
22 #include "asm/bug.h"
23 #include "data-convert.h"
24 #include "session.h"
25 #include "debug.h"
26 #include "tool.h"
27 #include "evlist.h"
28 #include "evsel.h"
29 #include "machine.h"
30 #include "config.h"
31 #include <linux/ctype.h>
32 #include <linux/err.h>
33 #include <linux/time64.h>
34 #include "util.h"
35 #include "clockid.h"
36 #include "util/sample.h"
37
38 #ifdef HAVE_LIBTRACEEVENT
39 #include <event-parse.h>
40 #endif
41
42 #define pr_N(n, fmt, ...) \
43 eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
44
45 #define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
46 #define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
47
48 #define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
49
50 struct evsel_priv {
51 struct bt_ctf_event_class *event_class;
52 };
53
54 #define MAX_CPUS 4096
55
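/*
 * One ctf_stream is created lazily per CPU (see ctf_stream()); 'count'
 * tracks how many events were appended since the last flush.
 */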
56 struct ctf_stream {
57 struct bt_ctf_stream *stream;
58 int cpu;
59 u32 count;
60 };
61
62 struct ctf_writer {
63 /* writer primitives */
64 struct bt_ctf_writer *writer;
65 struct ctf_stream **stream;
66 int stream_cnt;
67 struct bt_ctf_stream_class *stream_class;
68 struct bt_ctf_clock *clock;
69
70 /* data types */
71 union {
72 struct {
73 struct bt_ctf_field_type *s64;
74 struct bt_ctf_field_type *u64;
75 struct bt_ctf_field_type *s32;
76 struct bt_ctf_field_type *u32;
77 struct bt_ctf_field_type *string;
78 struct bt_ctf_field_type *u32_hex;
79 struct bt_ctf_field_type *u64_hex;
80 };
81 struct bt_ctf_field_type *array[7]; /* must cover all named types above */
82 } data;
83 struct bt_ctf_event_class *comm_class;
84 struct bt_ctf_event_class *exit_class;
85 struct bt_ctf_event_class *fork_class;
86 struct bt_ctf_event_class *mmap_class;
87 struct bt_ctf_event_class *mmap2_class;
88 };
89
90 struct convert {
91 struct perf_tool tool;
92 struct ctf_writer writer;
93
94 u64 events_size;
95 u64 events_count;
96 u64 non_sample_count;
97
98 /* Ordered events configured queue size. */
99 u64 queue_size;
100 };
101
102 static int value_set(struct bt_ctf_field_type *type,
103 struct bt_ctf_event *event,
104 const char *name, u64 val)
105 {
106 struct bt_ctf_field *field;
107 bool sign = bt_ctf_field_type_integer_get_signed(type);
108 int ret;
109
110 field = bt_ctf_field_create(type);
111 if (!field) {
112 pr_err("failed to create a field %s\n", name);
113 return -1;
114 }
115
116 if (sign) {
117 ret = bt_ctf_field_signed_integer_set_value(field, val);
118 if (ret) {
119 pr_err("failed to set field value %s\n", name);
120 goto err;
121 }
122 } else {
123 ret = bt_ctf_field_unsigned_integer_set_value(field, val);
124 if (ret) {
125 pr_err("failed to set field value %s\n", name);
126 goto err;
127 }
128 }
129
130 ret = bt_ctf_event_set_payload(event, name, field);
131 if (ret) {
132 pr_err("failed to set payload %s\n", name);
133 goto err;
134 }
135
136 pr2(" SET [%s = %" PRIu64 "]\n", name, val);
137
138 err:
139 bt_ctf_field_put(field);
140 return ret;
141 }
142
143 #define __FUNC_VALUE_SET(_name, _val_type) \
144 static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \
145 struct bt_ctf_event *event, \
146 const char *name, \
147 _val_type val) \
148 { \
149 struct bt_ctf_field_type *type = cw->data._name; \
150 return value_set(type, event, name, (u64) val); \
151 }
152
153 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
154
155 FUNC_VALUE_SET(s32)
156 FUNC_VALUE_SET(u32)
157 FUNC_VALUE_SET(s64)
158 FUNC_VALUE_SET(u64)
159 __FUNC_VALUE_SET(u64_hex, u64)
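/*
 * The expansions above provide value_set_s32(), value_set_u32(),
 * value_set_s64(), value_set_u64() and value_set_u64_hex(), each picking
 * the matching field type from cw->data before calling value_set().
 */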
160
161 static int string_set_value(struct bt_ctf_field *field, const char *string);
162 static __maybe_unused int
163 value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
164 const char *name, const char *string)
165 {
166 struct bt_ctf_field_type *type = cw->data.string;
167 struct bt_ctf_field *field;
168 int ret = 0;
169
170 field = bt_ctf_field_create(type);
171 if (!field) {
172 pr_err("failed to create a field %s\n", name);
173 return -1;
174 }
175
176 ret = string_set_value(field, string);
177 if (ret) {
178 pr_err("failed to set value %s\n", name);
179 goto err_put_field;
180 }
181
182 ret = bt_ctf_event_set_payload(event, name, field);
183 if (ret)
184 pr_err("failed to set payload %s\n", name);
185
186 err_put_field:
187 bt_ctf_field_put(field);
188 return ret;
189 }
190
191 static struct bt_ctf_field_type*
192 get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
193 {
194 unsigned long flags = field->flags;
195
196 if (flags & TEP_FIELD_IS_STRING)
197 return cw->data.string;
198
199 if (!(flags & TEP_FIELD_IS_SIGNED)) {
200 /* unsigned longs are mostly pointers */
201 if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
202 return cw->data.u64_hex;
203 }
204
205 if (flags & TEP_FIELD_IS_SIGNED) {
206 if (field->size == 8)
207 return cw->data.s64;
208 else
209 return cw->data.s32;
210 }
211
212 if (field->size == 8)
213 return cw->data.u64;
214 else
215 return cw->data.u32;
216 }
217
218 static unsigned long long adjust_signedness(unsigned long long value_int, int size)
219 {
220 unsigned long long value_mask;
221
222 /*
223 * value_mask = (1 << (size * 8 - 1)) - 1.
224 * Set value_mask to explicit constants for readability.
225 */
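/*
 * For example, adjust_signedness(0xfe, 1) returns 0xfffffffffffffffe
 * (-2 as a long long) because the sign bit 0x80 is set, while
 * adjust_signedness(0x7e, 1) is returned unchanged.
 */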
226 switch (size) {
227 case 1:
228 value_mask = 0x7fULL;
229 break;
230 case 2:
231 value_mask = 0x7fffULL;
232 break;
233 case 4:
234 value_mask = 0x7fffffffULL;
235 break;
236 case 8:
237 /*
238 * For a 64-bit value, return it as is. There is no
239 * need to fill the high bits.
240 */
241 /* Fall through */
242 default:
243 /* BUG! */
244 return value_int;
245 }
246
247 /* If it is a positive value, don't adjust. */
248 if ((value_int & (~0ULL - value_mask)) == 0)
249 return value_int;
250
251 /* Fill upper part of value_int with 1 to make it a negative long long. */
252 return (value_int & value_mask) | ~value_mask;
253 }
254
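/*
 * Copy 'string' into 'field', replacing each non-printable byte with a
 * 4-character "\xNN" escape. The temporary buffer below is sized for the
 * worst case: the printable prefix plus 4 bytes for every remaining
 * character, plus the terminating NUL.
 */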
255 static int string_set_value(struct bt_ctf_field *field, const char *string)
256 {
257 char *buffer = NULL;
258 size_t len = strlen(string), i, p;
259 int err;
260
261 for (i = p = 0; i < len; i++, p++) {
262 if (isprint(string[i])) {
263 if (!buffer)
264 continue;
265 buffer[p] = string[i];
266 } else {
267 char numstr[5];
268
269 snprintf(numstr, sizeof(numstr), "\\x%02x",
270 (unsigned int)(string[i]) & 0xff);
271
272 if (!buffer) {
273 buffer = zalloc(i + (len - i) * 4 + 2);
274 if (!buffer) {
275 pr_err("failed to set unprintable string '%s'\n", string);
276 return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
277 }
278 if (i > 0)
279 strncpy(buffer, string, i);
280 }
281 memcpy(buffer + p, numstr, 4);
282 p += 3;
283 }
284 }
285
286 if (!buffer)
287 return bt_ctf_field_string_set_value(field, string);
288 err = bt_ctf_field_string_set_value(field, buffer);
289 free(buffer);
290 return err;
291 }
292
293 static int add_tracepoint_field_value(struct ctf_writer *cw,
294 struct bt_ctf_event_class *event_class,
295 struct bt_ctf_event *event,
296 struct perf_sample *sample,
297 struct tep_format_field *fmtf)
298 {
299 struct bt_ctf_field_type *type;
300 struct bt_ctf_field *array_field;
301 struct bt_ctf_field *field;
302 const char *name = fmtf->name;
303 void *data = sample->raw_data;
304 unsigned long flags = fmtf->flags;
305 unsigned int n_items;
306 unsigned int i;
307 unsigned int offset;
308 unsigned int len;
309 int ret;
310
311 name = fmtf->alias;
312 offset = fmtf->offset;
313 len = fmtf->size;
314 if (flags & TEP_FIELD_IS_STRING)
315 flags &= ~TEP_FIELD_IS_ARRAY;
316
317 if (flags & TEP_FIELD_IS_DYNAMIC) {
318 unsigned long long tmp_val;
319
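/*
 * A dynamic (__data_loc) field stores a 32-bit descriptor at the
 * field offset: the low 16 bits give the offset of the payload
 * within the record, the high 16 bits its length.
 */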
320 tmp_val = tep_read_number(fmtf->event->tep,
321 data + offset, len);
322 offset = tmp_val;
323 len = offset >> 16;
324 offset &= 0xffff;
325 if (tep_field_is_relative(flags))
326 offset += fmtf->offset + fmtf->size;
327 }
328
329 if (flags & TEP_FIELD_IS_ARRAY) {
330
331 type = bt_ctf_event_class_get_field_by_name(
332 event_class, name);
333 array_field = bt_ctf_field_create(type);
334 bt_ctf_field_type_put(type);
335 if (!array_field) {
336 pr_err("Failed to create array type %s\n", name);
337 return -1;
338 }
339
340 len = fmtf->size / fmtf->arraylen;
341 n_items = fmtf->arraylen;
342 } else {
343 n_items = 1;
344 array_field = NULL;
345 }
346
347 type = get_tracepoint_field_type(cw, fmtf);
348
349 for (i = 0; i < n_items; i++) {
350 if (flags & TEP_FIELD_IS_ARRAY)
351 field = bt_ctf_field_array_get_field(array_field, i);
352 else
353 field = bt_ctf_field_create(type);
354
355 if (!field) {
356 pr_err("failed to create a field %s\n", name);
357 return -1;
358 }
359
360 if (flags & TEP_FIELD_IS_STRING)
361 ret = string_set_value(field, data + offset + i * len);
362 else {
363 unsigned long long value_int;
364
365 value_int = tep_read_number(
366 fmtf->event->tep,
367 data + offset + i * len, len);
368
369 if (!(flags & TEP_FIELD_IS_SIGNED))
370 ret = bt_ctf_field_unsigned_integer_set_value(
371 field, value_int);
372 else
373 ret = bt_ctf_field_signed_integer_set_value(
374 field, adjust_signedness(value_int, len));
375 }
376
377 if (ret) {
378 pr_err("failed to set file value %s\n", name);
379 goto err_put_field;
380 }
381 if (!(flags & TEP_FIELD_IS_ARRAY)) {
382 ret = bt_ctf_event_set_payload(event, name, field);
383 if (ret) {
384 pr_err("failed to set payload %s\n", name);
385 goto err_put_field;
386 }
387 }
388 bt_ctf_field_put(field);
389 }
390 if (flags & TEP_FIELD_IS_ARRAY) {
391 ret = bt_ctf_event_set_payload(event, name, array_field);
392 if (ret) {
393 pr_err("Failed add payload array %s\n", name);
394 return -1;
395 }
396 bt_ctf_field_put(array_field);
397 }
398 return 0;
399
400 err_put_field:
401 bt_ctf_field_put(field);
402 return -1;
403 }
404
405 static int add_tracepoint_fields_values(struct ctf_writer *cw,
406 struct bt_ctf_event_class *event_class,
407 struct bt_ctf_event *event,
408 struct tep_format_field *fields,
409 struct perf_sample *sample)
410 {
411 struct tep_format_field *field;
412 int ret;
413
414 for (field = fields; field; field = field->next) {
415 ret = add_tracepoint_field_value(cw, event_class, event, sample,
416 field);
417 if (ret)
418 return -1;
419 }
420 return 0;
421 }
422
423 static int add_tracepoint_values(struct ctf_writer *cw,
424 struct bt_ctf_event_class *event_class,
425 struct bt_ctf_event *event,
426 struct evsel *evsel,
427 struct perf_sample *sample)
428 {
429 const struct tep_event *tp_format = evsel__tp_format(evsel);
430 struct tep_format_field *common_fields = tp_format->format.common_fields;
431 struct tep_format_field *fields = tp_format->format.fields;
432 int ret;
433
434 ret = add_tracepoint_fields_values(cw, event_class, event,
435 common_fields, sample);
436 if (!ret)
437 ret = add_tracepoint_fields_values(cw, event_class, event,
438 fields, sample);
439
440 return ret;
441 }
442
443 static int
444 add_bpf_output_values(struct bt_ctf_event_class *event_class,
445 struct bt_ctf_event *event,
446 struct perf_sample *sample)
447 {
448 struct bt_ctf_field_type *len_type, *seq_type;
449 struct bt_ctf_field *len_field, *seq_field;
450 unsigned int raw_size = sample->raw_size;
451 unsigned int nr_elements = raw_size / sizeof(u32);
452 unsigned int i;
453 int ret;
454
455 if (nr_elements * sizeof(u32) != raw_size)
456 pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
457 raw_size, raw_size - nr_elements * sizeof(u32));
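/*
 * The BPF output payload is exported as a sequence of u32 words
 * ("raw_data") whose length is stored in "raw_len" (see
 * add_bpf_output_types()); trailing bytes that do not fill a whole
 * word are dropped.
 */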
458
459 len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
460 len_field = bt_ctf_field_create(len_type);
461 if (!len_field) {
462 pr_err("failed to create 'raw_len' for bpf output event\n");
463 ret = -1;
464 goto put_len_type;
465 }
466
467 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
468 if (ret) {
469 pr_err("failed to set field value for raw_len\n");
470 goto put_len_field;
471 }
472 ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
473 if (ret) {
474 pr_err("failed to set payload to raw_len\n");
475 goto put_len_field;
476 }
477
478 seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
479 seq_field = bt_ctf_field_create(seq_type);
480 if (!seq_field) {
481 pr_err("failed to create 'raw_data' for bpf output event\n");
482 ret = -1;
483 goto put_seq_type;
484 }
485
486 ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
487 if (ret) {
488 pr_err("failed to set length of 'raw_data'\n");
489 goto put_seq_field;
490 }
491
492 for (i = 0; i < nr_elements; i++) {
493 struct bt_ctf_field *elem_field =
494 bt_ctf_field_sequence_get_field(seq_field, i);
495
496 ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
497 ((u32 *)(sample->raw_data))[i]);
498
499 bt_ctf_field_put(elem_field);
500 if (ret) {
501 pr_err("failed to set raw_data[%d]\n", i);
502 goto put_seq_field;
503 }
504 }
505
506 ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
507 if (ret)
508 pr_err("failed to set payload for raw_data\n");
509
510 put_seq_field:
511 bt_ctf_field_put(seq_field);
512 put_seq_type:
513 bt_ctf_field_type_put(seq_type);
514 put_len_field:
515 bt_ctf_field_put(len_field);
516 put_len_type:
517 bt_ctf_field_type_put(len_type);
518 return ret;
519 }
520
521 static int
522 add_callchain_output_values(struct bt_ctf_event_class *event_class,
523 struct bt_ctf_event *event,
524 struct ip_callchain *callchain)
525 {
526 struct bt_ctf_field_type *len_type, *seq_type;
527 struct bt_ctf_field *len_field, *seq_field;
528 unsigned int nr_elements = callchain->nr;
529 unsigned int i;
530 int ret;
531
532 len_type = bt_ctf_event_class_get_field_by_name(
533 event_class, "perf_callchain_size");
534 len_field = bt_ctf_field_create(len_type);
535 if (!len_field) {
536 pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
537 ret = -1;
538 goto put_len_type;
539 }
540
541 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
542 if (ret) {
543 pr_err("failed to set field value for perf_callchain_size\n");
544 goto put_len_field;
545 }
546 ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
547 if (ret) {
548 pr_err("failed to set payload to perf_callchain_size\n");
549 goto put_len_field;
550 }
551
552 seq_type = bt_ctf_event_class_get_field_by_name(
553 event_class, "perf_callchain");
554 seq_field = bt_ctf_field_create(seq_type);
555 if (!seq_field) {
556 pr_err("failed to create 'perf_callchain' for callchain output event\n");
557 ret = -1;
558 goto put_seq_type;
559 }
560
561 ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
562 if (ret) {
563 pr_err("failed to set length of 'perf_callchain'\n");
564 goto put_seq_field;
565 }
566
567 for (i = 0; i < nr_elements; i++) {
568 struct bt_ctf_field *elem_field =
569 bt_ctf_field_sequence_get_field(seq_field, i);
570
571 ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
572 ((u64 *)(callchain->ips))[i]);
573
574 bt_ctf_field_put(elem_field);
575 if (ret) {
576 pr_err("failed to set callchain[%d]\n", i);
577 goto put_seq_field;
578 }
579 }
580
581 ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
582 if (ret)
583 pr_err("failed to set payload for raw_data\n");
584
585 put_seq_field:
586 bt_ctf_field_put(seq_field);
587 put_seq_type:
588 bt_ctf_field_type_put(seq_type);
589 put_len_field:
590 bt_ctf_field_put(len_field);
591 put_len_type:
592 bt_ctf_field_type_put(len_type);
593 return ret;
594 }
595
596 static int add_generic_values(struct ctf_writer *cw,
597 struct bt_ctf_event *event,
598 struct evsel *evsel,
599 struct perf_sample *sample)
600 {
601 u64 type = evsel->core.attr.sample_type;
602 int ret;
603
604 /*
605 * missing:
606 * PERF_SAMPLE_TIME - not needed as we have it in
607 * ctf event header
608 * PERF_SAMPLE_READ - TODO
609 * PERF_SAMPLE_RAW - tracepoint fields are handled separately
610 * PERF_SAMPLE_BRANCH_STACK - TODO
611 * PERF_SAMPLE_REGS_USER - TODO
612 * PERF_SAMPLE_STACK_USER - TODO
613 */
614
615 if (type & PERF_SAMPLE_IP) {
616 ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
617 if (ret)
618 return -1;
619 }
620
621 if (type & PERF_SAMPLE_TID) {
622 ret = value_set_s32(cw, event, "perf_tid", sample->tid);
623 if (ret)
624 return -1;
625
626 ret = value_set_s32(cw, event, "perf_pid", sample->pid);
627 if (ret)
628 return -1;
629 }
630
631 if ((type & PERF_SAMPLE_ID) ||
632 (type & PERF_SAMPLE_IDENTIFIER)) {
633 ret = value_set_u64(cw, event, "perf_id", sample->id);
634 if (ret)
635 return -1;
636 }
637
638 if (type & PERF_SAMPLE_STREAM_ID) {
639 ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
640 if (ret)
641 return -1;
642 }
643
644 if (type & PERF_SAMPLE_PERIOD) {
645 ret = value_set_u64(cw, event, "perf_period", sample->period);
646 if (ret)
647 return -1;
648 }
649
650 if (type & PERF_SAMPLE_WEIGHT) {
651 ret = value_set_u64(cw, event, "perf_weight", sample->weight);
652 if (ret)
653 return -1;
654 }
655
656 if (type & PERF_SAMPLE_DATA_SRC) {
657 ret = value_set_u64(cw, event, "perf_data_src",
658 sample->data_src);
659 if (ret)
660 return -1;
661 }
662
663 if (type & PERF_SAMPLE_TRANSACTION) {
664 ret = value_set_u64(cw, event, "perf_transaction",
665 sample->transaction);
666 if (ret)
667 return -1;
668 }
669
670 return 0;
671 }
672
673 static int ctf_stream__flush(struct ctf_stream *cs)
674 {
675 int err = 0;
676
677 if (cs) {
678 err = bt_ctf_stream_flush(cs->stream);
679 if (err)
680 pr_err("CTF stream %d flush failed\n", cs->cpu);
681
682 pr("Flush stream for cpu %d (%u samples)\n",
683 cs->cpu, cs->count);
684
685 cs->count = 0;
686 }
687
688 return err;
689 }
690
691 static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
692 {
693 struct ctf_stream *cs;
694 struct bt_ctf_field *pkt_ctx = NULL;
695 struct bt_ctf_field *cpu_field = NULL;
696 struct bt_ctf_stream *stream = NULL;
697 int ret;
698
699 cs = zalloc(sizeof(*cs));
700 if (!cs) {
701 pr_err("Failed to allocate ctf stream\n");
702 return NULL;
703 }
704
705 stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
706 if (!stream) {
707 pr_err("Failed to create CTF stream\n");
708 goto out;
709 }
710
711 pkt_ctx = bt_ctf_stream_get_packet_context(stream);
712 if (!pkt_ctx) {
713 pr_err("Failed to obtain packet context\n");
714 goto out;
715 }
716
717 cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
718 bt_ctf_field_put(pkt_ctx);
719 if (!cpu_field) {
720 pr_err("Failed to obtain cpu field\n");
721 goto out;
722 }
723
724 ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
725 if (ret) {
726 pr_err("Failed to update CPU number\n");
727 goto out;
728 }
729
730 bt_ctf_field_put(cpu_field);
731
732 cs->cpu = cpu;
733 cs->stream = stream;
734 return cs;
735
736 out:
737 if (cpu_field)
738 bt_ctf_field_put(cpu_field);
739 if (stream)
740 bt_ctf_stream_put(stream);
741
742 free(cs);
743 return NULL;
744 }
745
746 static void ctf_stream__delete(struct ctf_stream *cs)
747 {
748 if (cs) {
749 bt_ctf_stream_put(cs->stream);
750 free(cs);
751 }
752 }
753
754 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
755 {
756 struct ctf_stream *cs = cw->stream[cpu];
757
758 if (!cs) {
759 cs = ctf_stream__create(cw, cpu);
760 cw->stream[cpu] = cs;
761 }
762
763 return cs;
764 }
765
766 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
767 struct evsel *evsel)
768 {
769 int cpu = 0;
770
771 if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
772 cpu = sample->cpu;
773
774 if (cpu >= cw->stream_cnt) {
775 pr_err("Event was recorded for CPU %d, limit is at %d.\n",
776 cpu, cw->stream_cnt);
777 cpu = 0;
778 }
779
780 return cpu;
781 }
782
783 #define STREAM_FLUSH_COUNT 100000
784
785 /*
786 * Currently we have no other way to determine when
787 * to flush a stream than to keep track of the
788 * number of events and check it against a
789 * threshold.
790 */
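/*
 * Each flush (see ctf_stream__flush()) writes the buffered events out as a
 * CTF packet for the per-CPU stream and resets the per-stream counter.
 */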
791 static bool is_flush_needed(struct ctf_stream *cs)
792 {
793 return cs->count >= STREAM_FLUSH_COUNT;
794 }
795
796 static int process_sample_event(const struct perf_tool *tool,
797 union perf_event *_event,
798 struct perf_sample *sample,
799 struct evsel *evsel,
800 struct machine *machine __maybe_unused)
801 {
802 struct convert *c = container_of(tool, struct convert, tool);
803 struct evsel_priv *priv = evsel->priv;
804 struct ctf_writer *cw = &c->writer;
805 struct ctf_stream *cs;
806 struct bt_ctf_event_class *event_class;
807 struct bt_ctf_event *event;
808 int ret;
809 unsigned long type = evsel->core.attr.sample_type;
810
811 if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
812 return 0;
813
814 event_class = priv->event_class;
815
816 /* update stats */
817 c->events_count++;
818 c->events_size += _event->header.size;
819
820 pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
821
822 event = bt_ctf_event_create(event_class);
823 if (!event) {
824 pr_err("Failed to create an CTF event\n");
825 return -1;
826 }
827
828 bt_ctf_clock_set_time(cw->clock, sample->time);
829
830 ret = add_generic_values(cw, event, evsel, sample);
831 if (ret)
832 return -1;
833
834 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
835 ret = add_tracepoint_values(cw, event_class, event,
836 evsel, sample);
837 if (ret)
838 return -1;
839 }
840
841 if (type & PERF_SAMPLE_CALLCHAIN) {
842 ret = add_callchain_output_values(event_class,
843 event, sample->callchain);
844 if (ret)
845 return -1;
846 }
847
848 if (evsel__is_bpf_output(evsel)) {
849 ret = add_bpf_output_values(event_class, event, sample);
850 if (ret)
851 return -1;
852 }
853
854 cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
855 if (cs) {
856 if (is_flush_needed(cs))
857 ctf_stream__flush(cs);
858
859 cs->count++;
860 bt_ctf_stream_append_event(cs->stream, event);
861 }
862
863 bt_ctf_event_put(event);
864 return cs ? 0 : -1;
865 }
866
867 #define __NON_SAMPLE_SET_FIELD(_name, _type, _field) \
868 do { \
869 ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
870 if (ret) \
871 return -1; \
872 } while(0)
873
874 #define __FUNC_PROCESS_NON_SAMPLE(_name, body) \
875 static int process_##_name##_event(const struct perf_tool *tool, \
876 union perf_event *_event, \
877 struct perf_sample *sample, \
878 struct machine *machine) \
879 { \
880 struct convert *c = container_of(tool, struct convert, tool);\
881 struct ctf_writer *cw = &c->writer; \
882 struct bt_ctf_event_class *event_class = cw->_name##_class;\
883 struct bt_ctf_event *event; \
884 struct ctf_stream *cs; \
885 int ret; \
886 \
887 c->non_sample_count++; \
888 c->events_size += _event->header.size; \
889 event = bt_ctf_event_create(event_class); \
890 if (!event) { \
891 pr_err("Failed to create an CTF event\n"); \
892 return -1; \
893 } \
894 \
895 bt_ctf_clock_set_time(cw->clock, sample->time); \
896 body \
897 cs = ctf_stream(cw, 0); \
898 if (cs) { \
899 if (is_flush_needed(cs)) \
900 ctf_stream__flush(cs); \
901 \
902 cs->count++; \
903 bt_ctf_stream_append_event(cs->stream, event); \
904 } \
905 bt_ctf_event_put(event); \
906 \
907 return perf_event__process_##_name(tool, _event, sample, machine);\
908 }
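/*
 * Each expansion below defines a process_<name>_event() handler that first
 * emits a CTF "perf_<name>" event for the side-band record and then hands
 * the record to the regular perf_event__process_<name>() handler.
 */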
909
910 __FUNC_PROCESS_NON_SAMPLE(comm,
911 __NON_SAMPLE_SET_FIELD(comm, u32, pid);
912 __NON_SAMPLE_SET_FIELD(comm, u32, tid);
913 __NON_SAMPLE_SET_FIELD(comm, string, comm);
914 )
915 __FUNC_PROCESS_NON_SAMPLE(fork,
916 __NON_SAMPLE_SET_FIELD(fork, u32, pid);
917 __NON_SAMPLE_SET_FIELD(fork, u32, ppid);
918 __NON_SAMPLE_SET_FIELD(fork, u32, tid);
919 __NON_SAMPLE_SET_FIELD(fork, u32, ptid);
920 __NON_SAMPLE_SET_FIELD(fork, u64, time);
921 )
922
923 __FUNC_PROCESS_NON_SAMPLE(exit,
924 __NON_SAMPLE_SET_FIELD(fork, u32, pid);
925 __NON_SAMPLE_SET_FIELD(fork, u32, ppid);
926 __NON_SAMPLE_SET_FIELD(fork, u32, tid);
927 __NON_SAMPLE_SET_FIELD(fork, u32, ptid);
928 __NON_SAMPLE_SET_FIELD(fork, u64, time);
929 )
930 __FUNC_PROCESS_NON_SAMPLE(mmap,
931 __NON_SAMPLE_SET_FIELD(mmap, u32, pid);
932 __NON_SAMPLE_SET_FIELD(mmap, u32, tid);
933 __NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
934 __NON_SAMPLE_SET_FIELD(mmap, string, filename);
935 )
936 __FUNC_PROCESS_NON_SAMPLE(mmap2,
937 __NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
938 __NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
939 __NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
940 __NON_SAMPLE_SET_FIELD(mmap2, string, filename);
941 )
942 #undef __NON_SAMPLE_SET_FIELD
943 #undef __FUNC_PROCESS_NON_SAMPLE
944
945 /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
946 static char *change_name(char *name, char *orig_name, int dup)
947 {
948 char *new_name = NULL;
949 size_t len;
950
951 if (!name)
952 name = orig_name;
953
954 if (dup >= 10)
955 goto out;
956 /*
957 * Add a '_' prefix to a potential keyword. According to
958 * Mathieu Desnoyers (https://lore.kernel.org/lkml/[email protected]),
959 * a future CTF spec update may require us to use '$'.
960 */
961 if (dup < 0)
962 len = strlen(name) + sizeof("_");
963 else
964 len = strlen(orig_name) + sizeof("_dupl_X");
965
966 new_name = malloc(len);
967 if (!new_name)
968 goto out;
969
970 if (dup < 0)
971 snprintf(new_name, len, "_%s", name);
972 else
973 snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
974
975 out:
976 if (name != orig_name)
977 free(name);
978 return new_name;
979 }
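/*
 * For example, a tracepoint field named "event" (a reserved word in CTF)
 * becomes "_event", and a field clashing with an already-added name becomes
 * "<name>_dupl_1" ... "<name>_dupl_9" before giving up.
 */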
980
981 static int event_class_add_field(struct bt_ctf_event_class *event_class,
982 struct bt_ctf_field_type *type,
983 struct tep_format_field *field)
984 {
985 struct bt_ctf_field_type *t = NULL;
986 char *name;
987 int dup = 1;
988 int ret;
989
990 /* alias was already assigned */
991 if (field->alias != field->name)
992 return bt_ctf_event_class_add_field(event_class, type,
993 (char *)field->alias);
994
995 name = field->name;
996
997 /* If 'name' is a keyword, add a prefix. */
998 if (bt_ctf_validate_identifier(name))
999 name = change_name(name, field->name, -1);
1000
1001 if (!name) {
1002 pr_err("Failed to fix invalid identifier.");
1003 return -1;
1004 }
1005 while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
1006 bt_ctf_field_type_put(t);
1007 name = change_name(name, field->name, dup++);
1008 if (!name) {
1009 pr_err("Failed to create dup name for '%s'\n", field->name);
1010 return -1;
1011 }
1012 }
1013
1014 ret = bt_ctf_event_class_add_field(event_class, type, name);
1015 if (!ret)
1016 field->alias = name;
1017
1018 return ret;
1019 }
1020
1021 static int add_tracepoint_fields_types(struct ctf_writer *cw,
1022 struct tep_format_field *fields,
1023 struct bt_ctf_event_class *event_class)
1024 {
1025 struct tep_format_field *field;
1026 int ret;
1027
1028 for (field = fields; field; field = field->next) {
1029 struct bt_ctf_field_type *type;
1030 unsigned long flags = field->flags;
1031
1032 pr2(" field '%s'\n", field->name);
1033
1034 type = get_tracepoint_field_type(cw, field);
1035 if (!type)
1036 return -1;
1037
1038 /*
1039 * A string is an array of chars. For this we use the string
1040 * type and don't care that it is an array. What we don't
1041 * support is an array of strings.
1042 */
1043 if (flags & TEP_FIELD_IS_STRING)
1044 flags &= ~TEP_FIELD_IS_ARRAY;
1045
1046 if (flags & TEP_FIELD_IS_ARRAY)
1047 type = bt_ctf_field_type_array_create(type, field->arraylen);
1048
1049 ret = event_class_add_field(event_class, type, field);
1050
1051 if (flags & TEP_FIELD_IS_ARRAY)
1052 bt_ctf_field_type_put(type);
1053
1054 if (ret) {
1055 pr_err("Failed to add field '%s': %d\n",
1056 field->name, ret);
1057 return -1;
1058 }
1059 }
1060
1061 return 0;
1062 }
1063
1064 static int add_tracepoint_types(struct ctf_writer *cw,
1065 struct evsel *evsel,
1066 struct bt_ctf_event_class *class)
1067 {
1068 const struct tep_event *tp_format = evsel__tp_format(evsel);
1069 struct tep_format_field *common_fields = tp_format ? tp_format->format.common_fields : NULL;
1070 struct tep_format_field *fields = tp_format ? tp_format->format.fields : NULL;
1071 int ret;
1072
1073 ret = add_tracepoint_fields_types(cw, common_fields, class);
1074 if (!ret)
1075 ret = add_tracepoint_fields_types(cw, fields, class);
1076
1077 return ret;
1078 }
1079
1080 static int add_bpf_output_types(struct ctf_writer *cw,
1081 struct bt_ctf_event_class *class)
1082 {
1083 struct bt_ctf_field_type *len_type = cw->data.u32;
1084 struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
1085 struct bt_ctf_field_type *seq_type;
1086 int ret;
1087
1088 ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
1089 if (ret)
1090 return ret;
1091
1092 seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
1093 if (!seq_type)
1094 return -1;
1095
1096 return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
1097 }
1098
1099 static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
1100 struct bt_ctf_event_class *event_class)
1101 {
1102 u64 type = evsel->core.attr.sample_type;
1103
1104 /*
1105 * missing:
1106 * PERF_SAMPLE_TIME - not needed as we have it in
1107 * ctf event header
1108 * PERF_SAMPLE_READ - TODO
1109 * PERF_SAMPLE_CALLCHAIN - TODO
1110 * PERF_SAMPLE_RAW - tracepoint fields and BPF output
1111 * are handled separately
1112 * PERF_SAMPLE_BRANCH_STACK - TODO
1113 * PERF_SAMPLE_REGS_USER - TODO
1114 * PERF_SAMPLE_STACK_USER - TODO
1115 */
1116
1117 #define ADD_FIELD(cl, t, n) \
1118 do { \
1119 pr2(" field '%s'\n", n); \
1120 if (bt_ctf_event_class_add_field(cl, t, n)) { \
1121 pr_err("Failed to add field '%s';\n", n); \
1122 return -1; \
1123 } \
1124 } while (0)
1125
1126 if (type & PERF_SAMPLE_IP)
1127 ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
1128
1129 if (type & PERF_SAMPLE_TID) {
1130 ADD_FIELD(event_class, cw->data.s32, "perf_tid");
1131 ADD_FIELD(event_class, cw->data.s32, "perf_pid");
1132 }
1133
1134 if ((type & PERF_SAMPLE_ID) ||
1135 (type & PERF_SAMPLE_IDENTIFIER))
1136 ADD_FIELD(event_class, cw->data.u64, "perf_id");
1137
1138 if (type & PERF_SAMPLE_STREAM_ID)
1139 ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
1140
1141 if (type & PERF_SAMPLE_PERIOD)
1142 ADD_FIELD(event_class, cw->data.u64, "perf_period");
1143
1144 if (type & PERF_SAMPLE_WEIGHT)
1145 ADD_FIELD(event_class, cw->data.u64, "perf_weight");
1146
1147 if (type & PERF_SAMPLE_DATA_SRC)
1148 ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
1149
1150 if (type & PERF_SAMPLE_TRANSACTION)
1151 ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
1152
1153 if (type & PERF_SAMPLE_CALLCHAIN) {
1154 ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
1155 ADD_FIELD(event_class,
1156 bt_ctf_field_type_sequence_create(
1157 cw->data.u64_hex, "perf_callchain_size"),
1158 "perf_callchain");
1159 }
1160
1161 #undef ADD_FIELD
1162 return 0;
1163 }
1164
1165 static int add_event(struct ctf_writer *cw, struct evsel *evsel)
1166 {
1167 struct bt_ctf_event_class *event_class;
1168 struct evsel_priv *priv;
1169 const char *name = evsel__name(evsel);
1170 int ret;
1171
1172 pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);
1173
1174 event_class = bt_ctf_event_class_create(name);
1175 if (!event_class)
1176 return -1;
1177
1178 ret = add_generic_types(cw, evsel, event_class);
1179 if (ret)
1180 goto err;
1181
1182 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
1183 ret = add_tracepoint_types(cw, evsel, event_class);
1184 if (ret)
1185 goto err;
1186 }
1187
1188 if (evsel__is_bpf_output(evsel)) {
1189 ret = add_bpf_output_types(cw, event_class);
1190 if (ret)
1191 goto err;
1192 }
1193
1194 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
1195 if (ret) {
1196 pr("Failed to add event class into stream.\n");
1197 goto err;
1198 }
1199
1200 priv = malloc(sizeof(*priv));
1201 if (!priv)
1202 goto err;
1203
1204 priv->event_class = event_class;
1205 evsel->priv = priv;
1206 return 0;
1207
1208 err:
1209 bt_ctf_event_class_put(event_class);
1210 pr_err("Failed to add event '%s'.\n", name);
1211 return -1;
1212 }
1213
1214 static int setup_events(struct ctf_writer *cw, struct perf_session *session)
1215 {
1216 struct evlist *evlist = session->evlist;
1217 struct evsel *evsel;
1218 int ret;
1219
1220 evlist__for_each_entry(evlist, evsel) {
1221 ret = add_event(cw, evsel);
1222 if (ret)
1223 return ret;
1224 }
1225 return 0;
1226 }
1227
1228 #define __NON_SAMPLE_ADD_FIELD(t, n) \
1229 do { \
1230 pr2(" field '%s'\n", #n); \
1231 if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
1232 pr_err("Failed to add field '%s';\n", #n);\
1233 return -1; \
1234 } \
1235 } while(0)
1236
1237 #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) \
1238 static int add_##_name##_event(struct ctf_writer *cw) \
1239 { \
1240 struct bt_ctf_event_class *event_class; \
1241 int ret; \
1242 \
1243 pr("Adding "#_name" event\n"); \
1244 event_class = bt_ctf_event_class_create("perf_" #_name);\
1245 if (!event_class) \
1246 return -1; \
1247 body \
1248 \
1249 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
1250 if (ret) { \
1251 pr("Failed to add event class '"#_name"' into stream.\n");\
1252 return ret; \
1253 } \
1254 \
1255 cw->_name##_class = event_class; \
1256 bt_ctf_event_class_put(event_class); \
1257 return 0; \
1258 }
1259
1260 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
1261 __NON_SAMPLE_ADD_FIELD(u32, pid);
1262 __NON_SAMPLE_ADD_FIELD(u32, tid);
1263 __NON_SAMPLE_ADD_FIELD(string, comm);
1264 )
1265
1266 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
1267 __NON_SAMPLE_ADD_FIELD(u32, pid);
1268 __NON_SAMPLE_ADD_FIELD(u32, ppid);
1269 __NON_SAMPLE_ADD_FIELD(u32, tid);
1270 __NON_SAMPLE_ADD_FIELD(u32, ptid);
1271 __NON_SAMPLE_ADD_FIELD(u64, time);
1272 )
1273
1274 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
1275 __NON_SAMPLE_ADD_FIELD(u32, pid);
1276 __NON_SAMPLE_ADD_FIELD(u32, ppid);
1277 __NON_SAMPLE_ADD_FIELD(u32, tid);
1278 __NON_SAMPLE_ADD_FIELD(u32, ptid);
1279 __NON_SAMPLE_ADD_FIELD(u64, time);
1280 )
1281
1282 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
1283 __NON_SAMPLE_ADD_FIELD(u32, pid);
1284 __NON_SAMPLE_ADD_FIELD(u32, tid);
1285 __NON_SAMPLE_ADD_FIELD(u64_hex, start);
1286 __NON_SAMPLE_ADD_FIELD(string, filename);
1287 )
1288
1289 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
1290 __NON_SAMPLE_ADD_FIELD(u32, pid);
1291 __NON_SAMPLE_ADD_FIELD(u32, tid);
1292 __NON_SAMPLE_ADD_FIELD(u64_hex, start);
1293 __NON_SAMPLE_ADD_FIELD(string, filename);
1294 )
1295 #undef __NON_SAMPLE_ADD_FIELD
1296 #undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1297
1298 static int setup_non_sample_events(struct ctf_writer *cw,
1299 struct perf_session *session __maybe_unused)
1300 {
1301 int ret;
1302
1303 ret = add_comm_event(cw);
1304 if (ret)
1305 return ret;
1306 ret = add_exit_event(cw);
1307 if (ret)
1308 return ret;
1309 ret = add_fork_event(cw);
1310 if (ret)
1311 return ret;
1312 ret = add_mmap_event(cw);
1313 if (ret)
1314 return ret;
1315 ret = add_mmap2_event(cw);
1316 if (ret)
1317 return ret;
1318 return 0;
1319 }
1320
1321 static void cleanup_events(struct perf_session *session)
1322 {
1323 struct evlist *evlist = session->evlist;
1324 struct evsel *evsel;
1325
1326 evlist__for_each_entry(evlist, evsel) {
1327 struct evsel_priv *priv;
1328
1329 priv = evsel->priv;
1330 bt_ctf_event_class_put(priv->event_class);
1331 zfree(&evsel->priv);
1332 }
1333
1334 evlist__delete(evlist);
1335 session->evlist = NULL;
1336 }
1337
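/*
 * Only the per-CPU pointer array is allocated here; the streams themselves
 * are created on demand in ctf_stream() when the first event for a given
 * CPU is converted.
 */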
1338 static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1339 {
1340 struct ctf_stream **stream;
1341 struct perf_header *ph = &session->header;
1342 int ncpus;
1343
1344 /*
1345 * Try to get the number of cpus used in the data file;
1346 * if it is not present, fall back to MAX_CPUS.
1347 */
1348 ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
1349
1350 stream = zalloc(sizeof(*stream) * ncpus);
1351 if (!stream) {
1352 pr_err("Failed to allocate streams.\n");
1353 return -ENOMEM;
1354 }
1355
1356 cw->stream = stream;
1357 cw->stream_cnt = ncpus;
1358 return 0;
1359 }
1360
1361 static void free_streams(struct ctf_writer *cw)
1362 {
1363 int cpu;
1364
1365 for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1366 ctf_stream__delete(cw->stream[cpu]);
1367
1368 zfree(&cw->stream);
1369 }
1370
1371 static int ctf_writer__setup_env(struct ctf_writer *cw,
1372 struct perf_session *session)
1373 {
1374 struct perf_header *header = &session->header;
1375 struct bt_ctf_writer *writer = cw->writer;
1376
1377 #define ADD(__n, __v) \
1378 do { \
1379 if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \
1380 return -1; \
1381 } while (0)
1382
1383 ADD("host", header->env.hostname);
1384 ADD("sysname", "Linux");
1385 ADD("release", header->env.os_release);
1386 ADD("version", header->env.version);
1387 ADD("machine", header->env.arch);
1388 ADD("domain", "kernel");
1389 ADD("tracer_name", "perf");
1390
1391 #undef ADD
1392 return 0;
1393 }
1394
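/*
 * perf timestamps are in nanoseconds, hence the fixed 1 GHz clock frequency
 * below. With --tod the clock takes the recorded clockid's name and an
 * offset that shifts timestamps to wall-clock (time-of-day) values.
 */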
1395 static int ctf_writer__setup_clock(struct ctf_writer *cw,
1396 struct perf_session *session,
1397 bool tod)
1398 {
1399 struct bt_ctf_clock *clock = cw->clock;
1400 const char *desc = "perf clock";
1401 int64_t offset = 0;
1402
1403 if (tod) {
1404 struct perf_env *env = &session->header.env;
1405
1406 if (!env->clock.enabled) {
1407 pr_err("Can't provide --tod time, missing clock data. "
1408 "Please record with -k/--clockid option.\n");
1409 return -1;
1410 }
1411
1412 desc = clockid_name(env->clock.clockid);
1413 offset = env->clock.tod_ns - env->clock.clockid_ns;
1414 }
1415
1416 #define SET(__n, __v) \
1417 do { \
1418 if (bt_ctf_clock_set_##__n(clock, __v)) \
1419 return -1; \
1420 } while (0)
1421
1422 SET(frequency, 1000000000);
1423 SET(offset, offset);
1424 SET(description, desc);
1425 SET(precision, 10);
1426 SET(is_absolute, 0);
1427
1428 #undef SET
1429 return 0;
1430 }
1431
1432 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1433 {
1434 struct bt_ctf_field_type *type;
1435
1436 type = bt_ctf_field_type_integer_create(size);
1437 if (!type)
1438 return NULL;
1439
1440 if (sign &&
1441 bt_ctf_field_type_integer_set_signed(type, 1))
1442 goto err;
1443
1444 if (hex &&
1445 bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1446 goto err;
1447
1448 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1449 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1450 #else
1451 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1452 #endif
1453
1454 pr2("Created type: INTEGER %d-bit %ssigned %s\n",
1455 size, sign ? "" : "un", hex ? "hex" : "");
1456 return type;
1457
1458 err:
1459 bt_ctf_field_type_put(type);
1460 return NULL;
1461 }
1462
1463 static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1464 {
1465 unsigned int i;
1466
1467 for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1468 bt_ctf_field_type_put(cw->data.array[i]);
1469 }
1470
1471 static int ctf_writer__init_data(struct ctf_writer *cw)
1472 {
1473 #define CREATE_INT_TYPE(type, size, sign, hex) \
1474 do { \
1475 (type) = create_int_type(size, sign, hex); \
1476 if (!(type)) \
1477 goto err; \
1478 } while (0)
1479
1480 CREATE_INT_TYPE(cw->data.s64, 64, true, false);
1481 CREATE_INT_TYPE(cw->data.u64, 64, false, false);
1482 CREATE_INT_TYPE(cw->data.s32, 32, true, false);
1483 CREATE_INT_TYPE(cw->data.u32, 32, false, false);
1484 CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
1485 CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
1486
1487 cw->data.string = bt_ctf_field_type_string_create();
1488 if (cw->data.string)
1489 return 0;
1490
1491 err:
1492 ctf_writer__cleanup_data(cw);
1493 pr_err("Failed to create data types.\n");
1494 return -1;
1495 }
1496
1497 static void ctf_writer__cleanup(struct ctf_writer *cw)
1498 {
1499 ctf_writer__cleanup_data(cw);
1500
1501 bt_ctf_clock_put(cw->clock);
1502 free_streams(cw);
1503 bt_ctf_stream_class_put(cw->stream_class);
1504 bt_ctf_writer_put(cw->writer);
1505
1506 /* and NULL all the pointers */
1507 memset(cw, 0, sizeof(*cw));
1508 }
1509
1510 static int ctf_writer__init(struct ctf_writer *cw, const char *path,
1511 struct perf_session *session, bool tod)
1512 {
1513 struct bt_ctf_writer *writer;
1514 struct bt_ctf_stream_class *stream_class;
1515 struct bt_ctf_clock *clock;
1516 struct bt_ctf_field_type *pkt_ctx_type;
1517 int ret;
1518
1519 /* CTF writer */
1520 writer = bt_ctf_writer_create(path);
1521 if (!writer)
1522 goto err;
1523
1524 cw->writer = writer;
1525
1526 /* CTF clock */
1527 clock = bt_ctf_clock_create("perf_clock");
1528 if (!clock) {
1529 pr("Failed to create CTF clock.\n");
1530 goto err_cleanup;
1531 }
1532
1533 cw->clock = clock;
1534
1535 if (ctf_writer__setup_clock(cw, session, tod)) {
1536 pr("Failed to setup CTF clock.\n");
1537 goto err_cleanup;
1538 }
1539
1540 /* CTF stream class */
1541 stream_class = bt_ctf_stream_class_create("perf_stream");
1542 if (!stream_class) {
1543 pr("Failed to create CTF stream class.\n");
1544 goto err_cleanup;
1545 }
1546
1547 cw->stream_class = stream_class;
1548
1549 /* CTF clock stream setup */
1550 if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
1551 pr("Failed to assign CTF clock to stream class.\n");
1552 goto err_cleanup;
1553 }
1554
1555 if (ctf_writer__init_data(cw))
1556 goto err_cleanup;
1557
1558 /* Add cpu_id for packet context */
1559 pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
1560 if (!pkt_ctx_type)
1561 goto err_cleanup;
1562
1563 ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
1564 bt_ctf_field_type_put(pkt_ctx_type);
1565 if (ret)
1566 goto err_cleanup;
1567
1568 /* CTF clock writer setup */
1569 if (bt_ctf_writer_add_clock(writer, clock)) {
1570 pr("Failed to assign CTF clock to writer.\n");
1571 goto err_cleanup;
1572 }
1573
1574 return 0;
1575
1576 err_cleanup:
1577 ctf_writer__cleanup(cw);
1578 err:
1579 pr_err("Failed to setup CTF writer.\n");
1580 return -1;
1581 }
1582
1583 static int ctf_writer__flush_streams(struct ctf_writer *cw)
1584 {
1585 int cpu, ret = 0;
1586
1587 for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1588 ret = ctf_stream__flush(cw->stream[cpu]);
1589
1590 return ret;
1591 }
1592
1593 static int convert__config(const char *var, const char *value, void *cb)
1594 {
1595 struct convert *c = cb;
1596
1597 if (!strcmp(var, "convert.queue-size"))
1598 return perf_config_u64(&c->queue_size, var, value);
1599
1600 return 0;
1601 }
1602
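/*
 * Entry point used by 'perf data convert', e.g.:
 *
 *   perf data convert --to-ctf=./ctf-data [--all] [--tod]
 *
 * The ordered-events queue size used during conversion can be tuned via the
 * "convert.queue-size" perf config key handled above.
 */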
1603 int bt_convert__perf2ctf(const char *input, const char *path,
1604 struct perf_data_convert_opts *opts)
1605 {
1606 struct perf_session *session;
1607 struct perf_data data = {
1608 .path = input,
1609 .mode = PERF_DATA_MODE_READ,
1610 .force = opts->force,
1611 };
1612 struct convert c = {};
1613 struct ctf_writer *cw = &c.writer;
1614 int err;
1615
1616 perf_tool__init(&c.tool, /*ordered_events=*/true);
1617 c.tool.sample = process_sample_event;
1618 c.tool.mmap = perf_event__process_mmap;
1619 c.tool.mmap2 = perf_event__process_mmap2;
1620 c.tool.comm = perf_event__process_comm;
1621 c.tool.exit = perf_event__process_exit;
1622 c.tool.fork = perf_event__process_fork;
1623 c.tool.lost = perf_event__process_lost;
1624 c.tool.tracing_data = perf_event__process_tracing_data;
1625 c.tool.build_id = perf_event__process_build_id;
1626 c.tool.namespaces = perf_event__process_namespaces;
1627 c.tool.ordering_requires_timestamps = true;
1628
1629 if (opts->all) {
1630 c.tool.comm = process_comm_event;
1631 c.tool.exit = process_exit_event;
1632 c.tool.fork = process_fork_event;
1633 c.tool.mmap = process_mmap_event;
1634 c.tool.mmap2 = process_mmap2_event;
1635 }
1636
1637 err = perf_config(convert__config, &c);
1638 if (err)
1639 return err;
1640
1641 err = -1;
1642 /* perf.data session */
1643 session = perf_session__new(&data, &c.tool);
1644 if (IS_ERR(session))
1645 return PTR_ERR(session);
1646
1647 /* CTF writer */
1648 if (ctf_writer__init(cw, path, session, opts->tod))
1649 goto free_session;
1650
1651 if (c.queue_size) {
1652 ordered_events__set_alloc_size(&session->ordered_events,
1653 c.queue_size);
1654 }
1655
1656 /* CTF writer env/clock setup */
1657 if (ctf_writer__setup_env(cw, session))
1658 goto free_writer;
1659
1660 /* CTF events setup */
1661 if (setup_events(cw, session))
1662 goto free_writer;
1663
1664 if (opts->all && setup_non_sample_events(cw, session))
1665 goto free_writer;
1666
1667 if (setup_streams(cw, session))
1668 goto free_writer;
1669
1670 err = perf_session__process_events(session);
1671 if (!err)
1672 err = ctf_writer__flush_streams(cw);
1673 else
1674 pr_err("Error during conversion.\n");
1675
1676 fprintf(stderr,
1677 "[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1678 data.path, path);
1679
1680 fprintf(stderr,
1681 "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
1682 (double) c.events_size / 1024.0 / 1024.0,
1683 c.events_count);
1684
1685 if (!c.non_sample_count)
1686 fprintf(stderr, ") ]\n");
1687 else
1688 fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);
1689
1690 cleanup_events(session);
1691 perf_session__delete(session);
1692 ctf_writer__cleanup(cw);
1693
1694 return err;
1695
1696 free_writer:
1697 ctf_writer__cleanup(cw);
1698 free_session:
1699 perf_session__delete(session);
1700 pr_err("Error during conversion setup.\n");
1701 return err;
1702 }
1703