1 /*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "src/trace_processor/importers/fuchsia/fuchsia_trace_tokenizer.h"

#include <cinttypes>
#include <limits>
#include <memory>
#include <utility>

#include "perfetto/base/logging.h"
#include "perfetto/base/status.h"
#include "perfetto/ext/base/string_view.h"
#include "perfetto/trace_processor/trace_blob.h"
#include "src/trace_processor/importers/common/cpu_tracker.h"
#include "src/trace_processor/importers/common/process_tracker.h"
#include "src/trace_processor/importers/common/slice_tracker.h"
#include "src/trace_processor/importers/fuchsia/fuchsia_record.h"
#include "src/trace_processor/importers/fuchsia/fuchsia_trace_parser.h"
#include "src/trace_processor/importers/proto/proto_trace_reader.h"
#include "src/trace_processor/sorter/trace_sorter.h"
#include "src/trace_processor/types/task_state.h"
#include "src/trace_processor/types/trace_processor_context.h"
35
36 namespace perfetto {
37 namespace trace_processor {
38
namespace {

using fuchsia_trace_utils::ArgValue;

// Record types, read from bits 0-3 of each record's header word.
// Type 6 is absent because this tokenizer does not handle it.
constexpr uint32_t kMetadata = 0;
constexpr uint32_t kInitialization = 1;
constexpr uint32_t kString = 2;
constexpr uint32_t kThread = 3;
constexpr uint32_t kEvent = 4;
constexpr uint32_t kBlob = 5;
constexpr uint32_t kKernelObject = 7;
constexpr uint32_t kSchedulerEvent = 8;

// Scheduler event sub-types, read from bits 60-63 of the header word.
constexpr uint32_t kSchedulerEventLegacyContextSwitch = 0;
constexpr uint32_t kSchedulerEventContextSwitch = 1;
constexpr uint32_t kSchedulerEventThreadWakeup = 2;

// Metadata types, read from bits 16-19 of the header word.
constexpr uint32_t kProviderInfo = 1;
constexpr uint32_t kProviderSection = 2;
constexpr uint32_t kProviderEvent = 3;

// Thread states, as reported in the outgoing-state field of legacy context
// switch records.
constexpr uint32_t kThreadNew = 0;
constexpr uint32_t kThreadRunning = 1;
constexpr uint32_t kThreadSuspended = 2;
constexpr uint32_t kThreadBlocked = 3;
constexpr uint32_t kThreadDying = 4;
constexpr uint32_t kThreadDead = 5;

// Zircon object types appearing in kernel object records.
constexpr uint32_t kZxObjTypeProcess = 1;
constexpr uint32_t kZxObjTypeThread = 2;

// Sentinel weight carried by scheduler event arguments to identify the idle
// thread; such switches/wakeups are not recorded as sched activity.
constexpr int32_t kIdleWeight = std::numeric_limits<int32_t>::min();

}  // namespace
77
// Interns the fixed set of scheduler-state strings and scheduler-event
// argument names up front so per-event processing avoids repeated lookups.
FuchsiaTraceTokenizer::FuchsiaTraceTokenizer(TraceProcessorContext* context)
    : context_(context),
      proto_reader_(context),
      // Thread-state strings: "Running" for the incoming thread, and short
      // codes for outgoing states (R = runnable, R+ = preempted, W = waking,
      // S = blocked, T = suspended, Z = dying, X = dead).
      running_string_id_(context->storage->InternString("Running")),
      runnable_string_id_(context->storage->InternString("R")),
      preempted_string_id_(context->storage->InternString("R+")),
      waking_string_id_(context->storage->InternString("W")),
      blocked_string_id_(context->storage->InternString("S")),
      suspended_string_id_(context->storage->InternString("T")),
      exit_dying_string_id_(context->storage->InternString("Z")),
      exit_dead_string_id_(context->storage->InternString("X")),
      // Argument names looked up when parsing scheduler event and kernel
      // object records.
      incoming_weight_id_(context->storage->InternString("incoming_weight")),
      outgoing_weight_id_(context->storage->InternString("outgoing_weight")),
      weight_id_(context->storage->InternString("weight")),
      process_id_(context->storage->InternString("process")) {
  // Provider 0 acts as the default provider until a provider-info metadata
  // record names a real one.
  RegisterProvider(0, "");
}
95
// Out-of-line defaulted destructor.
FuchsiaTraceTokenizer::~FuchsiaTraceTokenizer() = default;
97
// Splits the incoming byte stream into whole Fuchsia trace records and feeds
// each one to |ParseRecord|. Records may straddle chunk boundaries, so
// partial-record bytes are buffered in |leftover_bytes_| between calls. Any
// embedded Perfetto blob bytes accumulated by |ParseRecord| are forwarded to
// |proto_reader_| at the end of each call.
util::Status FuchsiaTraceTokenizer::Parse(TraceBlobView blob) {
  size_t size = blob.size();

  // The relevant internal state is |leftover_bytes_|. Each call to Parse should
  // maintain the following properties, unless a fatal error occurs in which
  // case it should return false and no assumptions should be made about the
  // resulting internal state:
  //
  // 1) Every byte passed to |Parse| has either been passed to |ParseRecord| or
  // is present in |leftover_bytes_|, but not both.
  // 2) |leftover_bytes_| does not contain a complete record.
  //
  // Parse is responsible for creating the "full" |TraceBlobView|s, which own
  // the underlying data. Generally, there will be one such view. However, if
  // there is a record that started in an earlier call, then a new buffer is
  // created here to make the bytes in that record contiguous.
  //
  // Because some of the bytes in |data| might belong to the record starting in
  // |leftover_bytes_|, we track the offset at which the following record will
  // start.
  size_t byte_offset = 0;

  // Look for a record starting with the leftover bytes.
  if (leftover_bytes_.size() + size < 8) {
    // Even with the new bytes, we can't even read the header of the next
    // record, so just add the new bytes to |leftover_bytes_| and return.
    leftover_bytes_.insert(leftover_bytes_.end(), blob.data() + byte_offset,
                           blob.data() + size);
    return util::OkStatus();
  }
  if (!leftover_bytes_.empty()) {
    // There is a record starting from leftover bytes.
    if (leftover_bytes_.size() < 8) {
      // Header was previously incomplete, but we have enough now.
      // Copy bytes into |leftover_bytes_| so that the whole header is present,
      // and update |byte_offset| and |size| accordingly.
      size_t needed_bytes = 8 - leftover_bytes_.size();
      leftover_bytes_.insert(leftover_bytes_.end(), blob.data() + byte_offset,
                             blob.data() + needed_bytes);
      byte_offset += needed_bytes;
      size -= needed_bytes;
    }
    // Read the record length from the header. Bits 4-15 hold the record size
    // in 64-bit words (header word included).
    uint64_t header =
        *reinterpret_cast<const uint64_t*>(leftover_bytes_.data());
    uint32_t record_len_words =
        fuchsia_trace_utils::ReadField<uint32_t>(header, 4, 15);
    uint32_t record_len_bytes = record_len_words * sizeof(uint64_t);

    // From property (2) above, leftover_bytes_ must have had less than a full
    // record to start with. We padded leftover_bytes_ out to read the header,
    // so it may now be a full record (in the case that the record consists of
    // only the header word), but it still cannot have any extra bytes.
    PERFETTO_DCHECK(leftover_bytes_.size() <= record_len_bytes);
    size_t missing_bytes = record_len_bytes - leftover_bytes_.size();

    if (missing_bytes <= size) {
      // We have enough bytes to complete the partial record. Create a new
      // buffer for that record.
      TraceBlob buf = TraceBlob::Allocate(record_len_bytes);
      memcpy(buf.data(), leftover_bytes_.data(), leftover_bytes_.size());
      memcpy(buf.data() + leftover_bytes_.size(), blob.data() + byte_offset,
             missing_bytes);
      byte_offset += missing_bytes;
      size -= missing_bytes;
      leftover_bytes_.clear();
      ParseRecord(TraceBlobView(std::move(buf)));
    } else {
      // There are not enough bytes for the full record. Add all the bytes we
      // have to leftover_bytes_ and wait for more.
      leftover_bytes_.insert(leftover_bytes_.end(), blob.data() + byte_offset,
                             blob.data() + byte_offset + size);
      return util::OkStatus();
    }
  }

  TraceBlobView full_view = blob.slice_off(byte_offset, size);

  // |record_offset| is a number of bytes past |byte_offset| where the record
  // under consideration starts. As a result, it must always be in the range [0,
  // size-8]. Any larger offset means we don't have enough bytes for the header.
  size_t record_offset = 0;
  while (record_offset + 8 <= size) {
    uint64_t header =
        *reinterpret_cast<const uint64_t*>(full_view.data() + record_offset);
    uint32_t record_len_bytes =
        fuchsia_trace_utils::ReadField<uint32_t>(header, 4, 15) *
        sizeof(uint64_t);
    // A zero-length record would loop forever; treat it as a fatal error.
    if (record_len_bytes == 0)
      return util::ErrStatus("Unexpected record of size 0");

    if (record_offset + record_len_bytes > size)
      break;

    TraceBlobView record = full_view.slice_off(record_offset, record_len_bytes);
    ParseRecord(std::move(record));

    record_offset += record_len_bytes;
  }

  // Stash the tail of the buffer (an incomplete record, possibly even an
  // incomplete header) for the next call.
  leftover_bytes_.insert(leftover_bytes_.end(),
                         full_view.data() + record_offset,
                         full_view.data() + size);

  // Hand any embedded Perfetto trace bytes collected from kBlob records to
  // the proto reader, then reset the accumulation buffer.
  TraceBlob perfetto_blob =
      TraceBlob::CopyFrom(proto_trace_data_.data(), proto_trace_data_.size());
  proto_trace_data_.clear();

  return proto_reader_.Parse(TraceBlobView(std::move(perfetto_blob)));
}
208
IdForOutgoingThreadState(uint32_t state)209 StringId FuchsiaTraceTokenizer::IdForOutgoingThreadState(uint32_t state) {
210 switch (state) {
211 case kThreadNew:
212 case kThreadRunning:
213 return runnable_string_id_;
214 case kThreadBlocked:
215 return blocked_string_id_;
216 case kThreadSuspended:
217 return suspended_string_id_;
218 case kThreadDying:
219 return exit_dying_string_id_;
220 case kThreadDead:
221 return exit_dead_string_id_;
222 default:
223 return kNullStringId;
224 }
225 }
226
// Handles |thread| being switched off |cpu| at time |ts| into
// |thread_state|: closes the thread's open sched slice and state rows (if
// any), then opens a new state row tracking the outgoing state.
void FuchsiaTraceTokenizer::SwitchFrom(Thread* thread,
                                       int64_t ts,
                                       uint32_t cpu,
                                       uint32_t thread_state) {
  TraceStorage* storage = context_->storage.get();
  ProcessTracker* procs = context_->process_tracker.get();

  StringId state = IdForOutgoingThreadState(thread_state);
  UniqueTid utid = procs->UpdateThread(static_cast<uint32_t>(thread->info.tid),
                                       static_cast<uint32_t>(thread->info.pid));

  // Duration of whatever row(s) were opened at the previous event time.
  const auto duration = ts - thread->last_ts;
  thread->last_ts = ts;

  // Close the slice record if one is open for this thread.
  if (thread->last_slice_row.has_value()) {
    auto row_ref = thread->last_slice_row->ToRowReference(
        storage->mutable_sched_slice_table());
    row_ref.set_dur(duration);
    row_ref.set_end_state(state);
    thread->last_slice_row.reset();
  }

  // Close the state record if one is open for this thread.
  if (thread->last_state_row.has_value()) {
    auto row_ref = thread->last_state_row->ToRowReference(
        storage->mutable_thread_state_table());
    row_ref.set_dur(duration);
    thread->last_state_row.reset();
  }

  // Open a new state record to track the duration of the outgoing
  // state. dur = -1 marks it open-ended; it is closed by the next
  // SwitchTo/SwitchFrom/Wake for this thread.
  tables::ThreadStateTable::Row state_row;
  state_row.ts = ts;
  state_row.ucpu = context_->cpu_tracker->GetOrCreateCpu(cpu);
  state_row.dur = -1;
  state_row.state = state;
  state_row.utid = utid;
  auto state_row_number =
      storage->mutable_thread_state_table()->Insert(state_row).row_number;
  thread->last_state_row = state_row_number;
}
270
SwitchTo(Thread * thread,int64_t ts,uint32_t cpu,int32_t weight)271 void FuchsiaTraceTokenizer::SwitchTo(Thread* thread,
272 int64_t ts,
273 uint32_t cpu,
274 int32_t weight) {
275 TraceStorage* storage = context_->storage.get();
276 ProcessTracker* procs = context_->process_tracker.get();
277
278 UniqueTid utid = procs->UpdateThread(static_cast<uint32_t>(thread->info.tid),
279 static_cast<uint32_t>(thread->info.pid));
280
281 const auto duration = ts - thread->last_ts;
282 thread->last_ts = ts;
283
284 // Close the state record if one is open for this thread.
285 if (thread->last_state_row.has_value()) {
286 auto row_ref = thread->last_state_row->ToRowReference(
287 storage->mutable_thread_state_table());
288 row_ref.set_dur(duration);
289 thread->last_state_row.reset();
290 }
291
292 auto ucpu = context_->cpu_tracker->GetOrCreateCpu(cpu);
293 // Open a new slice record for this thread.
294 tables::SchedSliceTable::Row slice_row;
295 slice_row.ts = ts;
296 slice_row.ucpu = ucpu;
297 slice_row.dur = -1;
298 slice_row.utid = utid;
299 slice_row.priority = weight;
300 auto slice_row_number =
301 storage->mutable_sched_slice_table()->Insert(slice_row).row_number;
302 thread->last_slice_row = slice_row_number;
303
304 // Open a new state record for this thread.
305 tables::ThreadStateTable::Row state_row;
306 state_row.ts = ts;
307 state_row.ucpu = context_->cpu_tracker->GetOrCreateCpu(cpu);
308 state_row.dur = -1;
309 state_row.state = running_string_id_;
310 state_row.utid = utid;
311 auto state_row_number =
312 storage->mutable_thread_state_table()->Insert(state_row).row_number;
313 thread->last_state_row = state_row_number;
314 }
315
// Handles |thread| being woken on |cpu| at time |ts|: closes the thread's
// open state row (if any) and opens a new open-ended "W" (waking) state row.
void FuchsiaTraceTokenizer::Wake(Thread* thread, int64_t ts, uint32_t cpu) {
  TraceStorage* storage = context_->storage.get();
  ProcessTracker* procs = context_->process_tracker.get();

  UniqueTid utid = procs->UpdateThread(static_cast<uint32_t>(thread->info.tid),
                                       static_cast<uint32_t>(thread->info.pid));

  // Duration of whatever row was opened at the previous event time.
  const auto duration = ts - thread->last_ts;
  thread->last_ts = ts;

  // Close the state record if one is open for this thread.
  if (thread->last_state_row.has_value()) {
    auto row_ref = thread->last_state_row->ToRowReference(
        storage->mutable_thread_state_table());
    row_ref.set_dur(duration);
    thread->last_state_row.reset();
  }

  // Open a new state record for this thread. dur = -1 marks it open-ended;
  // it is closed by the next scheduling event for this thread.
  tables::ThreadStateTable::Row state_row;
  state_row.ts = ts;
  state_row.ucpu = context_->cpu_tracker->GetOrCreateCpu(cpu);
  state_row.dur = -1;
  state_row.state = waking_string_id_;
  state_row.utid = utid;
  auto state_row_number =
      storage->mutable_thread_state_table()->Insert(state_row).row_number;
  thread->last_state_row = state_row_number;
}
345
346 // Most record types are read and recorded in |TraceStorage| here directly.
347 // Event records are sorted by timestamp before processing, so instead of
348 // recording them in |TraceStorage| they are given to |TraceSorter|. In order to
349 // facilitate the parsing after sorting, a small view of the provider's string
350 // and thread tables is passed alongside the record. See |FuchsiaProviderView|.
ParseRecord(TraceBlobView tbv)351 void FuchsiaTraceTokenizer::ParseRecord(TraceBlobView tbv) {
352 TraceStorage* storage = context_->storage.get();
353 ProcessTracker* procs = context_->process_tracker.get();
354 TraceSorter* sorter = context_->sorter.get();
355
356 fuchsia_trace_utils::RecordCursor cursor(tbv.data(), tbv.length());
357 uint64_t header;
358 if (!cursor.ReadUint64(&header)) {
359 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
360 return;
361 }
362
363 uint32_t record_type = fuchsia_trace_utils::ReadField<uint32_t>(header, 0, 3);
364
365 // All non-metadata events require current_provider_ to be set.
366 if (record_type != kMetadata && current_provider_ == nullptr) {
367 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
368 return;
369 }
370
371 // Adapters for FuchsiaTraceParser::ParseArgs.
372 const auto intern_string = [this](base::StringView string) {
373 return context_->storage->InternString(string);
374 };
375 const auto get_string = [this](uint16_t index) {
376 return current_provider_->GetString(index);
377 };
378
379 switch (record_type) {
380 case kMetadata: {
381 uint32_t metadata_type =
382 fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 19);
383 switch (metadata_type) {
384 case kProviderInfo: {
385 uint32_t provider_id =
386 fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 51);
387 uint32_t name_len =
388 fuchsia_trace_utils::ReadField<uint32_t>(header, 52, 59);
389 base::StringView name_view;
390 if (!cursor.ReadInlineString(name_len, &name_view)) {
391 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
392 return;
393 }
394 RegisterProvider(provider_id, name_view.ToStdString());
395 break;
396 }
397 case kProviderSection: {
398 uint32_t provider_id =
399 fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 51);
400 current_provider_ = providers_[provider_id].get();
401 break;
402 }
403 case kProviderEvent: {
404 // TODO(bhamrick): Handle buffer fill events
405 PERFETTO_DLOG(
406 "Ignoring provider event. Events may have been dropped");
407 break;
408 }
409 }
410 break;
411 }
412 case kInitialization: {
413 if (!cursor.ReadUint64(¤t_provider_->ticks_per_second)) {
414 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
415 return;
416 }
417 break;
418 }
419 case kString: {
420 uint32_t index = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 30);
421 if (index != 0) {
422 uint32_t len = fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 46);
423 base::StringView s;
424 if (!cursor.ReadInlineString(len, &s)) {
425 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
426 return;
427 }
428 StringId id = storage->InternString(s);
429
430 current_provider_->string_table[index] = id;
431 }
432 break;
433 }
434 case kThread: {
435 uint32_t index = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
436 if (index != 0) {
437 FuchsiaThreadInfo tinfo;
438 if (!cursor.ReadInlineThread(&tinfo)) {
439 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
440 return;
441 }
442
443 current_provider_->thread_table[index] = tinfo;
444 }
445 break;
446 }
447 case kEvent: {
448 uint32_t thread_ref =
449 fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 31);
450 uint32_t cat_ref =
451 fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 47);
452 uint32_t name_ref =
453 fuchsia_trace_utils::ReadField<uint32_t>(header, 48, 63);
454
455 // Build the FuchsiaRecord for the event, i.e. extract the thread
456 // information if not inline, and any non-inline strings (name, category
457 // for now, arg names and string values in the future).
458 FuchsiaRecord record(std::move(tbv));
459 record.set_ticks_per_second(current_provider_->ticks_per_second);
460
461 uint64_t ticks;
462 if (!cursor.ReadUint64(&ticks)) {
463 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
464 return;
465 }
466 int64_t ts = fuchsia_trace_utils::TicksToNs(
467 ticks, current_provider_->ticks_per_second);
468 if (ts < 0) {
469 storage->IncrementStats(stats::fuchsia_timestamp_overflow);
470 return;
471 }
472
473 if (fuchsia_trace_utils::IsInlineThread(thread_ref)) {
474 // Skip over inline thread
475 cursor.ReadInlineThread(nullptr);
476 } else {
477 record.InsertThread(thread_ref,
478 current_provider_->GetThread(thread_ref));
479 }
480
481 if (fuchsia_trace_utils::IsInlineString(cat_ref)) {
482 // Skip over inline string
483 cursor.ReadInlineString(cat_ref, nullptr);
484 } else {
485 record.InsertString(cat_ref, current_provider_->GetString(cat_ref));
486 }
487
488 if (fuchsia_trace_utils::IsInlineString(name_ref)) {
489 // Skip over inline string
490 cursor.ReadInlineString(name_ref, nullptr);
491 } else {
492 record.InsertString(name_ref, current_provider_->GetString(name_ref));
493 }
494
495 uint32_t n_args =
496 fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 23);
497 for (uint32_t i = 0; i < n_args; i++) {
498 const size_t arg_base = cursor.WordIndex();
499 uint64_t arg_header;
500 if (!cursor.ReadUint64(&arg_header)) {
501 storage->IncrementStats(stats::fuchsia_invalid_event);
502 return;
503 }
504 uint32_t arg_type =
505 fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 0, 3);
506 uint32_t arg_size_words =
507 fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 4, 15);
508 uint32_t arg_name_ref =
509 fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 16, 31);
510
511 if (fuchsia_trace_utils::IsInlineString(arg_name_ref)) {
512 // Skip over inline string
513 cursor.ReadInlineString(arg_name_ref, nullptr);
514 } else {
515 record.InsertString(arg_name_ref,
516 current_provider_->GetString(arg_name_ref));
517 }
518
519 if (arg_type == ArgValue::ArgType::kString) {
520 uint32_t arg_value_ref =
521 fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 32, 47);
522 if (fuchsia_trace_utils::IsInlineString(arg_value_ref)) {
523 // Skip over inline string
524 cursor.ReadInlineString(arg_value_ref, nullptr);
525 } else {
526 record.InsertString(arg_value_ref,
527 current_provider_->GetString(arg_value_ref));
528 }
529 }
530
531 cursor.SetWordIndex(arg_base + arg_size_words);
532 }
533
534 sorter->PushFuchsiaRecord(ts, std::move(record));
535 break;
536 }
537 case kBlob: {
538 constexpr uint32_t kPerfettoBlob = 3;
539 uint32_t blob_type =
540 fuchsia_trace_utils::ReadField<uint32_t>(header, 48, 55);
541 if (blob_type == kPerfettoBlob) {
542 FuchsiaRecord record(std::move(tbv));
543 uint32_t blob_size =
544 fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 46);
545 uint32_t name_ref =
546 fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 31);
547
548 // We don't need the name, but we still need to parse it in case it is
549 // inline
550 if (fuchsia_trace_utils::IsInlineString(name_ref)) {
551 base::StringView name_view;
552 if (!cursor.ReadInlineString(name_ref, &name_view)) {
553 storage->IncrementStats(stats::fuchsia_invalid_event);
554 return;
555 }
556 }
557
558 // Append the Blob into the embedded perfetto bytes -- we'll parse them
559 // all after the main pass is done.
560 if (!cursor.ReadBlob(blob_size, proto_trace_data_)) {
561 storage->IncrementStats(stats::fuchsia_invalid_event);
562 return;
563 }
564 }
565 break;
566 }
567 case kKernelObject: {
568 uint32_t obj_type =
569 fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
570 uint32_t name_ref =
571 fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 39);
572
573 uint64_t obj_id;
574 if (!cursor.ReadUint64(&obj_id)) {
575 storage->IncrementStats(stats::fuchsia_invalid_event);
576 return;
577 }
578
579 StringId name = StringId();
580 if (fuchsia_trace_utils::IsInlineString(name_ref)) {
581 base::StringView name_view;
582 if (!cursor.ReadInlineString(name_ref, &name_view)) {
583 storage->IncrementStats(stats::fuchsia_invalid_event);
584 return;
585 }
586 name = storage->InternString(name_view);
587 } else {
588 name = current_provider_->GetString(name_ref);
589 }
590
591 switch (obj_type) {
592 case kZxObjTypeProcess: {
593 // Note: Fuchsia pid/tids are 64 bits but Perfetto's tables only
594 // support 32 bits. This is usually not an issue except for
595 // artificial koids which have the 2^63 bit set. This is used for
596 // things such as virtual threads.
597 procs->SetProcessMetadata(
598 static_cast<uint32_t>(obj_id), std::optional<uint32_t>(),
599 base::StringView(storage->GetString(name)), base::StringView());
600 break;
601 }
602 case kZxObjTypeThread: {
603 uint32_t n_args =
604 fuchsia_trace_utils::ReadField<uint32_t>(header, 40, 43);
605
606 auto maybe_args = FuchsiaTraceParser::ParseArgs(
607 cursor, n_args, intern_string, get_string);
608 if (!maybe_args.has_value()) {
609 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
610 return;
611 }
612
613 uint64_t pid = 0;
614 for (const auto arg : *maybe_args) {
615 if (arg.name == process_id_) {
616 if (arg.value.Type() != ArgValue::ArgType::kKoid) {
617 storage->IncrementStats(stats::fuchsia_invalid_event);
618 return;
619 }
620 pid = arg.value.Koid();
621 }
622 }
623
624 Thread& thread = GetThread(obj_id);
625 thread.info.pid = pid;
626
627 UniqueTid utid = procs->UpdateThread(static_cast<uint32_t>(obj_id),
628 static_cast<uint32_t>(pid));
629 auto& tt = *storage->mutable_thread_table();
630 tt[utid].set_name(name);
631 break;
632 }
633 default: {
634 PERFETTO_DLOG("Skipping Kernel Object record with type %d", obj_type);
635 break;
636 }
637 }
638 break;
639 }
640 case kSchedulerEvent: {
641 // Context switch records come in order, so they do not need to go through
642 // TraceSorter.
643 uint32_t event_type =
644 fuchsia_trace_utils::ReadField<uint32_t>(header, 60, 63);
645 switch (event_type) {
646 case kSchedulerEventLegacyContextSwitch: {
647 uint32_t cpu =
648 fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
649 uint32_t outgoing_state =
650 fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 27);
651 uint32_t outgoing_thread_ref =
652 fuchsia_trace_utils::ReadField<uint32_t>(header, 28, 35);
653 int32_t outgoing_priority =
654 fuchsia_trace_utils::ReadField<int32_t>(header, 44, 51);
655 uint32_t incoming_thread_ref =
656 fuchsia_trace_utils::ReadField<uint32_t>(header, 36, 43);
657 int32_t incoming_priority =
658 fuchsia_trace_utils::ReadField<int32_t>(header, 52, 59);
659
660 int64_t ts;
661 if (!cursor.ReadTimestamp(current_provider_->ticks_per_second, &ts)) {
662 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
663 return;
664 }
665 if (ts == -1) {
666 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
667 return;
668 }
669
670 FuchsiaThreadInfo outgoing_thread_info;
671 if (fuchsia_trace_utils::IsInlineThread(outgoing_thread_ref)) {
672 if (!cursor.ReadInlineThread(&outgoing_thread_info)) {
673 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
674 return;
675 }
676 } else {
677 outgoing_thread_info =
678 current_provider_->GetThread(outgoing_thread_ref);
679 }
680 Thread& outgoing_thread = GetThread(outgoing_thread_info.tid);
681
682 FuchsiaThreadInfo incoming_thread_info;
683 if (fuchsia_trace_utils::IsInlineThread(incoming_thread_ref)) {
684 if (!cursor.ReadInlineThread(&incoming_thread_info)) {
685 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
686 return;
687 }
688 } else {
689 incoming_thread_info =
690 current_provider_->GetThread(incoming_thread_ref);
691 }
692 Thread& incoming_thread = GetThread(incoming_thread_info.tid);
693
694 // Idle threads are identified by pid == 0 and prio == 0.
695 const bool incoming_is_idle =
696 incoming_thread.info.pid == 0 && incoming_priority == 0;
697 const bool outgoing_is_idle =
698 outgoing_thread.info.pid == 0 && outgoing_priority == 0;
699
700 // Handle switching away from the currently running thread.
701 if (!outgoing_is_idle) {
702 SwitchFrom(&outgoing_thread, ts, cpu, outgoing_state);
703 }
704
705 // Handle switching to the new currently running thread.
706 if (!incoming_is_idle) {
707 SwitchTo(&incoming_thread, ts, cpu, incoming_priority);
708 }
709 break;
710 }
711 case kSchedulerEventContextSwitch: {
712 const uint32_t argument_count =
713 fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 19);
714 const uint32_t cpu =
715 fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 35);
716 const uint32_t outgoing_state =
717 fuchsia_trace_utils::ReadField<uint32_t>(header, 36, 39);
718
719 int64_t ts;
720 if (!cursor.ReadTimestamp(current_provider_->ticks_per_second, &ts)) {
721 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
722 return;
723 }
724 if (ts < 0) {
725 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
726 return;
727 }
728
729 uint64_t outgoing_tid;
730 if (!cursor.ReadUint64(&outgoing_tid)) {
731 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
732 return;
733 }
734 Thread& outgoing_thread = GetThread(outgoing_tid);
735
736 uint64_t incoming_tid;
737 if (!cursor.ReadUint64(&incoming_tid)) {
738 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
739 return;
740 }
741 Thread& incoming_thread = GetThread(incoming_tid);
742
743 auto maybe_args = FuchsiaTraceParser::ParseArgs(
744 cursor, argument_count, intern_string, get_string);
745 if (!maybe_args.has_value()) {
746 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
747 return;
748 }
749
750 int32_t incoming_weight = 0;
751 int32_t outgoing_weight = 0;
752
753 for (const auto& arg : *maybe_args) {
754 if (arg.name == incoming_weight_id_) {
755 if (arg.value.Type() != ArgValue::ArgType::kInt32) {
756 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
757 return;
758 }
759 incoming_weight = arg.value.Int32();
760 } else if (arg.name == outgoing_weight_id_) {
761 if (arg.value.Type() != ArgValue::ArgType::kInt32) {
762 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
763 return;
764 }
765 outgoing_weight = arg.value.Int32();
766 }
767 }
768
769 const bool incoming_is_idle = incoming_weight == kIdleWeight;
770 const bool outgoing_is_idle = outgoing_weight == kIdleWeight;
771
772 // Handle switching away from the currently running thread.
773 if (!outgoing_is_idle) {
774 SwitchFrom(&outgoing_thread, ts, cpu, outgoing_state);
775 }
776
777 // Handle switching to the new currently running thread.
778 if (!incoming_is_idle) {
779 SwitchTo(&incoming_thread, ts, cpu, incoming_weight);
780 }
781 break;
782 }
783 case kSchedulerEventThreadWakeup: {
784 const uint32_t argument_count =
785 fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 19);
786 const uint32_t cpu =
787 fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 35);
788
789 int64_t ts;
790 if (!cursor.ReadTimestamp(current_provider_->ticks_per_second, &ts)) {
791 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
792 return;
793 }
794 if (ts < 0) {
795 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
796 return;
797 }
798
799 uint64_t waking_tid;
800 if (!cursor.ReadUint64(&waking_tid)) {
801 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
802 return;
803 }
804 Thread& waking_thread = GetThread(waking_tid);
805
806 auto maybe_args = FuchsiaTraceParser::ParseArgs(
807 cursor, argument_count, intern_string, get_string);
808 if (!maybe_args.has_value()) {
809 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
810 return;
811 }
812
813 int32_t waking_weight = 0;
814
815 for (const auto& arg : *maybe_args) {
816 if (arg.name == weight_id_) {
817 if (arg.value.Type() != ArgValue::ArgType::kInt32) {
818 context_->storage->IncrementStats(stats::fuchsia_invalid_event);
819 return;
820 }
821 waking_weight = arg.value.Int32();
822 }
823 }
824
825 const bool waking_is_idle = waking_weight == kIdleWeight;
826 if (!waking_is_idle) {
827 Wake(&waking_thread, ts, cpu);
828 }
829 break;
830 }
831 default:
832 PERFETTO_DLOG("Skipping unknown scheduler event type %d", event_type);
833 break;
834 }
835
836 break;
837 }
838 default: {
839 PERFETTO_DLOG("Skipping record of unknown type %d", record_type);
840 break;
841 }
842 }
843 }
844
RegisterProvider(uint32_t provider_id,std::string name)845 void FuchsiaTraceTokenizer::RegisterProvider(uint32_t provider_id,
846 std::string name) {
847 std::unique_ptr<ProviderInfo> provider(new ProviderInfo());
848 provider->name = name;
849 current_provider_ = provider.get();
850 providers_[provider_id] = std::move(provider);
851 }
852
// Nothing is flushed at end-of-file; any remaining |leftover_bytes_| belong
// to an incomplete record and are simply discarded.
base::Status FuchsiaTraceTokenizer::NotifyEndOfFile() {
  return base::OkStatus();
}
856
857 } // namespace trace_processor
858 } // namespace perfetto
859