xref: /aosp_15_r20/external/perfetto/src/trace_processor/export_json.cc (revision 6dbdd20afdafa5e3ca9b8809fa73465d530080dc)
1 /*
2  * Copyright (C) 2019 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "perfetto/ext/trace_processor/export_json.h"
18 
19 #include <algorithm>
20 #include <cmath>
21 #include <cstdint>
22 #include <cstdio>
23 #include <cstring>
24 #include <deque>
25 #include <limits>
26 #include <map>
27 #include <memory>
28 #include <optional>
29 #include <sstream>
30 #include <string>
31 #include <tuple>
32 #include <unordered_map>
33 #include <utility>
34 #include <vector>
35 
36 #include "perfetto/base/build_config.h"
37 #include "perfetto/base/logging.h"
38 #include "perfetto/base/status.h"
39 #include "perfetto/ext/base/string_splitter.h"
40 #include "perfetto/ext/base/string_utils.h"
41 #include "perfetto/public/compiler.h"
42 #include "perfetto/trace_processor/basic_types.h"
43 #include "src/trace_processor/containers/null_term_string_view.h"
44 #include "src/trace_processor/export_json.h"
45 #include "src/trace_processor/storage/metadata.h"
46 #include "src/trace_processor/storage/stats.h"
47 #include "src/trace_processor/storage/trace_storage.h"
48 #include "src/trace_processor/tables/metadata_tables_py.h"
49 #include "src/trace_processor/tables/profiler_tables_py.h"
50 #include "src/trace_processor/trace_processor_storage_impl.h"
51 #include "src/trace_processor/types/trace_processor_context.h"
52 #include "src/trace_processor/types/variadic.h"
53 #include "src/trace_processor/util/status_macros.h"
54 
55 #if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
56 #include <json/config.h>
57 #include <json/reader.h>
58 #include <json/value.h>
59 #include <json/writer.h>
60 #endif
61 
62 namespace perfetto::trace_processor::json {
63 
64 namespace {
65 
66 class FileWriter : public OutputWriter {
67  public:
68   explicit FileWriter(FILE* file) : file_(file) {}
69   ~FileWriter() override { fflush(file_); }
70 
71   base::Status AppendString(const std::string& s) override {
72     size_t written =
73         fwrite(s.data(), sizeof(std::string::value_type), s.size(), file_);
74     if (written != s.size())
75       return base::ErrStatus("Error writing to file: %d", ferror(file_));
76     return base::OkStatus();
77   }
78 
79  private:
80   FILE* file_;
81 };
82 
83 #if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
84 using IndexMap = perfetto::trace_processor::TraceStorage::Stats::IndexMap;
85 
86 const char kLegacyEventArgsKey[] = "legacy_event";
87 const char kLegacyEventPassthroughUtidKey[] = "passthrough_utid";
88 const char kLegacyEventCategoryKey[] = "category";
89 const char kLegacyEventNameKey[] = "name";
90 const char kLegacyEventPhaseKey[] = "phase";
91 const char kLegacyEventDurationNsKey[] = "duration_ns";
92 const char kLegacyEventThreadTimestampNsKey[] = "thread_timestamp_ns";
93 const char kLegacyEventThreadDurationNsKey[] = "thread_duration_ns";
94 const char kLegacyEventThreadInstructionCountKey[] = "thread_instruction_count";
95 const char kLegacyEventThreadInstructionDeltaKey[] = "thread_instruction_delta";
96 const char kLegacyEventUseAsyncTtsKey[] = "use_async_tts";
97 const char kLegacyEventUnscopedIdKey[] = "unscoped_id";
98 const char kLegacyEventGlobalIdKey[] = "global_id";
99 const char kLegacyEventLocalIdKey[] = "local_id";
100 const char kLegacyEventIdScopeKey[] = "id_scope";
101 const char kStrippedArgument[] = "__stripped__";
102 
103 const char* GetNonNullString(const TraceStorage* storage,
104                              std::optional<StringId> id) {
105   return id == std::nullopt || *id == kNullStringId
106              ? ""
107              : storage->GetString(*id).c_str();
108 }
109 
110 class JsonExporter {
111  public:
112   JsonExporter(const TraceStorage* storage,
113                OutputWriter* output,
114                ArgumentFilterPredicate argument_filter,
115                MetadataFilterPredicate metadata_filter,
116                LabelFilterPredicate label_filter)
117       : storage_(storage),
118         args_builder_(storage_),
119         writer_(output,
120                 std::move(argument_filter),
121                 std::move(metadata_filter),
122                 std::move(label_filter)) {}
123 
124   base::Status Export() {
125     RETURN_IF_ERROR(MapUniquePidsAndTids());
126     RETURN_IF_ERROR(ExportThreadNames());
127     RETURN_IF_ERROR(ExportProcessNames());
128     RETURN_IF_ERROR(ExportProcessUptimes());
129     RETURN_IF_ERROR(ExportSlices());
130     RETURN_IF_ERROR(ExportFlows());
131     RETURN_IF_ERROR(ExportRawEvents());
132     RETURN_IF_ERROR(ExportCpuProfileSamples());
133     RETURN_IF_ERROR(ExportMetadata());
134     RETURN_IF_ERROR(ExportStats());
135     RETURN_IF_ERROR(ExportMemorySnapshots());
136     return base::OkStatus();
137   }
138 
139  private:
140   class TraceFormatWriter {
141    public:
142     TraceFormatWriter(OutputWriter* output,
143                       ArgumentFilterPredicate argument_filter,
144                       MetadataFilterPredicate metadata_filter,
145                       LabelFilterPredicate label_filter)
146         : output_(output),
147           argument_filter_(std::move(argument_filter)),
148           metadata_filter_(std::move(metadata_filter)),
149           label_filter_(std::move(label_filter)),
150           first_event_(true) {
151       Json::StreamWriterBuilder b;
152       b.settings_["indentation"] = "";
153       writer_.reset(b.newStreamWriter());
154       WriteHeader();
155     }
156 
157     ~TraceFormatWriter() { WriteFooter(); }
158 
159     void WriteCommonEvent(const Json::Value& event) {
160       if (label_filter_ && !label_filter_("traceEvents"))
161         return;
162 
163       DoWriteEvent(event);
164     }
165 
166     void AddAsyncBeginEvent(const Json::Value& event) {
167       if (label_filter_ && !label_filter_("traceEvents"))
168         return;
169 
170       async_begin_events_.push_back(event);
171     }
172 
173     void AddAsyncInstantEvent(const Json::Value& event) {
174       if (label_filter_ && !label_filter_("traceEvents"))
175         return;
176 
177       async_instant_events_.push_back(event);
178     }
179 
180     void AddAsyncEndEvent(const Json::Value& event) {
181       if (label_filter_ && !label_filter_("traceEvents"))
182         return;
183 
184       async_end_events_.push_back(event);
185     }
186 
187     void SortAndEmitAsyncEvents() {
188       // Catapult doesn't handle out-of-order begin/end events well, especially
189       // when they share a timestamp but appear in the wrong order. Since
190       // we process events sorted by begin timestamp, |async_begin_events_| and
191       // |async_instant_events_| are already sorted. We now only have to sort
192       // |async_end_events_| and merge-sort all events into a single sequence.
193 
194       // Sort |async_end_events_|. Note that we should order by ascending
195       // timestamp, but in reverse-stable order. This way, a child slice's end
196       // is emitted before its parent's end event, even if both end events have
197       // the same timestamp. To accomplish this, we perform a stable sort in
198       // descending order and later iterate via reverse iterators.
199       struct {
200         bool operator()(const Json::Value& a, const Json::Value& b) const {
201           return a["ts"].asInt64() > b["ts"].asInt64();
202         }
203       } CompareEvents;
204       std::stable_sort(async_end_events_.begin(), async_end_events_.end(),
205                        CompareEvents);
206 
207       // Merge sort by timestamp. If events share the same timestamp, prefer
208       // instant events, then end events, so that old slices close before new
209       // ones are opened, but instant events remain in their deepest nesting
210       // level.
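      // Illustrative example (hypothetical events, not taken from a real
      // trace): if an async instant ("n"), an async end ("e") and an async
      // begin ("b") all have ts == 100, they are emitted in the order n, e, b,
      // so the old slice closes before the new one opens while the instant
      // stays at its deepest nesting level.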
211       auto instant_event_it = async_instant_events_.begin();
212       auto end_event_it = async_end_events_.rbegin();
213       auto begin_event_it = async_begin_events_.begin();
214 
215       auto has_instant_event = instant_event_it != async_instant_events_.end();
216       auto has_end_event = end_event_it != async_end_events_.rend();
217       auto has_begin_event = begin_event_it != async_begin_events_.end();
218 
219       auto emit_next_instant = [&instant_event_it, &has_instant_event, this]() {
220         DoWriteEvent(*instant_event_it);
221         instant_event_it++;
222         has_instant_event = instant_event_it != async_instant_events_.end();
223       };
224       auto emit_next_end = [&end_event_it, &has_end_event, this]() {
225         DoWriteEvent(*end_event_it);
226         end_event_it++;
227         has_end_event = end_event_it != async_end_events_.rend();
228       };
229       auto emit_next_begin = [&begin_event_it, &has_begin_event, this]() {
230         DoWriteEvent(*begin_event_it);
231         begin_event_it++;
232         has_begin_event = begin_event_it != async_begin_events_.end();
233       };
234 
235       auto emit_next_instant_or_end = [&instant_event_it, &end_event_it,
236                                        &emit_next_instant, &emit_next_end]() {
237         if ((*instant_event_it)["ts"].asInt64() <=
238             (*end_event_it)["ts"].asInt64()) {
239           emit_next_instant();
240         } else {
241           emit_next_end();
242         }
243       };
244       auto emit_next_instant_or_begin = [&instant_event_it, &begin_event_it,
245                                          &emit_next_instant,
246                                          &emit_next_begin]() {
247         if ((*instant_event_it)["ts"].asInt64() <=
248             (*begin_event_it)["ts"].asInt64()) {
249           emit_next_instant();
250         } else {
251           emit_next_begin();
252         }
253       };
254       auto emit_next_end_or_begin = [&end_event_it, &begin_event_it,
255                                      &emit_next_end, &emit_next_begin]() {
256         if ((*end_event_it)["ts"].asInt64() <=
257             (*begin_event_it)["ts"].asInt64()) {
258           emit_next_end();
259         } else {
260           emit_next_begin();
261         }
262       };
263 
264       // While we still have events in all iterators, consider each.
265       while (has_instant_event && has_end_event && has_begin_event) {
266         if ((*instant_event_it)["ts"].asInt64() <=
267             (*end_event_it)["ts"].asInt64()) {
268           emit_next_instant_or_begin();
269         } else {
270           emit_next_end_or_begin();
271         }
272       }
273 
274       // Only instant and end events left.
275       while (has_instant_event && has_end_event) {
276         emit_next_instant_or_end();
277       }
278 
279       // Only instant and begin events left.
280       while (has_instant_event && has_begin_event) {
281         emit_next_instant_or_begin();
282       }
283 
284       // Only end and begin events left.
285       while (has_end_event && has_begin_event) {
286         emit_next_end_or_begin();
287       }
288 
289       // Remaining instant events.
290       while (has_instant_event) {
291         emit_next_instant();
292       }
293 
294       // Remaining end events.
295       while (has_end_event) {
296         emit_next_end();
297       }
298 
299       // Remaining begin events.
300       while (has_begin_event) {
301         emit_next_begin();
302       }
303     }
304 
305     void WriteMetadataEvent(const char* metadata_type,
306                             const char* metadata_arg_name,
307                             const char* metadata_arg_value,
308                             uint32_t pid,
309                             uint32_t tid) {
310       if (label_filter_ && !label_filter_("traceEvents"))
311         return;
312 
313       std::ostringstream ss;
314       if (!first_event_)
315         ss << ",\n";
316 
317       Json::Value value;
318       value["ph"] = "M";
319       value["cat"] = "__metadata";
320       value["ts"] = 0;
321       value["name"] = metadata_type;
322       value["pid"] = Json::Int(pid);
323       value["tid"] = Json::Int(tid);
324 
325       Json::Value args;
326       args[metadata_arg_name] = metadata_arg_value;
327       value["args"] = args;
328 
329       writer_->write(value, &ss);
330       output_->AppendString(ss.str());
331       first_event_ = false;
332     }
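    // Illustrative output of the method above, assuming a call like
    // WriteMetadataEvent("process_name", "name", "Browser", 1234, 0):
    //   {"args":{"name":"Browser"},"cat":"__metadata","name":"process_name",
    //    "ph":"M","pid":1234,"tid":0,"ts":0}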
333 
334     void MergeMetadata(const Json::Value& value) {
335       for (const auto& member : value.getMemberNames()) {
336         metadata_[member] = value[member];
337       }
338     }
339 
340     void AppendTelemetryMetadataString(const char* key, const char* value) {
341       metadata_["telemetry"][key].append(value);
342     }
343 
344     void AppendTelemetryMetadataInt(const char* key, int64_t value) {
345       metadata_["telemetry"][key].append(Json::Int64(value));
346     }
347 
348     void AppendTelemetryMetadataBool(const char* key, bool value) {
349       metadata_["telemetry"][key].append(value);
350     }
351 
352     void SetTelemetryMetadataTimestamp(const char* key, int64_t value) {
353       metadata_["telemetry"][key] = static_cast<double>(value) / 1000.0;
354     }
355 
356     void SetStats(const char* key, int64_t value) {
357       metadata_["trace_processor_stats"][key] = Json::Int64(value);
358     }
359 
360     void SetStats(const char* key, const IndexMap& indexed_values) {
361       constexpr const char* kBufferStatsPrefix = "traced_buf_";
362 
363       // Stats for the same buffer should be grouped together in the JSON.
364       if (strncmp(kBufferStatsPrefix, key, strlen(kBufferStatsPrefix)) == 0) {
365         for (const auto& value : indexed_values) {
366           metadata_["trace_processor_stats"]["traced_buf"][value.first]
367                    [key + strlen(kBufferStatsPrefix)] =
368                        Json::Int64(value.second);
369         }
370         return;
371       }
372 
373       // Other indexed value stats are exported as array under their key.
374       for (const auto& value : indexed_values) {
375         metadata_["trace_processor_stats"][key][value.first] =
376             Json::Int64(value.second);
377       }
378     }
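    // Illustrative example (hypothetical values): an indexed stat named
    // "traced_buf_bytes_written" with {0: 42} is grouped as
    //   metadata["trace_processor_stats"]["traced_buf"][0]["bytes_written"] = 42
    // while a non-buffer indexed stat "foo" with {1: 3} becomes
    //   metadata["trace_processor_stats"]["foo"][1] = 3.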
379 
380     void AddSystemTraceData(const std::string& data) {
381       system_trace_data_ += data;
382     }
383 
384     void AddUserTraceData(const std::string& data) {
385       if (user_trace_data_.empty())
386         user_trace_data_ = "[";
387       user_trace_data_ += data;
388     }
389 
390    private:
391     void WriteHeader() {
392       if (!label_filter_)
393         output_->AppendString("{\"traceEvents\":[\n");
394     }
395 
396     void WriteFooter() {
397       SortAndEmitAsyncEvents();
398 
399       // Filter metadata entries.
400       if (metadata_filter_) {
401         for (const auto& member : metadata_.getMemberNames()) {
402           if (!metadata_filter_(member.c_str()))
403             metadata_[member] = kStrippedArgument;
404         }
405       }
406 
407       if ((!label_filter_ || label_filter_("traceEvents")) &&
408           !user_trace_data_.empty()) {
409         user_trace_data_ += "]";
410 
411         Json::CharReaderBuilder builder;
412         auto reader =
413             std::unique_ptr<Json::CharReader>(builder.newCharReader());
414         Json::Value result;
415         if (reader->parse(user_trace_data_.data(),
416                           user_trace_data_.data() + user_trace_data_.length(),
417                           &result, nullptr)) {
418           for (const auto& event : result) {
419             WriteCommonEvent(event);
420           }
421         } else {
422           PERFETTO_DLOG(
423               "can't parse legacy user json trace export, skipping. data: %s",
424               user_trace_data_.c_str());
425         }
426       }
427 
428       std::ostringstream ss;
429       if (!label_filter_)
430         ss << "]";
431 
432       if ((!label_filter_ || label_filter_("systemTraceEvents")) &&
433           !system_trace_data_.empty()) {
434         ss << ",\"systemTraceEvents\":\n";
435         writer_->write(Json::Value(system_trace_data_), &ss);
436       }
437 
438       if ((!label_filter_ || label_filter_("metadata")) && !metadata_.empty()) {
439         ss << ",\"metadata\":\n";
440         writer_->write(metadata_, &ss);
441       }
442 
443       if (!label_filter_)
444         ss << "}";
445 
446       output_->AppendString(ss.str());
447     }
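    // Illustrative overall shape of the exported JSON when no label filter is
    // set (field contents depend on the trace):
    //   {"traceEvents":[...],
    //    "systemTraceEvents":"...",   // only if system trace data was added
    //    "metadata":{...}}            // only if metadata is non-empty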
448 
449     void DoWriteEvent(const Json::Value& event) {
450       std::ostringstream ss;
451       if (!first_event_)
452         ss << ",\n";
453 
454       ArgumentNameFilterPredicate argument_name_filter;
455       bool strip_args =
456           argument_filter_ &&
457           !argument_filter_(event["cat"].asCString(), event["name"].asCString(),
458                             &argument_name_filter);
459       if ((strip_args || argument_name_filter) && event.isMember("args")) {
460         Json::Value event_copy = event;
461         if (strip_args) {
462           event_copy["args"] = kStrippedArgument;
463         } else {
464           auto& args = event_copy["args"];
465           for (const auto& member : event["args"].getMemberNames()) {
466             if (!argument_name_filter(member.c_str()))
467               args[member] = kStrippedArgument;
468           }
469         }
470         writer_->write(event_copy, &ss);
471       } else {
472         writer_->write(event, &ss);
473       }
474       first_event_ = false;
475 
476       output_->AppendString(ss.str());
477     }
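    // Illustrative behaviour of the argument filter (hypothetical predicate):
    // if |argument_filter_| rejects all args for an event's category/name, the
    // whole "args" value is replaced by "__stripped__"; if it only vetoes some
    // argument names, just those members are replaced.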
478 
479     OutputWriter* output_;
480     ArgumentFilterPredicate argument_filter_;
481     MetadataFilterPredicate metadata_filter_;
482     LabelFilterPredicate label_filter_;
483 
484     std::unique_ptr<Json::StreamWriter> writer_;
485     bool first_event_;
486     Json::Value metadata_;
487     std::string system_trace_data_;
488     std::string user_trace_data_;
489     std::vector<Json::Value> async_begin_events_;
490     std::vector<Json::Value> async_instant_events_;
491     std::vector<Json::Value> async_end_events_;
492   };
493 
494   class ArgsBuilder {
495    public:
496     explicit ArgsBuilder(const TraceStorage* storage)
497         : storage_(storage),
498           empty_value_(Json::objectValue),
499           nan_value_(Json::StaticString("NaN")),
500           inf_value_(Json::StaticString("Infinity")),
501           neg_inf_value_(Json::StaticString("-Infinity")) {
502       const auto& arg_table = storage_->arg_table();
503       uint32_t count = arg_table.row_count();
504       if (count == 0) {
505         args_sets_.resize(1, empty_value_);
506         return;
507       }
508       args_sets_.resize(arg_table[count - 1].arg_set_id() + 1, empty_value_);
509 
510       for (auto it = arg_table.IterateRows(); it; ++it) {
511         ArgSetId set_id = it.arg_set_id();
512         const char* key = storage->GetString(it.key()).c_str();
513         Variadic value = storage_->GetArgValue(it.row_number().row_number());
514         AppendArg(set_id, key, VariadicToJson(value));
515       }
516       PostprocessArgs();
517     }
518 
519     const Json::Value& GetArgs(ArgSetId set_id) const {
520       // If |set_id| was empty and added to the storage last, it may not be in
521       // args_sets_.
522       if (set_id >= args_sets_.size())
523         return empty_value_;
524       return args_sets_[set_id];
525     }
526 
527    private:
528     Json::Value VariadicToJson(Variadic variadic) {
529       switch (variadic.type) {
530         case Variadic::kInt:
531           return Json::Int64(variadic.int_value);
532         case Variadic::kUint:
533           return Json::UInt64(variadic.uint_value);
534         case Variadic::kString:
535           return GetNonNullString(storage_, variadic.string_value);
536         case Variadic::kReal:
537           if (std::isnan(variadic.real_value)) {
538             return nan_value_;
539           } else if (std::isinf(variadic.real_value) &&
540                      variadic.real_value > 0) {
541             return inf_value_;
542           } else if (std::isinf(variadic.real_value) &&
543                      variadic.real_value < 0) {
544             return neg_inf_value_;
545           } else {
546             return variadic.real_value;
547           }
548         case Variadic::kPointer:
549           return base::Uint64ToHexString(variadic.pointer_value);
550         case Variadic::kBool:
551           return variadic.bool_value;
552         case Variadic::kNull:
553           return base::Uint64ToHexString(0);
554         case Variadic::kJson:
555           Json::CharReaderBuilder b;
556           auto reader = std::unique_ptr<Json::CharReader>(b.newCharReader());
557 
558           Json::Value result;
559           std::string v = GetNonNullString(storage_, variadic.json_value);
560           reader->parse(v.data(), v.data() + v.length(), &result, nullptr);
561           return result;
562       }
563       PERFETTO_FATAL("Not reached");  // For gcc.
564     }
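    // Illustrative mappings produced above: a kReal NaN becomes the string
    // "NaN", a kPointer value 0xabc becomes "0xabc", and a kNull value is
    // exported as "0x0".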
565 
566     void AppendArg(ArgSetId set_id,
567                    const std::string& key,
568                    const Json::Value& value) {
569       Json::Value* target = &args_sets_[set_id];
570       for (base::StringSplitter parts(key, '.'); parts.Next();) {
571         if (PERFETTO_UNLIKELY(!target->isNull() && !target->isObject())) {
572           PERFETTO_DLOG("Malformed arguments. Can't append %s to %s.",
573                         key.c_str(),
574                         args_sets_[set_id].toStyledString().c_str());
575           return;
576         }
577         std::string key_part = parts.cur_token();
578         size_t bracketpos = key_part.find('[');
579         if (bracketpos == std::string::npos) {  // A single item
580           target = &(*target)[key_part];
581         } else {  // A list item
582           target = &(*target)[key_part.substr(0, bracketpos)];
583           while (bracketpos != std::string::npos) {
584             // We constructed this string from an int earlier in trace_processor
585             // so it shouldn't be possible for this (or the StringToUInt32
586             // below) to fail.
587             std::string s =
588                 key_part.substr(bracketpos + 1, key_part.find(']', bracketpos) -
589                                                     bracketpos - 1);
590             if (PERFETTO_UNLIKELY(!target->isNull() && !target->isArray())) {
591               PERFETTO_DLOG("Malformed arguments. Can't append %s to %s.",
592                             key.c_str(),
593                             args_sets_[set_id].toStyledString().c_str());
594               return;
595             }
596             std::optional<uint32_t> index = base::StringToUInt32(s);
597             if (PERFETTO_UNLIKELY(!index)) {
598               PERFETTO_ELOG("Expected to be able to extract index from %s",
599                             key_part.c_str());
600               return;
601             }
602             target = &(*target)[index.value()];
603             bracketpos = key_part.find('[', bracketpos + 1);
604           }
605         }
606       }
607       *target = value;
608     }
609 
610     void PostprocessArgs() {
611       for (Json::Value& args : args_sets_) {
612         // Move all fields from "debug" key to upper level.
613         if (args.isMember("debug")) {
614           Json::Value debug = args["debug"];
615           args.removeMember("debug");
616           for (const auto& member : debug.getMemberNames()) {
617             args[member] = debug[member];
618           }
619         }
620 
621         // Rename source fields.
622         if (args.isMember("task")) {
623           if (args["task"].isMember("posted_from")) {
624             Json::Value posted_from = args["task"]["posted_from"];
625             args["task"].removeMember("posted_from");
626             if (posted_from.isMember("function_name")) {
627               args["src_func"] = posted_from["function_name"];
628               args["src_file"] = posted_from["file_name"];
629             } else if (posted_from.isMember("file_name")) {
630               args["src"] = posted_from["file_name"];
631             }
632           }
633           if (args["task"].empty())
634             args.removeMember("task");
635         }
636         if (args.isMember("source")) {
637           Json::Value source = args["source"];
638           if (source.isObject() && source.isMember("function_name")) {
639             args["function_name"] = source["function_name"];
640             args["file_name"] = source["file_name"];
641             args.removeMember("source");
642           }
643         }
644       }
645     }
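    // Illustrative example of the rewriting above:
    //   {"debug":{"x":1},
    //    "task":{"posted_from":{"file_name":"foo.cc","function_name":"Bar"}}}
    // becomes
    //   {"x":1,"src_func":"Bar","src_file":"foo.cc"}.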
646 
647     const TraceStorage* storage_;
648     std::vector<Json::Value> args_sets_;
649     const Json::Value empty_value_;
650     const Json::Value nan_value_;
651     const Json::Value inf_value_;
652     const Json::Value neg_inf_value_;
653   };
654 
655   base::Status MapUniquePidsAndTids() {
656     const auto& process_table = storage_->process_table();
657     for (auto it = process_table.IterateRows(); it; ++it) {
658       UniquePid upid = it.id().value;
659       uint32_t exported_pid = it.pid();
660       auto it_and_inserted =
661           exported_pids_to_upids_.emplace(exported_pid, upid);
662       if (!it_and_inserted.second) {
663         exported_pid = NextExportedPidOrTidForDuplicates();
664         it_and_inserted = exported_pids_to_upids_.emplace(exported_pid, upid);
665       }
666       upids_to_exported_pids_.emplace(upid, exported_pid);
667     }
668 
669     const auto& thread_table = storage_->thread_table();
670     for (auto it = thread_table.IterateRows(); it; ++it) {
671       UniqueTid utid = it.id().value;
672 
673       uint32_t exported_pid = 0;
674       std::optional<UniquePid> upid = it.upid();
675       if (upid) {
676         auto exported_pid_it = upids_to_exported_pids_.find(*upid);
677         PERFETTO_DCHECK(exported_pid_it != upids_to_exported_pids_.end());
678         exported_pid = exported_pid_it->second;
679       }
680 
681       uint32_t exported_tid = it.tid();
682       auto it_and_inserted = exported_pids_and_tids_to_utids_.emplace(
683           std::make_pair(exported_pid, exported_tid), utid);
684       if (!it_and_inserted.second) {
685         exported_tid = NextExportedPidOrTidForDuplicates();
686         it_and_inserted = exported_pids_and_tids_to_utids_.emplace(
687             std::make_pair(exported_pid, exported_tid), utid);
688       }
689       utids_to_exported_pids_and_tids_.emplace(
690           utid, std::make_pair(exported_pid, exported_tid));
691     }
692     return base::OkStatus();
693   }
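  // Illustrative example: if two distinct upids both report pid 100 (e.g. due
  // to pid reuse), the first keeps 100 as its exported pid and the second gets
  // a synthetic value from NextExportedPidOrTidForDuplicates(); duplicate tids
  // within the same exported pid are disambiguated the same way.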
694 
695   base::Status ExportThreadNames() {
696     const auto& thread_table = storage_->thread_table();
697     for (auto it = thread_table.IterateRows(); it; ++it) {
698       auto opt_name = it.name();
699       if (opt_name.has_value()) {
700         UniqueTid utid = it.id().value;
701         const char* thread_name = GetNonNullString(storage_, opt_name);
702         auto pid_and_tid = UtidToPidAndTid(utid);
703         writer_.WriteMetadataEvent("thread_name", "name", thread_name,
704                                    pid_and_tid.first, pid_and_tid.second);
705       }
706     }
707     return base::OkStatus();
708   }
709 
710   base::Status ExportProcessNames() {
711     const auto& process_table = storage_->process_table();
712     for (auto it = process_table.IterateRows(); it; ++it) {
713       auto opt_name = it.name();
714       if (opt_name.has_value()) {
715         UniquePid upid = it.id().value;
716         const char* process_name = GetNonNullString(storage_, opt_name);
717         writer_.WriteMetadataEvent("process_name", "name", process_name,
718                                    UpidToPid(upid), /*tid=*/0);
719       }
720     }
721     return base::OkStatus();
722   }
723 
724   // For each process, writes an approximate uptime based on the process's
725   // start time and the end of the last slice in the entire trace. The same
726   // last slice is used for all processes, so a process may have ended earlier.
727   base::Status ExportProcessUptimes() {
728     int64_t last_timestamp_ns = FindLastSliceTimestamp();
729     if (last_timestamp_ns <= 0)
730       return base::OkStatus();
731 
732     const auto& process_table = storage_->process_table();
733     for (auto it = process_table.IterateRows(); it; ++it) {
734       std::optional<int64_t> start_timestamp_ns = it.start_ts();
735       if (!start_timestamp_ns.has_value()) {
736         continue;
737       }
738 
739       UniquePid upid = it.id().value;
740       int64_t process_uptime_seconds =
741           (last_timestamp_ns - start_timestamp_ns.value()) /
742           (1000l * 1000 * 1000);
743       writer_.WriteMetadataEvent("process_uptime_seconds", "uptime",
744                                  std::to_string(process_uptime_seconds).c_str(),
745                                  UpidToPid(upid), /*tid=*/0);
746     }
747 
748     return base::OkStatus();
749   }
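  // Illustrative calculation: a process with start_ts = 2e9 ns in a trace
  // whose last slice ends at 12e9 ns is reported with
  // "process_uptime_seconds" = 10.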
750 
751   // Returns the last slice's end timestamp for the entire trace. If no slices
752   // are found, 0 is returned.
753   int64_t FindLastSliceTimestamp() {
754     int64_t last_ts = 0;
755     for (auto it = storage_->slice_table().IterateRows(); it; ++it) {
756       last_ts = std::max(last_ts, it.ts() + it.dur());
757     }
758     return last_ts;
759   }
760 
761   base::Status ExportSlices() {
762     const auto& slices = storage_->slice_table();
763     for (auto it = slices.IterateRows(); it; ++it) {
764       // Skip slices with empty category - these are ftrace/system slices that
765       // were also imported into the raw table and will be exported from there
766       // by trace_to_text.
767       // TODO(b/153609716): Add a src column or do_not_export flag instead.
768       if (!it.category())
769         continue;
770       auto cat = storage_->GetString(*it.category());
771       if (cat.c_str() == nullptr || cat == "binder")
772         continue;
773 
774       Json::Value event;
775       event["ts"] = Json::Int64(it.ts() / 1000);
776       event["cat"] = GetNonNullString(storage_, it.category());
777       event["name"] = GetNonNullString(storage_, it.name());
778       event["pid"] = 0;
779       event["tid"] = 0;
780 
781       std::optional<UniqueTid> legacy_utid;
782       std::string legacy_phase;
783 
784       event["args"] = args_builder_.GetArgs(it.arg_set_id());  // Makes a copy.
785       if (event["args"].isMember(kLegacyEventArgsKey)) {
786         const auto& legacy_args = event["args"][kLegacyEventArgsKey];
787 
788         if (legacy_args.isMember(kLegacyEventPassthroughUtidKey)) {
789           legacy_utid = legacy_args[kLegacyEventPassthroughUtidKey].asUInt();
790         }
791         if (legacy_args.isMember(kLegacyEventPhaseKey)) {
792           legacy_phase = legacy_args[kLegacyEventPhaseKey].asString();
793         }
794 
795         event["args"].removeMember(kLegacyEventArgsKey);
796       }
797 
798       // To prevent duplicate export of slices, only export slices on descriptor
799       // or chrome tracks (i.e. TrackEvent slices). Slices on other tracks may
800       // also be present as raw events and handled by trace_to_text. Only add
801       // more track types here if they are not already covered by trace_to_text.
802       TrackId track_id = it.track_id();
803 
804       const auto& track_table = storage_->track_table();
805 
806       auto track_row_ref = *track_table.FindById(track_id);
807       auto track_args_id = track_row_ref.source_arg_set_id();
808       const Json::Value* track_args = nullptr;
809       bool legacy_chrome_track = false;
810       bool is_child_track = false;
811       if (track_args_id) {
812         track_args = &args_builder_.GetArgs(*track_args_id);
813         legacy_chrome_track = (*track_args)["source"].asString() == "chrome";
814         is_child_track = track_args->isMember("is_root_in_scope") &&
815                          !(*track_args)["is_root_in_scope"].asBool();
816       }
817 
818       const auto& thread_track = storage_->thread_track_table();
819       const auto& process_track = storage_->process_track_table();
820       const auto& virtual_track_slices = storage_->virtual_track_slices();
821 
822       int64_t duration_ns = it.dur();
823       std::optional<int64_t> thread_ts_ns;
824       std::optional<int64_t> thread_duration_ns;
825       std::optional<int64_t> thread_instruction_count;
826       std::optional<int64_t> thread_instruction_delta;
827 
828       if (it.thread_dur()) {
829         thread_ts_ns = it.thread_ts();
830         thread_duration_ns = it.thread_dur();
831         thread_instruction_count = it.thread_instruction_count();
832         thread_instruction_delta = it.thread_instruction_delta();
833       } else {
834         SliceId id = it.id();
835         std::optional<uint32_t> vtrack_slice_row =
836             virtual_track_slices.FindRowForSliceId(id);
837         if (vtrack_slice_row) {
838           thread_ts_ns =
839               virtual_track_slices.thread_timestamp_ns()[*vtrack_slice_row];
840           thread_duration_ns =
841               virtual_track_slices.thread_duration_ns()[*vtrack_slice_row];
842           thread_instruction_count =
843               virtual_track_slices
844                   .thread_instruction_counts()[*vtrack_slice_row];
845           thread_instruction_delta =
846               virtual_track_slices
847                   .thread_instruction_deltas()[*vtrack_slice_row];
848         }
849       }
850 
851       auto tt_rr = thread_track.FindById(track_id);
852       if (tt_rr && !is_child_track) {
853         // Synchronous (thread) slice or instant event.
854         UniqueTid utid = tt_rr->utid();
855         auto pid_and_tid = UtidToPidAndTid(utid);
856         event["pid"] = Json::Int(pid_and_tid.first);
857         event["tid"] = Json::Int(pid_and_tid.second);
858 
859         if (duration_ns == 0) {
860           if (legacy_phase.empty()) {
861             // Use "I" instead of "i" phase for backwards-compat with old
862             // consumers.
863             event["ph"] = "I";
864           } else {
865             event["ph"] = legacy_phase;
866           }
867           if (thread_ts_ns && *thread_ts_ns > 0) {
868             event["tts"] = Json::Int64(*thread_ts_ns / 1000);
869           }
870           if (thread_instruction_count && *thread_instruction_count > 0) {
871             event["ticount"] = Json::Int64(*thread_instruction_count);
872           }
873           event["s"] = "t";
874         } else {
875           if (duration_ns > 0) {
876             event["ph"] = "X";
877             event["dur"] = Json::Int64(duration_ns / 1000);
878           } else {
879             // If the slice didn't finish, the duration may be negative. Only
880             // write a begin event without an end event in this case.
881             event["ph"] = "B";
882           }
883           if (thread_ts_ns && *thread_ts_ns > 0) {
884             event["tts"] = Json::Int64(*thread_ts_ns / 1000);
885             // Only write thread duration for completed events.
886             if (duration_ns > 0 && thread_duration_ns)
887               event["tdur"] = Json::Int64(*thread_duration_ns / 1000);
888           }
889           if (thread_instruction_count && *thread_instruction_count > 0) {
890             event["ticount"] = Json::Int64(*thread_instruction_count);
891             // Only write thread instruction delta for completed events.
892             if (duration_ns > 0 && thread_instruction_delta)
893               event["tidelta"] = Json::Int64(*thread_instruction_delta);
894           }
895         }
896         writer_.WriteCommonEvent(event);
897       } else if (is_child_track ||
898                  (legacy_chrome_track && track_args->isMember("trace_id"))) {
899         // Async event slice.
900         auto pt_rr = process_track.FindById(track_id);
901         if (legacy_chrome_track) {
902           // Legacy async tracks are always process-associated and have args.
903           PERFETTO_DCHECK(pt_rr);
904           PERFETTO_DCHECK(track_args);
905           UniquePid upid = pt_rr->upid();
906           uint32_t exported_pid = UpidToPid(upid);
907           event["pid"] = Json::Int(exported_pid);
908           event["tid"] =
909               Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
910                                     : exported_pid);
911 
912           // Preserve original event IDs for legacy tracks. This is so that e.g.
913           // memory dump IDs show up correctly in the JSON trace.
914           PERFETTO_DCHECK(track_args->isMember("trace_id"));
915           PERFETTO_DCHECK(track_args->isMember("trace_id_is_process_scoped"));
916           PERFETTO_DCHECK(track_args->isMember("source_scope"));
917           uint64_t trace_id =
918               static_cast<uint64_t>((*track_args)["trace_id"].asInt64());
919           std::string source_scope = (*track_args)["source_scope"].asString();
920           if (!source_scope.empty())
921             event["scope"] = source_scope;
922           bool trace_id_is_process_scoped =
923               (*track_args)["trace_id_is_process_scoped"].asBool();
924           if (trace_id_is_process_scoped) {
925             event["id2"]["local"] = base::Uint64ToHexString(trace_id);
926           } else {
927             // Some legacy importers don't understand "id2" fields, so we use
928             // the "usually" global "id" field instead. This works as long as
929             // the event phase is not in {'N', 'D', 'O', '(', ')'}, see
930             // "LOCAL_ID_PHASES" in catapult.
931             event["id"] = base::Uint64ToHexString(trace_id);
932           }
933         } else {
934           if (tt_rr) {
935             UniqueTid utid = tt_rr->utid();
936             auto pid_and_tid = UtidToPidAndTid(utid);
937             event["pid"] = Json::Int(pid_and_tid.first);
938             event["tid"] = Json::Int(pid_and_tid.second);
939             event["id2"]["local"] = base::Uint64ToHexString(track_id.value);
940           } else if (pt_rr) {
941             uint32_t upid = pt_rr->upid();
942             uint32_t exported_pid = UpidToPid(upid);
943             event["pid"] = Json::Int(exported_pid);
944             event["tid"] =
945                 Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
946                                       : exported_pid);
947             event["id2"]["local"] = base::Uint64ToHexString(track_id.value);
948           } else {
949             if (legacy_utid) {
950               auto pid_and_tid = UtidToPidAndTid(*legacy_utid);
951               event["pid"] = Json::Int(pid_and_tid.first);
952               event["tid"] = Json::Int(pid_and_tid.second);
953             }
954 
955             // Some legacy importers don't understand "id2" fields, so we use
956             // the "usually" global "id" field instead. This works as long as
957             // the event phase is not in {'N', 'D', 'O', '(', ')'}, see
958             // "LOCAL_ID_PHASES" in catapult.
959             event["id"] = base::Uint64ToHexString(track_id.value);
960           }
961         }
962 
963         if (thread_ts_ns && *thread_ts_ns > 0) {
964           event["tts"] = Json::Int64(*thread_ts_ns / 1000);
965           event["use_async_tts"] = Json::Int(1);
966         }
967         if (thread_instruction_count && *thread_instruction_count > 0) {
968           event["ticount"] = Json::Int64(*thread_instruction_count);
969           event["use_async_tts"] = Json::Int(1);
970         }
971 
972         if (duration_ns == 0) {
973           if (legacy_phase.empty()) {
974             // Instant async event.
975             event["ph"] = "n";
976             writer_.AddAsyncInstantEvent(event);
977           } else {
978             // Async step events.
979             event["ph"] = legacy_phase;
980             writer_.AddAsyncBeginEvent(event);
981           }
982         } else {  // Async start and end.
983           event["ph"] = legacy_phase.empty() ? "b" : legacy_phase;
984           writer_.AddAsyncBeginEvent(event);
985           // If the slice didn't finish, the duration may be negative. Don't
986           // write the end event in this case.
987           if (duration_ns > 0) {
988             event["ph"] = legacy_phase.empty() ? "e" : "F";
989             event["ts"] = Json::Int64((it.ts() + duration_ns) / 1000);
990             if (thread_ts_ns && thread_duration_ns && *thread_ts_ns > 0) {
991               event["tts"] =
992                   Json::Int64((*thread_ts_ns + *thread_duration_ns) / 1000);
993             }
994             if (thread_instruction_count && thread_instruction_delta &&
995                 *thread_instruction_count > 0) {
996               event["ticount"] = Json::Int64(
997                   (*thread_instruction_count + *thread_instruction_delta));
998             }
999             event["args"].clear();
1000             writer_.AddAsyncEndEvent(event);
1001           }
1002         }
1003       } else {
1004         // Global or process-scoped instant event.
1005         PERFETTO_DCHECK(legacy_chrome_track || !is_child_track);
1006         if (duration_ns != 0) {
1007           // We don't support exporting slices on the default global or process
1008           // track to JSON (JSON only supports instant events on these tracks).
1009           PERFETTO_DLOG(
1010               "skipping non-instant slice on global or process track");
1011         } else {
1012           if (legacy_phase.empty()) {
1013             // Use "I" instead of "i" phase for backwards-compat with old
1014             // consumers.
1015             event["ph"] = "I";
1016           } else {
1017             event["ph"] = legacy_phase;
1018           }
1019 
1020           auto pt_rr = process_track.FindById(track_id);
1021           if (pt_rr.has_value()) {
1022             UniquePid upid = pt_rr->upid();
1023             uint32_t exported_pid = UpidToPid(upid);
1024             event["pid"] = Json::Int(exported_pid);
1025             event["tid"] =
1026                 Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
1027                                       : exported_pid);
1028             event["s"] = "p";
1029           } else {
1030             event["s"] = "g";
1031           }
1032           writer_.WriteCommonEvent(event);
1033         }
1034       }
1035     }
1036     return base::OkStatus();
1037   }
1038 
1039   std::optional<Json::Value> CreateFlowEventV1(uint32_t flow_id,
1040                                                SliceId slice_id,
1041                                                const std::string& name,
1042                                                const std::string& cat,
1043                                                Json::Value args,
1044                                                bool flow_begin) {
1045     const auto& slices = storage_->slice_table();
1046     const auto& thread_tracks = storage_->thread_track_table();
1047 
1048     auto opt_slice_rr = slices.FindById(slice_id);
1049     if (!opt_slice_rr)
1050       return std::nullopt;
1051     auto slice_rr = opt_slice_rr.value();
1052 
1053     TrackId track_id = slice_rr.track_id();
1054     auto opt_ttrr = thread_tracks.FindById(track_id);
1055     // Catapult only supports flow events attached to thread-track slices.
1056     if (!opt_ttrr)
1057       return std::nullopt;
1058 
1059     auto pid_and_tid = UtidToPidAndTid(opt_ttrr->utid());
1060     Json::Value event;
1061     event["id"] = flow_id;
1062     event["pid"] = Json::Int(pid_and_tid.first);
1063     event["tid"] = Json::Int(pid_and_tid.second);
1064     event["cat"] = cat;
1065     event["name"] = name;
1066     event["ph"] = (flow_begin ? "s" : "f");
1067     event["ts"] = Json::Int64(slice_rr.ts() / 1000);
1068     if (!flow_begin) {
1069       event["bp"] = "e";
1070     }
1071     event["args"] = std::move(args);
1072     return std::move(event);
1073   }
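  // Illustrative result: a flow between two thread-track slices is exported as
  // a pair of events sharing the same numeric "id", with "ph":"s" at the
  // outgoing slice's timestamp and "ph":"f","bp":"e" at the incoming one.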
1074 
1075   base::Status ExportFlows() {
1076     const auto& flow_table = storage_->flow_table();
1077     const auto& slice_table = storage_->slice_table();
1078     for (auto it = flow_table.IterateRows(); it; ++it) {
1079       SliceId slice_out = it.slice_out();
1080       SliceId slice_in = it.slice_in();
1081       uint32_t arg_set_id = it.arg_set_id();
1082 
1083       std::string cat;
1084       std::string name;
1085       auto args = args_builder_.GetArgs(arg_set_id);
1086       if (arg_set_id != kInvalidArgSetId) {
1087         cat = args["cat"].asString();
1088         name = args["name"].asString();
1089         // Don't export these args since they are only used for this export and
1090         // weren't part of the original event.
1091         args.removeMember("name");
1092         args.removeMember("cat");
1093       } else {
1094         auto rr = slice_table.FindById(slice_out);
1095         PERFETTO_DCHECK(rr.has_value());
1096         cat = GetNonNullString(storage_, rr->category());
1097         name = GetNonNullString(storage_, rr->name());
1098       }
1099 
1100       uint32_t i = it.row_number().row_number();
1101       auto out_event = CreateFlowEventV1(i, slice_out, name, cat, args,
1102                                          /* flow_begin = */ true);
1103       auto in_event = CreateFlowEventV1(i, slice_in, name, cat, std::move(args),
1104                                         /* flow_begin = */ false);
1105 
1106       if (out_event && in_event) {
1107         writer_.WriteCommonEvent(out_event.value());
1108         writer_.WriteCommonEvent(in_event.value());
1109       }
1110     }
1111     return base::OkStatus();
1112   }
1113 
1114   Json::Value ConvertLegacyRawEventToJson(
1115       const tables::RawTable::ConstIterator& it) {
1116     Json::Value event;
1117     event["ts"] = Json::Int64(it.ts() / 1000);
1118 
1119     UniqueTid utid = static_cast<UniqueTid>(it.utid());
1120     auto pid_and_tid = UtidToPidAndTid(utid);
1121     event["pid"] = Json::Int(pid_and_tid.first);
1122     event["tid"] = Json::Int(pid_and_tid.second);
1123 
1124     // Raw legacy events store all other params in the arg set. Make a copy of
1125     // the converted args here, parse, and then remove the legacy params.
1126     event["args"] = args_builder_.GetArgs(it.arg_set_id());
1127     const Json::Value& legacy_args = event["args"][kLegacyEventArgsKey];
1128 
1129     PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventCategoryKey));
1130     event["cat"] = legacy_args[kLegacyEventCategoryKey];
1131 
1132     PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventNameKey));
1133     event["name"] = legacy_args[kLegacyEventNameKey];
1134 
1135     PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventPhaseKey));
1136     event["ph"] = legacy_args[kLegacyEventPhaseKey];
1137 
1138     // Object snapshot events are supposed to have a mandatory "snapshot" arg,
1139     // which may be removed in trace processor if it is empty.
1140     if (legacy_args[kLegacyEventPhaseKey] == "O" &&
1141         !event["args"].isMember("snapshot")) {
1142       event["args"]["snapshot"] = Json::Value(Json::objectValue);
1143     }
1144 
1145     if (legacy_args.isMember(kLegacyEventDurationNsKey))
1146       event["dur"] = legacy_args[kLegacyEventDurationNsKey].asInt64() / 1000;
1147 
1148     if (legacy_args.isMember(kLegacyEventThreadTimestampNsKey)) {
1149       event["tts"] =
1150           legacy_args[kLegacyEventThreadTimestampNsKey].asInt64() / 1000;
1151     }
1152 
1153     if (legacy_args.isMember(kLegacyEventThreadDurationNsKey)) {
1154       event["tdur"] =
1155           legacy_args[kLegacyEventThreadDurationNsKey].asInt64() / 1000;
1156     }
1157 
1158     if (legacy_args.isMember(kLegacyEventThreadInstructionCountKey))
1159       event["ticount"] = legacy_args[kLegacyEventThreadInstructionCountKey];
1160 
1161     if (legacy_args.isMember(kLegacyEventThreadInstructionDeltaKey))
1162       event["tidelta"] = legacy_args[kLegacyEventThreadInstructionDeltaKey];
1163 
1164     if (legacy_args.isMember(kLegacyEventUseAsyncTtsKey))
1165       event["use_async_tts"] = legacy_args[kLegacyEventUseAsyncTtsKey];
1166 
1167     if (legacy_args.isMember(kLegacyEventUnscopedIdKey)) {
1168       event["id"] = base::Uint64ToHexString(
1169           legacy_args[kLegacyEventUnscopedIdKey].asUInt64());
1170     }
1171 
1172     if (legacy_args.isMember(kLegacyEventGlobalIdKey)) {
1173       event["id2"]["global"] = base::Uint64ToHexString(
1174           legacy_args[kLegacyEventGlobalIdKey].asUInt64());
1175     }
1176 
1177     if (legacy_args.isMember(kLegacyEventLocalIdKey)) {
1178       event["id2"]["local"] = base::Uint64ToHexString(
1179           legacy_args[kLegacyEventLocalIdKey].asUInt64());
1180     }
1181 
1182     if (legacy_args.isMember(kLegacyEventIdScopeKey))
1183       event["scope"] = legacy_args[kLegacyEventIdScopeKey];
1184 
1185     event["args"].removeMember(kLegacyEventArgsKey);
1186 
1187     return event;
1188   }
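  // Illustrative conversion (hypothetical raw event): a legacy_event arg set
  // carrying phase "X" and duration_ns 2000 is exported with "ph":"X" and
  // "dur":2, since durations and thread timestamps are converted from ns to us.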
1189 
1190   base::Status ExportRawEvents() {
1191     std::optional<StringId> raw_legacy_event_key_id =
1192         storage_->string_pool().GetId("track_event.legacy_event");
1193     std::optional<StringId> raw_legacy_system_trace_event_id =
1194         storage_->string_pool().GetId("chrome_event.legacy_system_trace");
1195     std::optional<StringId> raw_legacy_user_trace_event_id =
1196         storage_->string_pool().GetId("chrome_event.legacy_user_trace");
1197     std::optional<StringId> raw_chrome_metadata_event_id =
1198         storage_->string_pool().GetId("chrome_event.metadata");
1199 
1200     const auto& events = storage_->raw_table();
1201     for (auto it = events.IterateRows(); it; ++it) {
1202       if (raw_legacy_event_key_id && it.name() == *raw_legacy_event_key_id) {
1203         Json::Value event = ConvertLegacyRawEventToJson(it);
1204         writer_.WriteCommonEvent(event);
1205       } else if (raw_legacy_system_trace_event_id &&
1206                  it.name() == *raw_legacy_system_trace_event_id) {
1207         Json::Value args = args_builder_.GetArgs(it.arg_set_id());
1208         PERFETTO_DCHECK(args.isMember("data"));
1209         writer_.AddSystemTraceData(args["data"].asString());
1210       } else if (raw_legacy_user_trace_event_id &&
1211                  it.name() == *raw_legacy_user_trace_event_id) {
1212         Json::Value args = args_builder_.GetArgs(it.arg_set_id());
1213         PERFETTO_DCHECK(args.isMember("data"));
1214         writer_.AddUserTraceData(args["data"].asString());
1215       } else if (raw_chrome_metadata_event_id &&
1216                  it.name() == *raw_chrome_metadata_event_id) {
1217         Json::Value args = args_builder_.GetArgs(it.arg_set_id());
1218         writer_.MergeMetadata(args);
1219       }
1220     }
1221     return base::OkStatus();
1222   }
1223 
1224   class MergedProfileSamplesEmitter {
1225    public:
1226     // The TraceFormatWriter must outlive this instance.
1227     explicit MergedProfileSamplesEmitter(TraceFormatWriter& writer)
1228         : writer_(writer) {}
1229 
1230     MergedProfileSamplesEmitter(const MergedProfileSamplesEmitter&) = delete;
1231     MergedProfileSamplesEmitter& operator=(const MergedProfileSamplesEmitter&) =
1232         delete;
1233     MergedProfileSamplesEmitter& operator=(
1234         MergedProfileSamplesEmitter&& value) = delete;
1235 
1236     uint64_t AddEventForUtid(UniqueTid utid,
1237                              int64_t ts,
1238                              CallsiteId callsite_id,
1239                              const Json::Value& event) {
1240       auto current_sample = current_events_.find(utid);
1241 
1242       // If there's a current entry for our thread and it matches the callsite
1243       // of the new sample, update the entry with the new timestamp. Otherwise
1244       // create a new entry.
1245       if (current_sample != current_events_.end() &&
1246           current_sample->second.callsite_id() == callsite_id) {
1247         current_sample->second.UpdateWithNewSample(ts);
1248         return current_sample->second.event_id();
1249       }
1250 
1251       if (current_sample != current_events_.end()) {
1252         current_events_.erase(current_sample);
1253       }
1254 
1255       auto new_entry = current_events_.emplace(
1256           std::piecewise_construct, std::forward_as_tuple(utid),
1257           std::forward_as_tuple(writer_, callsite_id, ts, event));
1258       return new_entry.first->second.event_id();
1259     }
1260 
1261     static uint64_t GenerateNewEventId() {
1262       // "n"-phase events are nestable async events which get tied together
1263       // with their id, so we need to give each one a unique ID as we only
1264       // want the samples to show up on their own track in the trace-viewer
1265       // but not nested together (unless they're nested under a merged event).
1266       static size_t g_id_counter = 0;
1267       return ++g_id_counter;
1268     }
1269 
1270    private:
1271     class Sample {
1272      public:
1273       Sample(TraceFormatWriter& writer,
1274              CallsiteId callsite_id,
1275              int64_t ts,
1276              Json::Value event)
1277           : writer_(writer),
1278             callsite_id_(callsite_id),
1279             begin_ts_(ts),
1280             end_ts_(ts),
1281             event_(std::move(event)),
1282             event_id_(MergedProfileSamplesEmitter::GenerateNewEventId()),
1283             sample_count_(1) {}
1284 
1285       Sample(const Sample&) = delete;
1286       Sample& operator=(const Sample&) = delete;
1287 
1288       Sample(Sample&&) = delete;
1289       Sample& operator=(Sample&& value) = delete;
1290 
1291       ~Sample() {
1292         // No point writing a merged event if we only got a single sample
1293         // as ExportCpuProfileSamples will already be writing the instant event.
1294         if (sample_count_ == 1)
1295           return;
1296 
1297         event_["id"] = base::Uint64ToHexString(event_id_);
1298 
1299         // Write the BEGIN event.
1300         event_["ph"] = "b";
1301         // We subtract 1us as a workaround for the first async event not
1302         // nesting underneath the parent event if the timestamp is identical.
1303         int64_t begin_in_us_ = begin_ts_ / 1000;
1304         event_["ts"] = Json::Int64(std::min(begin_in_us_ - 1, begin_in_us_));
1305         writer_.WriteCommonEvent(event_);
1306 
1307         // Write the END event.
1308         event_["ph"] = "e";
1309         event_["ts"] = Json::Int64(end_ts_ / 1000);
1310         // No need for args for the end event; remove them to save some space.
1311         event_["args"].clear();
1312         writer_.WriteCommonEvent(event_);
1313       }
1314 
1315       void UpdateWithNewSample(int64_t ts) {
1316         // We assume samples for a given thread will appear in timestamp
1317         // order; if this assumption stops holding true, we'll have to sort the
1318         // samples first.
1319         if (ts < end_ts_ || begin_ts_ > ts) {
1320           PERFETTO_ELOG(
1321               "Got a timestamp out of sequence while merging stack samples "
1322               "during JSON export!\n");
1323           PERFETTO_DCHECK(false);
1324         }
1325 
1326         end_ts_ = ts;
1327         sample_count_++;
1328       }
1329 
1330       uint64_t event_id() const { return event_id_; }
1331       CallsiteId callsite_id() const { return callsite_id_; }
1332 
1333       TraceFormatWriter& writer_;
1334       CallsiteId callsite_id_;
1335       int64_t begin_ts_;
1336       int64_t end_ts_;
1337       Json::Value event_;
1338       uint64_t event_id_;
1339       size_t sample_count_;
1340     };
1341 
1342     std::unordered_map<UniqueTid, Sample> current_events_;
1343     TraceFormatWriter& writer_;
1344   };
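  // Illustrative sketch of what the emitter above produces: when two or more
  // adjacent samples on a thread share a callsite, Sample's destructor writes
  // a nestable-async begin/end pair spanning the run, roughly:
  //
  //   {"ph": "b", "id": "0x1", "ts": <first_sample_us - 1>, ...}
  //   {"ph": "e", "id": "0x1", "ts": <last_sample_us>, "args": {}, ...}
  //
  // The individual "n"-phase instant events written by ExportCpuProfileSamples
  // then attach to this pair through the shared "id".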
1345 
1346   base::Status ExportCpuProfileSamples() {
1347     MergedProfileSamplesEmitter merged_sample_emitter(writer_);
1348 
1349     const tables::CpuProfileStackSampleTable& samples =
1350         storage_->cpu_profile_stack_sample_table();
1351     for (auto it = samples.IterateRows(); it; ++it) {
1352       Json::Value event;
1353       event["ts"] = Json::Int64(it.ts() / 1000);
1354 
1355       UniqueTid utid = static_cast<UniqueTid>(it.utid());
1356       auto pid_and_tid = UtidToPidAndTid(utid);
1357       event["pid"] = Json::Int(pid_and_tid.first);
1358       event["tid"] = Json::Int(pid_and_tid.second);
1359 
1360       event["ph"] = "n";
1361       event["cat"] = "disabled-by-default-cpu_profiler";
1362       event["name"] = "StackCpuSampling";
1363       event["s"] = "t";
1364 
1365       // Add a dummy thread timestamp to this event to match the format of
1366       // instant events. Useful in the UI to view args of a selected group of
1367       // samples.
1368       event["tts"] = Json::Int64(1);
1369 
1370       const auto& callsites = storage_->stack_profile_callsite_table();
1371       const auto& frames = storage_->stack_profile_frame_table();
1372       const auto& mappings = storage_->stack_profile_mapping_table();
1373 
1374       std::vector<std::string> callstack;
1375       std::optional<CallsiteId> opt_callsite_id = it.callsite_id();
1376 
1377       while (opt_callsite_id) {
1378         CallsiteId callsite_id = *opt_callsite_id;
1379         auto callsite_row = *callsites.FindById(callsite_id);
1380 
1381         FrameId frame_id = callsite_row.frame_id();
1382         auto frame_row = *frames.FindById(frame_id);
1383 
1384         MappingId mapping_id = frame_row.mapping();
1385         auto mapping_row = *mappings.FindById(mapping_id);
1386 
1387         NullTermStringView symbol_name;
1388         auto opt_symbol_set_id = frame_row.symbol_set_id();
1389         if (opt_symbol_set_id) {
1390           symbol_name = storage_->GetString(
1391               storage_->symbol_table()[*opt_symbol_set_id].name());
1392         }
1393 
1394         base::StackString<1024> frame_entry(
1395             "%s - %s [%s]\n",
1396             (symbol_name.empty()
1397                  ? base::Uint64ToHexString(
1398                        static_cast<uint64_t>(frame_row.rel_pc()))
1399                        .c_str()
1400                  : symbol_name.c_str()),
1401             GetNonNullString(storage_, mapping_row.name()),
1402             GetNonNullString(storage_, mapping_row.build_id()));
1403 
1404         callstack.emplace_back(frame_entry.ToStdString());
1405 
1406         opt_callsite_id = callsite_row.parent_id();
1407       }
1408 
1409       std::string merged_callstack;
1410       for (auto entry = callstack.rbegin(); entry != callstack.rend();
1411            ++entry) {
1412         merged_callstack += *entry;
1413       }
1414 
1415       event["args"]["frames"] = merged_callstack;
1416       event["args"]["process_priority"] = it.process_priority();
1417 
1418       // TODO(oysteine): Used for backwards compatibility with the memlog
1419       // pipeline, should remove once we've switched to looking directly at the
1420       // tid.
1421       event["args"]["thread_id"] = Json::Int(pid_and_tid.second);
1422 
1423       // Emit duration events for adjacent samples with the same callsite.
1424       // For now, only do this when the trace has already been symbolized, i.e.
1425       // the samples are not directly output by Chrome, to avoid interfering
1426       // with other processing pipelines.
1427       std::optional<CallsiteId> opt_current_callsite_id = it.callsite_id();
1428 
1429       if (opt_current_callsite_id && storage_->symbol_table().row_count() > 0) {
1430         uint64_t parent_event_id = merged_sample_emitter.AddEventForUtid(
1431             utid, it.ts(), *opt_current_callsite_id, event);
1432         event["id"] = base::Uint64ToHexString(parent_event_id);
1433       } else {
1434         event["id"] = base::Uint64ToHexString(
1435             MergedProfileSamplesEmitter::GenerateNewEventId());
1436       }
1437 
1438       writer_.WriteCommonEvent(event);
1439     }
1440 
1441     return base::OkStatus();
1442   }
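  // For reference, a single exported CPU sample is an "n"-phase instant event
  // shaped roughly like this (values are illustrative):
  //
  //   {
  //     "ph": "n", "cat": "disabled-by-default-cpu_profiler",
  //     "name": "StackCpuSampling", "s": "t", "tts": 1,
  //     "ts": 1234567, "pid": 100, "tid": 101, "id": "0x2a",
  //     "args": {"frames": "main - /bin/app [abcd]\n...",
  //              "process_priority": 0, "thread_id": 101}
  //   }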
1443 
1444   base::Status ExportMetadata() {
1445     const auto& trace_metadata = storage_->metadata_table();
1446 
1447     // Create a mapping from key string ids to keys.
1448     std::unordered_map<StringId, metadata::KeyId> key_map;
1449     for (uint32_t i = 0; i < metadata::kNumKeys; ++i) {
1450       auto id = *storage_->string_pool().GetId(metadata::kNames[i]);
1451       key_map[id] = static_cast<metadata::KeyId>(i);
1452     }
1453 
1454     for (auto it = trace_metadata.IterateRows(); it; ++it) {
1455       auto key_it = key_map.find(it.name());
1456       // Skip exporting dynamic entries; the cr-xxx entries that come from
1457       // the ChromeMetadata proto message are already exported from the raw
1458       // table.
1459       if (key_it == key_map.end())
1460         continue;
1461 
1462       // Cast away from enum type, as otherwise -Wswitch-enum will demand an
1463       // exhaustive list of cases, even if there's a default case.
1464       metadata::KeyId key = key_it->second;
1465       switch (static_cast<size_t>(key)) {
1466         case metadata::benchmark_description:
1467           writer_.AppendTelemetryMetadataString(
1468               "benchmarkDescriptions",
1469               storage_->string_pool().Get(*it.str_value()).c_str());
1470           break;
1471 
1472         case metadata::benchmark_name:
1473           writer_.AppendTelemetryMetadataString(
1474               "benchmarks",
1475               storage_->string_pool().Get(*it.str_value()).c_str());
1476           break;
1477 
1478         case metadata::benchmark_start_time_us:
1479           writer_.SetTelemetryMetadataTimestamp("benchmarkStart",
1480                                                 *it.int_value());
1481           break;
1482 
1483         case metadata::benchmark_had_failures:
1484           writer_.AppendTelemetryMetadataBool("hadFailures", *it.int_value());
1485           break;
1486 
1487         case metadata::benchmark_label:
1488           writer_.AppendTelemetryMetadataString(
1489               "labels", storage_->string_pool().Get(*it.str_value()).c_str());
1490           break;
1491 
1492         case metadata::benchmark_story_name:
1493           writer_.AppendTelemetryMetadataString(
1494               "stories", storage_->string_pool().Get(*it.str_value()).c_str());
1495           break;
1496 
1497         case metadata::benchmark_story_run_index:
1498           writer_.AppendTelemetryMetadataInt("storysetRepeats",
1499                                              *it.int_value());
1500           break;
1501 
1502         case metadata::benchmark_story_run_time_us:
1503           writer_.SetTelemetryMetadataTimestamp("traceStart", *it.int_value());
1504           break;
1505 
1506         case metadata::benchmark_story_tags:  // repeated
1507           writer_.AppendTelemetryMetadataString(
1508               "storyTags",
1509               storage_->string_pool().Get(*it.str_value()).c_str());
1510           break;
1511 
1512         default:
1513           PERFETTO_DLOG("Ignoring metadata key %zu", static_cast<size_t>(key));
1514           break;
1515       }
1516     }
1517     return base::OkStatus();
1518   }
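  // As a rough illustration (exact placement is decided by TraceFormatWriter,
  // defined earlier in this file), the benchmark_* rows handled above surface
  // as telemetry metadata in the exported JSON, e.g.:
  //
  //   "telemetry": {
  //     "benchmarks": ["loading.desktop"],
  //     "stories": ["cnn"],
  //     "storysetRepeats": [0],
  //     "hadFailures": [false]
  //   }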
1519 
1520   base::Status ExportStats() {
1521     const auto& stats = storage_->stats();
1522 
1523     for (size_t idx = 0; idx < stats::kNumKeys; idx++) {
1524       if (stats::kTypes[idx] == stats::kSingle) {
1525         writer_.SetStats(stats::kNames[idx], stats[idx].value);
1526       } else {
1527         PERFETTO_DCHECK(stats::kTypes[idx] == stats::kIndexed);
1528         writer_.SetStats(stats::kNames[idx], stats[idx].indexed_values);
1529       }
1530     }
1531 
1532     return base::OkStatus();
1533   }
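  // Single-valued stats map to one number and indexed stats to an object keyed
  // by index; where they land in the output is up to TraceFormatWriter::SetStats.
  // A hedged sketch of the shape (stat names below are placeholders):
  //
  //   "<single_valued_stat>": 42,
  //   "<indexed_stat>": {"0": 12, "1": 3}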
1534 
1535   base::Status ExportMemorySnapshots() {
1536     const auto& memory_snapshots = storage_->memory_snapshot_table();
1537     std::optional<StringId> private_footprint_id =
1538         storage_->string_pool().GetId("chrome.private_footprint_kb");
1539     std::optional<StringId> peak_resident_set_id =
1540         storage_->string_pool().GetId("chrome.peak_resident_set_kb");
1541 
1542     std::optional<StringId> process_stats =
1543         storage_->string_pool().GetId("chrome_process_stats");
1544 
1545     for (auto sit = memory_snapshots.IterateRows(); sit; ++sit) {
1546       Json::Value event_base;
1547 
1548       event_base["ph"] = "v";
1549       event_base["cat"] = "disabled-by-default-memory-infra";
1550       auto snapshot_id = sit.id();
1551       event_base["id"] = base::Uint64ToHexString(snapshot_id.value);
1552       int64_t snapshot_ts = sit.timestamp();
1553       event_base["ts"] = Json::Int64(snapshot_ts / 1000);
1554       // TODO(crbug:1116359): Add dump type to the snapshot proto
1555       // to properly fill event_base["name"]
1556       event_base["name"] = "periodic_interval";
1557       event_base["args"]["dumps"]["level_of_detail"] =
1558           GetNonNullString(storage_, sit.detail_level());
1559 
1560       // Export OS dump events for processes with relevant data.
1561       const auto& process_table = storage_->process_table();
1562       const auto& track_table = storage_->track_table();
1563       for (auto pit = process_table.IterateRows(); pit; ++pit) {
1564         Json::Value event = FillInProcessEventDetails(event_base, pit.pid());
1565         Json::Value& totals = event["args"]["dumps"]["process_totals"];
1566 
1567         for (auto it = track_table.IterateRows(); it; ++it) {
1568           auto arg_set_id = it.dimension_arg_set_id();
1569           if (!arg_set_id) {
1570             continue;
1571           }
1572           if (it.classification() != process_stats) {
1573             continue;
1574           }
1575           uint64_t upid = args_builder_.GetArgs(*arg_set_id)["upid"].asUInt64();
1576           if (upid != pit.id().value) {
1577             continue;
1578           }
1579           TrackId track_id = it.id();
1580           if (private_footprint_id && (it.name() == private_footprint_id)) {
1581             totals["private_footprint_bytes"] = base::Uint64ToHexStringNoPrefix(
1582                 GetCounterValue(track_id, snapshot_ts));
1583           } else if (peak_resident_set_id &&
1584                      (it.name() == peak_resident_set_id)) {
1585             totals["peak_resident_set_size"] = base::Uint64ToHexStringNoPrefix(
1586                 GetCounterValue(track_id, snapshot_ts));
1587           }
1588         }
1589 
1590         auto process_args_id = pit.arg_set_id();
1591         if (process_args_id) {
1592           const Json::Value* process_args =
1593               &args_builder_.GetArgs(process_args_id);
1594           if (process_args->isMember("is_peak_rss_resettable")) {
1595             totals["is_peak_rss_resettable"] =
1596                 (*process_args)["is_peak_rss_resettable"];
1597           }
1598         }
1599 
1600         const auto& smaps_table = storage_->profiler_smaps_table();
1601         // Do not create a vm_regions node without memory maps, since catapult
1602         // expects it to have rows when present.
1603         Json::Value* smaps =
1604             smaps_table.row_count() > 0
1605                 ? &event["args"]["dumps"]["process_mmaps"]["vm_regions"]
1606                 : nullptr;
1607         for (auto it = smaps_table.IterateRows(); it; ++it) {
1608           if (it.upid() != pit.id().value)
1609             continue;
1610           if (it.ts() != snapshot_ts)
1611             continue;
1612           Json::Value region;
1613           region["mf"] = GetNonNullString(storage_, it.file_name());
1614           region["pf"] = Json::Int64(it.protection_flags());
1615           region["sa"] = base::Uint64ToHexStringNoPrefix(
1616               static_cast<uint64_t>(it.start_address()));
1617           region["sz"] = base::Uint64ToHexStringNoPrefix(
1618               static_cast<uint64_t>(it.size_kb()) * 1024);
1619           region["ts"] = Json::Int64(it.module_timestamp());
1620           region["id"] = GetNonNullString(storage_, it.module_debugid());
1621           region["df"] = GetNonNullString(storage_, it.module_debug_path());
1622           region["bs"]["pc"] = base::Uint64ToHexStringNoPrefix(
1623               static_cast<uint64_t>(it.private_clean_resident_kb()) * 1024);
1624           region["bs"]["pd"] = base::Uint64ToHexStringNoPrefix(
1625               static_cast<uint64_t>(it.private_dirty_kb()) * 1024);
1626           region["bs"]["pss"] = base::Uint64ToHexStringNoPrefix(
1627               static_cast<uint64_t>(it.proportional_resident_kb()) * 1024);
1628           region["bs"]["sc"] = base::Uint64ToHexStringNoPrefix(
1629               static_cast<uint64_t>(it.shared_clean_resident_kb()) * 1024);
1630           region["bs"]["sd"] = base::Uint64ToHexStringNoPrefix(
1631               static_cast<uint64_t>(it.shared_dirty_resident_kb()) * 1024);
1632           region["bs"]["sw"] = base::Uint64ToHexStringNoPrefix(
1633               static_cast<uint64_t>(it.swap_kb()) * 1024);
1634           smaps->append(region);
1635         }
1636 
1637         if (!totals.empty() || (smaps && !smaps->empty()))
1638           writer_.WriteCommonEvent(event);
1639       }
1640 
1641       // Export chrome dump events for process snapshots in current memory
1642       // snapshot.
1643       const auto& process_snapshots = storage_->process_memory_snapshot_table();
1644 
1645       for (auto psit = process_snapshots.IterateRows(); psit; ++psit) {
1646         if (psit.snapshot_id() != snapshot_id)
1647           continue;
1648 
1649         auto process_snapshot_id = psit.id();
1650         uint32_t pid = UpidToPid(psit.upid());
1651 
1652         // Shared memory nodes are imported into a fake process with pid 0.
1653         // Catapult expects them to be associated with one of the real processes
1654         // of the snapshot, so we choose the first one we can find and replace
1655         // the pid.
1656         if (pid == 0) {
1657           for (auto iit = process_snapshots.IterateRows(); iit; ++iit) {
1658             if (iit.snapshot_id() != snapshot_id)
1659               continue;
1660             uint32_t new_pid = UpidToPid(iit.upid());
1661             if (new_pid != 0) {
1662               pid = new_pid;
1663               break;
1664             }
1665           }
1666         }
1667 
1668         Json::Value event = FillInProcessEventDetails(event_base, pid);
1669 
1670         const auto& sn = storage_->memory_snapshot_node_table();
1671 
1672         for (auto it = sn.IterateRows(); it; ++it) {
1673           if (it.process_snapshot_id() != process_snapshot_id) {
1674             continue;
1675           }
1676           const char* path = GetNonNullString(storage_, it.path());
1677           event["args"]["dumps"]["allocators"][path]["guid"] =
1678               base::Uint64ToHexStringNoPrefix(
1679                   static_cast<uint64_t>(it.id().value));
1680           if (it.size()) {
1681             AddAttributeToMemoryNode(&event, path, "size", it.size(), "bytes");
1682           }
1683           if (it.effective_size()) {
1684             AddAttributeToMemoryNode(&event, path, "effective_size",
1685                                      it.effective_size(), "bytes");
1686           }
1687 
1688           auto node_args_id = it.arg_set_id();
1689           if (!node_args_id)
1690             continue;
1691           const Json::Value* node_args =
1692               &args_builder_.GetArgs(node_args_id.value());
1693           for (const auto& arg_name : node_args->getMemberNames()) {
1694             const Json::Value& arg_value = (*node_args)[arg_name]["value"];
1695             if (arg_value.empty())
1696               continue;
1697             if (arg_value.isString()) {
1698               AddAttributeToMemoryNode(&event, path, arg_name,
1699                                        arg_value.asString());
1700             } else if (arg_value.isInt64()) {
1701               Json::Value unit = (*node_args)[arg_name]["unit"];
1702               if (unit.empty())
1703                 unit = "unknown";
1704               AddAttributeToMemoryNode(&event, path, arg_name,
1705                                        arg_value.asInt64(), unit.asString());
1706             }
1707           }
1708         }
1709 
1710         const auto& snapshot_edges = storage_->memory_snapshot_edge_table();
1711         for (auto it = snapshot_edges.IterateRows(); it; ++it) {
1712           SnapshotNodeId source_node_id = it.source_node_id();
1713           auto source_node_rr = *sn.FindById(source_node_id);
1714 
1715           if (source_node_rr.process_snapshot_id() != process_snapshot_id) {
1716             continue;
1717           }
1718           Json::Value edge;
1719           edge["source"] =
1720               base::Uint64ToHexStringNoPrefix(it.source_node_id().value);
1721           edge["target"] =
1722               base::Uint64ToHexStringNoPrefix(it.target_node_id().value);
1723           edge["importance"] = Json::Int(it.importance());
1724           edge["type"] = "ownership";
1725           event["args"]["dumps"]["allocators_graph"].append(edge);
1726         }
1727         writer_.WriteCommonEvent(event);
1728       }
1729     }
1730     return base::OkStatus();
1731   }
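  // Putting the pieces above together, each memory snapshot becomes one or more
  // "v"-phase events per process, roughly shaped like this (illustrative):
  //
  //   {
  //     "ph": "v", "cat": "disabled-by-default-memory-infra",
  //     "id": "0x1", "ts": 1234567, "pid": 100, "tid": -1,
  //     "name": "periodic_interval",
  //     "args": {"dumps": {
  //       "level_of_detail": "detailed",
  //       "process_totals": {"private_footprint_bytes": "1f40", ...},
  //       "process_mmaps": {"vm_regions": [{"mf": "...", "sa": "...", ...}]},
  //       "allocators": {"<node path>": {"guid": "2a", "attrs": {...}}},
  //       "allocators_graph": [{"source": "...", "target": "...",
  //                             "importance": 0, "type": "ownership"}]
  //     }}
  //   }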
1732 
1733   uint32_t UpidToPid(UniquePid upid) {
1734     auto pid_it = upids_to_exported_pids_.find(upid);
1735     PERFETTO_DCHECK(pid_it != upids_to_exported_pids_.end());
1736     return pid_it->second;
1737   }
1738 
1739   std::pair<uint32_t, uint32_t> UtidToPidAndTid(UniqueTid utid) {
1740     auto pid_and_tid_it = utids_to_exported_pids_and_tids_.find(utid);
1741     PERFETTO_DCHECK(pid_and_tid_it != utids_to_exported_pids_and_tids_.end());
1742     return pid_and_tid_it->second;
1743   }
1744 
1745   uint32_t NextExportedPidOrTidForDuplicates() {
1746     // Ensure that the exported substitute value does not represent a valid
1747     // pid/tid. This would be very unlikely in practice.
1748     while (IsValidPidOrTid(next_exported_pid_or_tid_for_duplicates_))
1749       next_exported_pid_or_tid_for_duplicates_--;
1750     return next_exported_pid_or_tid_for_duplicates_--;
1751   }
1752 
1753   bool IsValidPidOrTid(uint32_t pid_or_tid) {
1754     const auto& process_table = storage_->process_table();
1755     for (auto it = process_table.IterateRows(); it; ++it) {
1756       if (it.pid() == pid_or_tid)
1757         return true;
1758     }
1759 
1760     const auto& thread_table = storage_->thread_table();
1761     for (auto it = thread_table.IterateRows(); it; ++it) {
1762       if (it.tid() == pid_or_tid)
1763         return true;
1764     }
1765     return false;
1766   }
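  // Worked example for the substitution scheme above: the first duplicate
  // pid/tid is exported as 4294967295 (uint32_t max), unless that value
  // happens to collide with a real pid/tid in the trace, in which case the
  // counter keeps decrementing (4294967294, 4294967293, ...) until
  // IsValidPidOrTid() no longer matches.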
1767 
1768   static Json::Value FillInProcessEventDetails(const Json::Value& event,
1769                                                uint32_t pid) {
1770     Json::Value output = event;
1771     output["pid"] = Json::Int(pid);
1772     output["tid"] = Json::Int(-1);
1773     return output;
1774   }
1775 
1776   static void AddAttributeToMemoryNode(Json::Value* event,
1777                                        const std::string& path,
1778                                        const std::string& key,
1779                                        int64_t value,
1780                                        const std::string& units) {
1781     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["value"] =
1782         base::Uint64ToHexStringNoPrefix(static_cast<uint64_t>(value));
1783     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["type"] =
1784         "scalar";
1785     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["units"] =
1786         units;
1787   }
1788 
1789   static void AddAttributeToMemoryNode(Json::Value* event,
1790                                        const std::string& path,
1791                                        const std::string& key,
1792                                        const std::string& value,
1793                                        const std::string& units = "") {
1794     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["value"] =
1795         value;
1796     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["type"] =
1797         "string";
1798     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["units"] =
1799         units;
1800   }
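  // For example, AddAttributeToMemoryNode(&event, "malloc", "size", 4096,
  // "bytes") produces, under the event (scalar values are hex-encoded without
  // a "0x" prefix, so 4096 becomes "1000"):
  //
  //   "args": {"dumps": {"allocators": {"malloc": {"attrs": {
  //     "size": {"type": "scalar", "units": "bytes", "value": "1000"}}}}}}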
1801 
1802   uint64_t GetCounterValue(TrackId track_id, int64_t ts) {
1803     const auto& counter_table = storage_->counter_table();
1804     auto begin = counter_table.ts().begin();
1805     auto end = counter_table.ts().end();
1806     PERFETTO_DCHECK(counter_table.ts().IsSorted() &&
1807                     counter_table.ts().IsColumnType<int64_t>());
1808     // The timestamp column is sorted, so we can binary search for a matching
1809     // timestamp. Note that we don't use RowMap operations like FilterInto()
1810     // here because they bloat trace processor's binary size in Chrome too much.
1811     auto it = std::lower_bound(begin, end, ts,
1812                                [](const SqlValue& value, int64_t expected_ts) {
1813                                  return value.AsLong() < expected_ts;
1814                                });
1815     for (; it < end; ++it) {
1816       if ((*it).AsLong() != ts)
1817         break;
1818       if (auto rr = counter_table[it.row()]; rr.track_id() == track_id) {
1819         return static_cast<uint64_t>(rr.value());
1820       }
1821     }
1822     return 0;
1823   }
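  // Example of the lookup above: with a sorted ts column [5, 10, 10, 20], the
  // call GetCounterValue(track, 10) binary-searches to the first 10, then
  // scans forward over the rows with ts == 10 until one belongs to `track`,
  // returning its value (or 0 if none matches).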
1824 
1825   const TraceStorage* storage_;
1826   ArgsBuilder args_builder_;
1827   TraceFormatWriter writer_;
1828 
1829   // If a pid/tid is duplicated between two or more different processes/threads
1830   // (pid/tid reuse), we export the subsequent occurrences with substitute
1831   // pids/tids that are visibly different from regular pids/tids, counting down
1832   // from uint32_t max.
1833   uint32_t next_exported_pid_or_tid_for_duplicates_ =
1834       std::numeric_limits<uint32_t>::max();
1835 
1836   std::map<UniquePid, uint32_t> upids_to_exported_pids_;
1837   std::map<uint32_t, UniquePid> exported_pids_to_upids_;
1838   std::map<UniqueTid, std::pair<uint32_t, uint32_t>>
1839       utids_to_exported_pids_and_tids_;
1840   std::map<std::pair<uint32_t, uint32_t>, UniqueTid>
1841       exported_pids_and_tids_to_utids_;
1842 };
1843 
1844 #endif  // PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
1845 
1846 }  // namespace
1847 
1848 OutputWriter::OutputWriter() = default;
1849 OutputWriter::~OutputWriter() = default;
1850 
1851 base::Status ExportJson(const TraceStorage* storage,
1852                         OutputWriter* output,
1853                         ArgumentFilterPredicate argument_filter,
1854                         MetadataFilterPredicate metadata_filter,
1855                         LabelFilterPredicate label_filter) {
1856 #if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
1857   JsonExporter exporter(storage, output, std::move(argument_filter),
1858                         std::move(metadata_filter), std::move(label_filter));
1859   return exporter.Export();
1860 #else
1861   perfetto::base::ignore_result(storage);
1862   perfetto::base::ignore_result(output);
1863   perfetto::base::ignore_result(argument_filter);
1864   perfetto::base::ignore_result(metadata_filter);
1865   perfetto::base::ignore_result(label_filter);
1866   return base::ErrStatus("JSON support is not compiled in this build");
1867 #endif  // PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
1868 }
1869 
1870 base::Status ExportJson(TraceProcessorStorage* tp,
1871                         OutputWriter* output,
1872                         ArgumentFilterPredicate argument_filter,
1873                         MetadataFilterPredicate metadata_filter,
1874                         LabelFilterPredicate label_filter) {
1875   const TraceStorage* storage = reinterpret_cast<TraceProcessorStorageImpl*>(tp)
1876                                     ->context()
1877                                     ->storage.get();
1878   return ExportJson(storage, output, std::move(argument_filter),
1879                     std::move(metadata_filter), std::move(label_filter));
1880 }
1881 
1882 base::Status ExportJson(const TraceStorage* storage, FILE* output) {
1883   FileWriter writer(output);
1884   return ExportJson(storage, &writer, nullptr, nullptr, nullptr);
1885 }
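// Hedged usage sketch for the FILE* overload above (file name and error
// handling are illustrative, not part of this API):
//
//   FILE* f = fopen("trace.json", "w");
//   if (f) {
//     base::Status status = ExportJson(storage, f);  // storage: TraceStorage*
//     if (!status.ok())
//       PERFETTO_ELOG("Export failed: %s", status.c_message());
//     fclose(f);
//   }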
1886 
1887 }  // namespace perfetto::trace_processor::json
1888