/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cinttypes>
#include <cstdint>
#include <functional>
#include <limits>
#include <optional>

#include <stdint.h>

#include "src/trace_processor/importers/common/args_tracker.h"
#include "src/trace_processor/importers/common/args_translation_table.h"
#include "src/trace_processor/importers/common/process_tracker.h"
#include "src/trace_processor/importers/common/slice_tracker.h"
#include "src/trace_processor/importers/common/slice_translation_table.h"
#include "src/trace_processor/importers/common/track_tracker.h"
#include "src/trace_processor/storage/trace_storage.h"
#include "src/trace_processor/types/trace_processor_context.h"

namespace perfetto {
namespace trace_processor {
namespace {
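// Maximum nesting depth of slices on a single track; StartSlice() drops (and
// in debug builds DFATALs on) any slice that would exceed this depth.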
constexpr uint32_t kMaxDepth = 512;
}  // namespace

SliceTracker::SliceTracker(TraceProcessorContext* context)
    : legacy_unnestable_begin_count_string_id_(
          context->storage->InternString("legacy_unnestable_begin_count")),
      legacy_unnestable_last_begin_ts_string_id_(
          context->storage->InternString("legacy_unnestable_last_begin_ts")),
      context_(context) {}

SliceTracker::~SliceTracker() = default;

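// Illustrative usage (a comment-only sketch; the real call sites live in the
// trace importers, and the timestamps below are hypothetical):
//
//   // Open a slice whose duration is not yet known, then close it later:
//   slice_tracker->Begin(/*timestamp=*/100, track_id, category, name);
//   slice_tracker->End(/*timestamp=*/200, track_id, category, name);
//
//   // Record a complete slice whose duration is already known:
//   slice_tracker->Scoped(/*timestamp=*/300, track_id, category, name,
//                         /*duration=*/50);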
std::optional<SliceId> SliceTracker::Begin(int64_t timestamp,
                                           TrackId track_id,
                                           StringId category,
                                           StringId raw_name,
                                           SetArgsCallback args_callback) {
  const StringId name =
      context_->slice_translation_table->TranslateName(raw_name);
  tables::SliceTable::Row row(timestamp, kPendingDuration, track_id, category,
                              name);
  return StartSlice(timestamp, track_id, args_callback, [this, &row]() {
    return context_->storage->mutable_slice_table()->Insert(row).id;
  });
}

void SliceTracker::BeginLegacyUnnestable(tables::SliceTable::Row row,
                                         SetArgsCallback args_callback) {
  if (row.name) {
    row.name = context_->slice_translation_table->TranslateName(*row.name);
  }

  // Ensure that the duration is pending for this row.
  // TODO(lalitm): change this to eventually use null instead of -1.
  row.dur = kPendingDuration;

  // Double check that if we've seen this track in the past, it was also
  // marked as unnestable then.
#if PERFETTO_DCHECK_IS_ON()
  auto* it = stacks_.Find(row.track_id);
  PERFETTO_DCHECK(!it || it->is_legacy_unnestable);
#endif

  // Ensure that StartSlice knows that this track is unnestable.
  stacks_[row.track_id].is_legacy_unnestable = true;

  StartSlice(row.ts, row.track_id, args_callback, [this, &row]() {
    return context_->storage->mutable_slice_table()->Insert(row).id;
  });
}

std::optional<SliceId> SliceTracker::Scoped(int64_t timestamp,
                                            TrackId track_id,
                                            StringId category,
                                            StringId raw_name,
                                            int64_t duration,
                                            SetArgsCallback args_callback) {
  PERFETTO_DCHECK(duration >= 0);

  const StringId name =
      context_->slice_translation_table->TranslateName(raw_name);
  tables::SliceTable::Row row(timestamp, duration, track_id, category, name);
  return StartSlice(timestamp, track_id, args_callback, [this, &row]() {
    return context_->storage->mutable_slice_table()->Insert(row).id;
  });
}

std::optional<SliceId> SliceTracker::End(int64_t timestamp,
                                         TrackId track_id,
                                         StringId category,
                                         StringId raw_name,
                                         SetArgsCallback args_callback) {
  const StringId name =
      context_->slice_translation_table->TranslateName(raw_name);
  auto finder = [this, category, name](const SlicesStack& stack) {
    return MatchingIncompleteSliceIndex(stack, name, category);
  };
  return CompleteSlice(timestamp, track_id, args_callback, finder);
}

std::optional<uint32_t> SliceTracker::AddArgs(TrackId track_id,
                                              StringId category,
                                              StringId name,
                                              SetArgsCallback args_callback) {
  auto* it = stacks_.Find(track_id);
  if (!it)
    return std::nullopt;

  auto& stack = it->slice_stack;
  if (stack.empty())
    return std::nullopt;

  auto* slices = context_->storage->mutable_slice_table();
  std::optional<uint32_t> stack_idx =
      MatchingIncompleteSliceIndex(stack, name, category);
  if (!stack_idx.has_value())
    return std::nullopt;

  tables::SliceTable::RowNumber num = stack[*stack_idx].row;
  tables::SliceTable::RowReference ref = num.ToRowReference(slices);
  PERFETTO_DCHECK(ref.dur() == kPendingDuration);

  // Add args to current pending slice.
  ArgsTracker* tracker = &stack[*stack_idx].args_tracker;
  auto bound_inserter = tracker->AddArgsTo(ref.id());
  args_callback(&bound_inserter);
  return num.row_number();
}

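// A comment-only sketch of how nesting on a single track is recorded (the
// timestamps and names below are hypothetical):
//
//   ts=10 Begin("A")  -> depth 0, parent_id = null
//   ts=20 Begin("B")  -> depth 1, parent_id = A, parent_stack_id = hash({A})
//   ts=30 End("B")    -> B.dur = 10
//   ts=40 End("A")    -> A.dur = 30
//
// StartSlice() assigns depth/parent/stack ids from the current per-track
// stack; CompleteSlice() later fills in the duration.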
std::optional<SliceId> SliceTracker::StartSlice(
    int64_t timestamp,
    TrackId track_id,
    SetArgsCallback args_callback,
    std::function<SliceId()> inserter) {
  // At this stage all events should be globally timestamp ordered.
  if (timestamp < prev_timestamp_) {
    context_->storage->IncrementStats(stats::slice_out_of_order);
    return std::nullopt;
  }
  prev_timestamp_ = timestamp;

  auto& track_info = stacks_[track_id];
  auto& stack = track_info.slice_stack;

  if (track_info.is_legacy_unnestable) {
    PERFETTO_DCHECK(stack.size() <= 1);

    track_info.legacy_unnestable_begin_count++;
    track_info.legacy_unnestable_last_begin_ts = timestamp;

    // If this is an unnestable track, don't start a new slice if one already
    // exists.
    if (!stack.empty()) {
      return std::nullopt;
    }
  }

  auto* slices = context_->storage->mutable_slice_table();
  MaybeCloseStack(timestamp, stack, track_id);

  size_t depth = stack.size();

  std::optional<tables::SliceTable::RowReference> parent_ref =
      depth == 0 ? std::nullopt
                 : std::make_optional(stack.back().row.ToRowReference(slices));
  int64_t parent_stack_id = parent_ref ? parent_ref->stack_id() : 0;
  std::optional<tables::SliceTable::Id> parent_id =
      parent_ref ? std::make_optional(parent_ref->id()) : std::nullopt;

  SliceId id = inserter();
  tables::SliceTable::RowReference ref = *slices->FindById(id);
  if (depth >= kMaxDepth) {
    auto parent_name = context_->storage->GetString(
        parent_ref->name().value_or(kNullStringId));
    auto name =
        context_->storage->GetString(ref.name().value_or(kNullStringId));
    PERFETTO_DLOG("Last slice: %s", parent_name.c_str());
    PERFETTO_DLOG("Current slice: %s", name.c_str());
    PERFETTO_DFATAL("Slices with too large depth found.");
    return std::nullopt;
  }
  StackPush(track_id, ref);

  // Post-fill all the relevant columns. All the other columns should have
  // been filled by the inserter.
  ref.set_depth(static_cast<uint32_t>(depth));
  ref.set_parent_stack_id(parent_stack_id);
  ref.set_stack_id(GetStackHash(stack));
  if (parent_id)
    ref.set_parent_id(*parent_id);

  if (args_callback) {
    auto bound_inserter = stack.back().args_tracker.AddArgsTo(id);
    args_callback(&bound_inserter);
  }
  return id;
}

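// Closes the open slice on |track_id| selected by |finder| (e.g. End() passes
// MatchingIncompleteSliceIndex() so an END event can match by name/category),
// sets its duration to |timestamp - ts| and, if it was the top of the stack,
// pops it.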
std::optional<SliceId> SliceTracker::CompleteSlice(
    int64_t timestamp,
    TrackId track_id,
    SetArgsCallback args_callback,
    std::function<std::optional<uint32_t>(const SlicesStack&)> finder) {
  // At this stage all events should be globally timestamp ordered.
  if (timestamp < prev_timestamp_) {
    context_->storage->IncrementStats(stats::slice_out_of_order);
    return std::nullopt;
  }
  prev_timestamp_ = timestamp;

  auto it = stacks_.Find(track_id);
  if (!it)
    return std::nullopt;

  TrackInfo& track_info = *it;
  SlicesStack& stack = track_info.slice_stack;
  MaybeCloseStack(timestamp, stack, track_id);
  if (stack.empty())
    return std::nullopt;

  auto* slices = context_->storage->mutable_slice_table();
  std::optional<uint32_t> stack_idx = finder(stack);

  // If we are trying to close slices that are not open on the stack (e.g.,
  // slices that began before tracing started), bail out.
  if (!stack_idx)
    return std::nullopt;

  const auto& slice_info = stack[stack_idx.value()];

  tables::SliceTable::RowReference ref = slice_info.row.ToRowReference(slices);
  PERFETTO_DCHECK(ref.dur() == kPendingDuration);
  ref.set_dur(timestamp - ref.ts());

  ArgsTracker& tracker = stack[stack_idx.value()].args_tracker;
  if (args_callback) {
    auto bound_inserter = tracker.AddArgsTo(ref.id());
    args_callback(&bound_inserter);
  }

  // Add the legacy unnestable args if they exist.
  if (track_info.is_legacy_unnestable) {
    auto bound_inserter = tracker.AddArgsTo(ref.id());
    bound_inserter.AddArg(
        legacy_unnestable_begin_count_string_id_,
        Variadic::Integer(track_info.legacy_unnestable_begin_count));
    bound_inserter.AddArg(
        legacy_unnestable_last_begin_ts_string_id_,
        Variadic::Integer(track_info.legacy_unnestable_last_begin_ts));
  }

  // If this slice is the top slice on the stack, pop it off.
  if (*stack_idx == stack.size() - 1) {
    StackPop(track_id);
  }
  return ref.id();
}

// Returns the first incomplete slice in the stack (searching from the top of
// the stack downwards) with matching name and category. We assume a null
// category/name matches everything. Returns std::nullopt if no matching slice
// is found.
std::optional<uint32_t> SliceTracker::MatchingIncompleteSliceIndex(
    const SlicesStack& stack,
    StringId name,
    StringId category) {
  auto* slices = context_->storage->mutable_slice_table();
  for (int i = static_cast<int>(stack.size()) - 1; i >= 0; i--) {
    tables::SliceTable::RowReference ref =
        stack[static_cast<size_t>(i)].row.ToRowReference(slices);
    if (ref.dur() != kPendingDuration)
      continue;
    std::optional<StringId> other_category = ref.category();
    if (!category.is_null() && (!other_category || other_category->is_null() ||
                                category != other_category)) {
      continue;
    }
    std::optional<StringId> other_name = ref.name();
    if (!name.is_null() && other_name && !other_name->is_null() &&
        name != other_name) {
      continue;
    }
    return static_cast<uint32_t>(i);
  }
  return std::nullopt;
}

void SliceTracker::MaybeAddTranslatableArgs(SliceInfo& slice_info) {
  if (!slice_info.args_tracker.NeedsTranslation(
          *context_->args_translation_table)) {
    return;
  }
  const auto& table = context_->storage->slice_table();
  tables::SliceTable::ConstRowReference ref =
      slice_info.row.ToRowReference(table);
  translatable_args_.emplace_back(TranslatableArgs{
      ref.id(),
      std::move(slice_info.args_tracker)
          .ToCompactArgSet(table.arg_set_id(), slice_info.row.row_number())});
}

void SliceTracker::FlushPendingSlices() {
  // Clear the remaining stack entries. This ensures that any pending args are
  // written to the storage. We don't close the slices themselves: they keep
  // kPendingDuration so that the UI can still distinguish such "incomplete"
  // slices.
  //
  // TODO(eseckler): Reconsider whether we want to close pending slices by
  // setting their duration to |trace_end - event_start|. Might still want some
  // additional way of flagging these events as "incomplete" to the UI.

  // Make sure that args for all incomplete slices are translated.
  for (auto it = stacks_.GetIterator(); it; ++it) {
    auto& track_info = it.value();
    for (auto& slice_info : track_info.slice_stack) {
      MaybeAddTranslatableArgs(slice_info);
    }
  }

  // Translate and flush all pending args.
  for (const auto& translatable_arg : translatable_args_) {
    auto bound_inserter =
        context_->args_tracker->AddArgsTo(translatable_arg.slice_id);
    context_->args_translation_table->TranslateArgs(
        translatable_arg.compact_arg_set, bound_inserter);
  }
  translatable_args_.clear();

  stacks_.Clear();
}

void SliceTracker::SetOnSliceBeginCallback(OnSliceBeginCallback callback) {
  on_slice_begin_callback_ = callback;
}

std::optional<SliceId> SliceTracker::GetTopmostSliceOnTrack(
    TrackId track_id) const {
  const auto* iter = stacks_.Find(track_id);
  if (!iter)
    return std::nullopt;
  const auto& stack = iter->slice_stack;
  if (stack.empty())
    return std::nullopt;
  const auto& slice = context_->storage->slice_table();
  return stack.back().row.ToRowReference(slice).id();
}

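// Pops every slice on |stack| that has already ended by |ts|. If a completed
// slice still has incomplete descendants, their durations are truncated to the
// parent's end timestamp before they are popped (see the comment below about
// partially overlapping slices).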
void SliceTracker::MaybeCloseStack(int64_t ts,
                                   const SlicesStack& stack,
                                   TrackId track_id) {
  auto* slices = context_->storage->mutable_slice_table();
  bool incomplete_descendent = false;
  for (int i = static_cast<int>(stack.size()) - 1; i >= 0; i--) {
    tables::SliceTable::RowReference ref =
        stack[static_cast<size_t>(i)].row.ToRowReference(slices);

    int64_t start_ts = ref.ts();
    int64_t dur = ref.dur();
    int64_t end_ts = start_ts + dur;
    if (dur == kPendingDuration) {
      incomplete_descendent = true;
      continue;
    }

    if (incomplete_descendent) {
      PERFETTO_DCHECK(ts >= start_ts);

      // Only process slices if the ts is past the end of the slice.
      if (ts <= end_ts)
        continue;

      // This usually happens because we have two slices that are partially
      // overlapping:
      //   [  slice 1   ]
      //          [   slice 2   ]
      // This is invalid in Chrome and should be fixed. Duration events should
      // either be nested or disjoint, never partially intersecting.
      // Known issue: if tracing both binder and system calls on Android,
      // "binder reply" slices will try to escape the enclosing sys_ioctl.
      PERFETTO_DLOG(
          "Incorrect ordering of begin/end slice events. "
          "Truncating incomplete descendants to the end of slice "
          "%s[%" PRId64 ", %" PRId64 "] due to an event at ts=%" PRId64 ".",
          context_->storage->GetString(ref.name().value_or(kNullStringId))
              .c_str(),
          start_ts, end_ts, ts);
      context_->storage->IncrementStats(stats::misplaced_end_event);

      // Every slice below this one should have a pending duration. Update all
      // of them to have the end ts of the current slice and pop them all off.
      for (int j = static_cast<int>(stack.size()) - 1; j > i; --j) {
        tables::SliceTable::RowReference child_ref =
            stack[static_cast<size_t>(j)].row.ToRowReference(slices);
        PERFETTO_DCHECK(child_ref.dur() == kPendingDuration);
        child_ref.set_dur(end_ts - child_ref.ts());
        StackPop(track_id);
      }

      // Also pop the current row itself and reset the incomplete flag.
      StackPop(track_id);
      incomplete_descendent = false;

      continue;
    }

    if (end_ts <= ts) {
      StackPop(track_id);
    }
  }
}

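// Hashes the (category, name) pair of every slice currently on the stack; the
// result becomes the newly pushed slice's |stack_id| (and is later reused as
// the |parent_stack_id| of its children), so identical stacks share an id.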
int64_t SliceTracker::GetStackHash(const SlicesStack& stack) {
  PERFETTO_DCHECK(!stack.empty());

  const auto& slices = context_->storage->slice_table();

  base::Hasher hash;
  for (size_t i = 0; i < stack.size(); i++) {
    auto ref = stack[i].row.ToRowReference(slices);
    hash.Update(ref.category().value_or(kNullStringId).raw_id());
    hash.Update(ref.name().value_or(kNullStringId).raw_id());
  }

  // For clients which don't have a 64-bit integer type (i.e. Javascript),
  // returning hashes which have the top 11 bits set leads to numbers which are
  // unrepresentable. This means that clients cannot filter using this number
  // as it will be meaningless when passed back to us. For this reason, make
  // sure that the hash is at most 2^53 - 1, the largest integer exactly
  // representable in a double.
  constexpr uint64_t kSafeBitmask = (1ull << 53) - 1;
  return static_cast<int64_t>(hash.digest() & kSafeBitmask);
}

void SliceTracker::StackPop(TrackId track_id) {
  auto& stack = stacks_[track_id].slice_stack;
  MaybeAddTranslatableArgs(stack.back());
  stack.pop_back();
}

void SliceTracker::StackPush(TrackId track_id,
                             tables::SliceTable::RowReference ref) {
  stacks_[track_id].slice_stack.push_back(
      SliceInfo{ref.ToRowNumber(), ArgsTracker(context_)});
  if (on_slice_begin_callback_) {
    on_slice_begin_callback_(track_id, ref.id());
  }
}

}  // namespace trace_processor
}  // namespace perfetto