1 /*
2 * Copyright (C) 2020 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "src/trace_processor/importers/common/thread_state_tracker.h"
18
19 #include <cstdint>
20 #include <optional>
21
22 #include "src/trace_processor/importers/common/cpu_tracker.h"
23 #include "src/trace_processor/importers/common/process_tracker.h"
24
25 namespace perfetto {
26 namespace trace_processor {
// Caches the storage pointer and pre-interns the two state strings that are
// compared against on every event ("Running" and runnable "R"), so the hot
// paths below can compare StringIds instead of strings.
ThreadStateTracker::ThreadStateTracker(TraceProcessorContext* context)
    : storage_(context->storage.get()),
      context_(context),
      running_string_id_(storage_->InternString("Running")),
      runnable_string_id_(storage_->InternString("R")) {}
ThreadStateTracker::~ThreadStateTracker() = default;
33
PushSchedSwitchEvent(int64_t event_ts,uint32_t cpu,UniqueTid prev_utid,StringId prev_state,UniqueTid next_utid)34 void ThreadStateTracker::PushSchedSwitchEvent(int64_t event_ts,
35 uint32_t cpu,
36 UniqueTid prev_utid,
37 StringId prev_state,
38 UniqueTid next_utid) {
39 // Code related to previous utid. If the thread wasn't running before we know
40 // we lost data and should close the slice accordingly.
41 bool data_loss_cond =
42 HasPreviousRowNumbersForUtid(prev_utid) &&
43 !IsRunning(RowNumToRef(prev_row_numbers_for_thread_[prev_utid]->last_row)
44 .state());
45 ClosePendingState(event_ts, prev_utid, data_loss_cond);
46 AddOpenState(event_ts, prev_utid, prev_state);
47
48 // Code related to next utid.
49 // Due to forced migration, it is possible for the same thread to be
50 // scheduled on different CPUs at the same time.
51 // We work around this problem by truncating the previous state to the start
52 // of this state and starting the next state normally. This is why we don't
53 // check whether previous state is running/runnable. See b/186509316 for
54 // details and an example on when this happens.
55 ClosePendingState(event_ts, next_utid, false);
56 AddOpenState(event_ts, next_utid, running_string_id_, cpu);
57 }
58
// Handles a sched_waking event: transitions |utid| from a blocked state to
// runnable ("R"), recording |waker_utid| and, when |common_flags| is present,
// whether the wakeup came from interrupt context. Wakeups of threads that are
// not blocked are recorded separately as spurious wakeups.
void ThreadStateTracker::PushWakingEvent(int64_t event_ts,
                                         UniqueTid utid,
                                         UniqueTid waker_utid,
                                         std::optional<uint16_t> common_flags) {
  // If thread has not had a sched switch event, just open a runnable state.
  // There's no pending state to close.
  if (!HasPreviousRowNumbersForUtid(utid)) {
    AddOpenState(event_ts, utid, runnable_string_id_, std::nullopt, waker_utid,
                 common_flags);
    return;
  }

  auto last_row_ref = RowNumToRef(prev_row_numbers_for_thread_[utid]->last_row);

  // Occasionally, it is possible to get a waking event for a thread
  // which is already in a runnable state. When this happens (or if the thread
  // is running), we just ignore the waking event. See b/186509316 for details
  // and an example on when this happens. Only blocked events can be waken up.
  if (!IsBlocked(last_row_ref.state())) {
    // If we receive a waking event while we are not blocked, we ignore this
    // in the |thread_state| table but we track in the |sched_wakeup| table.
    // The |thread_state_id| in |sched_wakeup| is the current running/runnable
    // event.
    std::optional<uint32_t> irq_context =
        common_flags
            ? std::make_optional(CommonFlagsToIrqContext(*common_flags))
            : std::nullopt;
    // Row layout: ts, thread_state row number, irq_context, utid, waker_utid.
    storage_->mutable_spurious_sched_wakeup_table()->Insert(
        {event_ts, prev_row_numbers_for_thread_[utid]->last_row.row_number(),
         irq_context, utid, waker_utid});
    return;
  }

  // Close the sleeping state and open runnable state.
  ClosePendingState(event_ts, utid, false);
  AddOpenState(event_ts, utid, runnable_string_id_, std::nullopt, waker_utid,
               common_flags);
}
97
PushNewTaskEvent(int64_t event_ts,UniqueTid utid,UniqueTid waker_utid)98 void ThreadStateTracker::PushNewTaskEvent(int64_t event_ts,
99 UniqueTid utid,
100 UniqueTid waker_utid) {
101 // open a runnable state with a non-interrupt wakeup from the cloning thread.
102 AddOpenState(event_ts, utid, runnable_string_id_, /*cpu=*/std::nullopt,
103 waker_utid, /*common_flags=*/0);
104 }
105
PushBlockedReason(UniqueTid utid,std::optional<bool> io_wait,std::optional<StringId> blocked_function)106 void ThreadStateTracker::PushBlockedReason(
107 UniqueTid utid,
108 std::optional<bool> io_wait,
109 std::optional<StringId> blocked_function) {
110 // Return if there is no state, as there is are no previous rows available.
111 if (!HasPreviousRowNumbersForUtid(utid))
112 return;
113
114 // Return if no previous bocked row exists.
115 auto blocked_row_number =
116 prev_row_numbers_for_thread_[utid]->last_blocked_row;
117 if (!blocked_row_number.has_value())
118 return;
119
120 auto row_reference = RowNumToRef(blocked_row_number.value());
121 if (io_wait.has_value()) {
122 row_reference.set_io_wait(*io_wait);
123 }
124 if (blocked_function.has_value()) {
125 row_reference.set_blocked_function(*blocked_function);
126 }
127 }
128
// Opens a new thread_state slice for |utid| starting at |ts| with the given
// |state|, leaving its duration unfinished (dur == -1) until a later event
// closes it. |cpu| is only set for Running slices; |waker_utid| and
// |common_flags| only for runnable slices created by wakeups.
void ThreadStateTracker::AddOpenState(int64_t ts,
                                      UniqueTid utid,
                                      StringId state,
                                      std::optional<uint16_t> cpu,
                                      std::optional<UniqueTid> waker_utid,
                                      std::optional<uint16_t> common_flags) {
  // Ignore the swapper utid because it corresponds to the swapper thread which
  // doesn't make sense to insert.
  if (utid == context_->process_tracker->swapper_utid())
    return;

  // Insert row with unfinished state (dur == -1 marks it as still open).
  tables::ThreadStateTable::Row row;
  row.ts = ts;
  row.waker_utid = waker_utid;
  row.dur = -1;
  row.utid = utid;
  row.state = state;
  if (cpu)
    row.ucpu = context_->cpu_tracker->GetOrCreateCpu(*cpu);
  if (common_flags.has_value()) {
    row.irq_context = CommonFlagsToIrqContext(*common_flags);
  }

  // Link this slice to the waker's current slice, but only when the waker is
  // plausibly actually running.
  if (waker_utid.has_value() && HasPreviousRowNumbersForUtid(*waker_utid)) {
    auto waker_row =
        RowNumToRef(prev_row_numbers_for_thread_[*waker_utid]->last_row);

    // We expect all wakers to be Running. But there are 2 cases where this
    // might not be true:
    // 1. At the start of a trace the 'waker CPU' has not yet started
    //    emitting events.
    // 2. Data loss.
    if (IsRunning(waker_row.state())) {
      row.waker_id = std::make_optional(waker_row.id());
    }
  }

  auto row_num = storage_->mutable_thread_state_table()->Insert(row).row_number;

  // Grow the per-thread bookkeeping vector on demand; utids are dense indices.
  if (utid >= prev_row_numbers_for_thread_.size()) {
    prev_row_numbers_for_thread_.resize(utid + 1);
  }

  // First slice ever seen for this thread: initialize the entry so the
  // runnable branch below can safely update ->last_row.
  if (!prev_row_numbers_for_thread_[utid].has_value()) {
    prev_row_numbers_for_thread_[utid] = RelatedRows{std::nullopt, row_num};
  }

  // Track the latest row for the thread and, for blocked states, also the
  // latest blocked row (used later by PushBlockedReason). Running clears the
  // blocked row; runnable preserves it.
  if (IsRunning(state)) {
    prev_row_numbers_for_thread_[utid] = RelatedRows{std::nullopt, row_num};
  } else if (IsBlocked(state)) {
    prev_row_numbers_for_thread_[utid] = RelatedRows{row_num, row_num};
  } else /* if (IsRunnable(state)) */ {
    prev_row_numbers_for_thread_[utid]->last_row = row_num;
  }
}
185
CommonFlagsToIrqContext(uint32_t common_flags)186 uint32_t ThreadStateTracker::CommonFlagsToIrqContext(uint32_t common_flags) {
187 // If common_flags contains TRACE_FLAG_HARDIRQ | TRACE_FLAG_SOFTIRQ, wakeup
188 // was emitted in interrupt context.
189 // See:
190 // https://cs.android.com/android/kernel/superproject/+/common-android-mainline:common/include/linux/trace_events.h
191 // TODO(rsavitski): we could also include TRACE_FLAG_NMI for a complete
192 // "interrupt context" meaning. But at the moment it's not necessary as this
193 // is used for sched_waking events, which are not emitted from NMI contexts.
194 return common_flags & (0x08 | 0x10) ? 1 : 0;
195 }
196
ClosePendingState(int64_t end_ts,UniqueTid utid,bool data_loss)197 void ThreadStateTracker::ClosePendingState(int64_t end_ts,
198 UniqueTid utid,
199 bool data_loss) {
200 // Discard close if there is no open state to close.
201 if (!HasPreviousRowNumbersForUtid(utid))
202 return;
203
204 auto row_ref = RowNumToRef(prev_row_numbers_for_thread_[utid]->last_row);
205
206 // Update the duration only for states without data loss.
207 if (!data_loss) {
208 row_ref.set_dur(end_ts - row_ref.ts());
209 }
210 }
211
IsRunning(StringId state)212 bool ThreadStateTracker::IsRunning(StringId state) {
213 return state == running_string_id_;
214 }
215
IsRunnable(StringId state)216 bool ThreadStateTracker::IsRunnable(StringId state) {
217 return state == runnable_string_id_;
218 }
219
IsBlocked(StringId state)220 bool ThreadStateTracker::IsBlocked(StringId state) {
221 return !(IsRunnable(state) || IsRunning(state));
222 }
223
224 } // namespace trace_processor
225 } // namespace perfetto
226