1 /*
2 * Copyright (C) 2021 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "LatencyAggregator"
18 #include "LatencyAggregator.h"
19
#include <inttypes.h>

#include <cerrno>
#include <cstdlib>

#include <android-base/stringprintf.h>
#include <input/Input.h>
#include <log/log.h>
#include <server_configurable_flags/get_flags.h>
26
27 using android::base::StringPrintf;
28 using dist_proc::aggregation::KllQuantile;
29 using std::chrono_literals::operator""ms;
30
31 namespace {
32
33 // Convert the provided nanoseconds into hundreds of microseconds.
34 // Use hundreds of microseconds (as opposed to microseconds) to preserve space.
ns2hus(nsecs_t nanos)35 static inline int64_t ns2hus(nsecs_t nanos) {
36 return ns2us(nanos) / 100;
37 }
38
// The maximum number of events that we will store in the statistics. Any events that we will
// receive after we have reached this number will be ignored. We could also implement this by
// checking the actual size of the current data and making sure that we do not go over. However,
// the serialization process of sketches is too heavy (1 ms for all 14 sketches), and would be too
// much to do (even if infrequently).
// The value here has been determined empirically.
static constexpr size_t MAX_EVENTS_FOR_STATISTICS = 20000;

// Category (=namespace) name for the input settings that are applied at boot time
constexpr const char* INPUT_NATIVE_BOOT = "input_native_boot";
// Feature flag name for the threshold of end-to-end touch latency that would trigger
// SlowEventReported atom to be pushed
constexpr const char* SLOW_EVENT_MIN_REPORTING_LATENCY_MILLIS =
        "slow_event_min_reporting_latency_millis";
// Feature flag name for the minimum delay before reporting a slow event after having just reported
// a slow event. This helps limit the amount of data sent to the server
constexpr const char* SLOW_EVENT_MIN_REPORTING_INTERVAL_MILLIS =
        "slow_event_min_reporting_interval_millis";

// If an event has end-to-end latency > 200 ms, it will get reported as a slow event.
// These defaults are constexpr (previously mutable globals): nothing ever writes to them.
constexpr std::chrono::milliseconds DEFAULT_SLOW_EVENT_MIN_REPORTING_LATENCY{200};
// If we receive two slow events less than 1 min apart, we will only report 1 of them.
constexpr std::chrono::milliseconds DEFAULT_SLOW_EVENT_MIN_REPORTING_INTERVAL{60000};
62
getSlowEventMinReportingLatency()63 static std::chrono::milliseconds getSlowEventMinReportingLatency() {
64 std::string millis = server_configurable_flags::
65 GetServerConfigurableFlag(INPUT_NATIVE_BOOT, SLOW_EVENT_MIN_REPORTING_LATENCY_MILLIS,
66 std::to_string(
67 DEFAULT_SLOW_EVENT_MIN_REPORTING_LATENCY.count()));
68 return std::chrono::milliseconds(std::stoi(millis));
69 }
70
getSlowEventMinReportingInterval()71 static std::chrono::milliseconds getSlowEventMinReportingInterval() {
72 std::string millis = server_configurable_flags::
73 GetServerConfigurableFlag(INPUT_NATIVE_BOOT, SLOW_EVENT_MIN_REPORTING_INTERVAL_MILLIS,
74 std::to_string(
75 DEFAULT_SLOW_EVENT_MIN_REPORTING_INTERVAL.count()));
76 return std::chrono::milliseconds(std::stoi(millis));
77 }
78
79 } // namespace
80
81 namespace android::inputdispatcher {
82
83 /**
84 * Same as android::util::BytesField, but doesn't store raw pointers, and therefore deletes its
85 * resources automatically.
86 */
87 class SafeBytesField {
88 public:
SafeBytesField(dist_proc::aggregation::KllQuantile & quantile)89 explicit SafeBytesField(dist_proc::aggregation::KllQuantile& quantile) {
90 const zetasketch::android::AggregatorStateProto aggProto = quantile.SerializeToProto();
91 mBuffer.resize(aggProto.ByteSizeLong());
92 aggProto.SerializeToArray(mBuffer.data(), mBuffer.size());
93 }
getBytesField()94 android::util::BytesField getBytesField() {
95 return android::util::BytesField(mBuffer.data(), mBuffer.size());
96 }
97
98 private:
99 std::vector<char> mBuffer;
100 };
101
LatencyAggregator()102 LatencyAggregator::LatencyAggregator() {
103 AStatsManager_setPullAtomCallback(android::util::INPUT_EVENT_LATENCY_SKETCH, nullptr,
104 LatencyAggregator::pullAtomCallback, this);
105 dist_proc::aggregation::KllQuantileOptions options;
106 options.set_inv_eps(100); // Request precision of 1.0%, instead of default 0.1%
107 for (size_t i = 0; i < SketchIndex::SIZE; i++) {
108 mDownSketches[i] = KllQuantile::Create(options);
109 mMoveSketches[i] = KllQuantile::Create(options);
110 }
111 }
112
LatencyAggregator::~LatencyAggregator() {
    // Unregister the pull callback so statsd does not invoke it on a destroyed object.
    AStatsManager_clearPullAtomCallback(android::util::INPUT_EVENT_LATENCY_SKETCH);
}
116
pullAtomCallback(int32_t atomTag,AStatsEventList * data,void * cookie)117 AStatsManager_PullAtomCallbackReturn LatencyAggregator::pullAtomCallback(int32_t atomTag,
118 AStatsEventList* data,
119 void* cookie) {
120 LatencyAggregator* pAggregator = reinterpret_cast<LatencyAggregator*>(cookie);
121 if (pAggregator == nullptr) {
122 LOG_ALWAYS_FATAL("pAggregator is null!");
123 }
124 return pAggregator->pullData(data);
125 }
126
void LatencyAggregator::processTimeline(const InputEventTimeline& timeline) {
    // Feed the timeline to both consumers: the KLL sketches (reported via the pulled
    // INPUT_EVENT_LATENCY_SKETCH atom) and the slow-event detector (pushed atom).
    processStatistics(timeline);
    processSlowEvent(timeline);
}
131
// This version of LatencyAggregator doesn't push any atoms; sketch data is only
// reported through the pull path (see pullData).
void LatencyAggregator::pushLatencyStatistics() {}
134
processStatistics(const InputEventTimeline & timeline)135 void LatencyAggregator::processStatistics(const InputEventTimeline& timeline) {
136 std::scoped_lock lock(mLock);
137 // Before we do any processing, check that we have not yet exceeded MAX_SIZE
138 if (mNumSketchEventsProcessed >= MAX_EVENTS_FOR_STATISTICS) {
139 return;
140 }
141 mNumSketchEventsProcessed++;
142
143 std::array<std::unique_ptr<KllQuantile>, SketchIndex::SIZE>& sketches =
144 timeline.inputEventActionType == InputEventActionType::MOTION_ACTION_DOWN
145 ? mDownSketches
146 : mMoveSketches;
147
148 // Process common ones first
149 const nsecs_t eventToRead = timeline.readTime - timeline.eventTime;
150 sketches[SketchIndex::EVENT_TO_READ]->Add(ns2hus(eventToRead));
151
152 // Now process per-connection ones
153 for (const auto& [connectionToken, connectionTimeline] : timeline.connectionTimelines) {
154 if (!connectionTimeline.isComplete()) {
155 continue;
156 }
157 const nsecs_t readToDeliver = connectionTimeline.deliveryTime - timeline.readTime;
158 const nsecs_t deliverToConsume =
159 connectionTimeline.consumeTime - connectionTimeline.deliveryTime;
160 const nsecs_t consumeToFinish =
161 connectionTimeline.finishTime - connectionTimeline.consumeTime;
162 const nsecs_t gpuCompletedTime =
163 connectionTimeline.graphicsTimeline[GraphicsTimeline::GPU_COMPLETED_TIME];
164 const nsecs_t presentTime =
165 connectionTimeline.graphicsTimeline[GraphicsTimeline::PRESENT_TIME];
166 const nsecs_t consumeToGpuComplete = gpuCompletedTime - connectionTimeline.consumeTime;
167 const nsecs_t gpuCompleteToPresent = presentTime - gpuCompletedTime;
168 const nsecs_t endToEnd = presentTime - timeline.eventTime;
169
170 sketches[SketchIndex::READ_TO_DELIVER]->Add(ns2hus(readToDeliver));
171 sketches[SketchIndex::DELIVER_TO_CONSUME]->Add(ns2hus(deliverToConsume));
172 sketches[SketchIndex::CONSUME_TO_FINISH]->Add(ns2hus(consumeToFinish));
173 sketches[SketchIndex::CONSUME_TO_GPU_COMPLETE]->Add(ns2hus(consumeToGpuComplete));
174 sketches[SketchIndex::GPU_COMPLETE_TO_PRESENT]->Add(ns2hus(gpuCompleteToPresent));
175 sketches[SketchIndex::END_TO_END]->Add(ns2hus(endToEnd));
176 }
177 }
178
/**
 * Pull callback body: serialize all 14 sketches (7 DOWN + 7 MOVE) into a single
 * INPUT_EVENT_LATENCY_SKETCH atom, then reset the sketches to start a new
 * aggregation window. The argument order below must match the atom's field order.
 */
AStatsManager_PullAtomCallbackReturn LatencyAggregator::pullData(AStatsEventList* data) {
    std::scoped_lock lock(mLock);
    // Serialize everything first; SafeBytesField owns each serialized buffer so the
    // pointers handed to addAStatsEvent stay valid until the end of this scope.
    std::array<std::unique_ptr<SafeBytesField>, SketchIndex::SIZE> serializedDownData;
    std::array<std::unique_ptr<SafeBytesField>, SketchIndex::SIZE> serializedMoveData;
    for (size_t i = 0; i < SketchIndex::SIZE; i++) {
        serializedDownData[i] = std::make_unique<SafeBytesField>(*mDownSketches[i]);
        serializedMoveData[i] = std::make_unique<SafeBytesField>(*mMoveSketches[i]);
    }
    android::util::
            addAStatsEvent(data, android::util::INPUT_EVENT_LATENCY_SKETCH,
                           // DOWN sketches
                           serializedDownData[SketchIndex::EVENT_TO_READ]->getBytesField(),
                           serializedDownData[SketchIndex::READ_TO_DELIVER]->getBytesField(),
                           serializedDownData[SketchIndex::DELIVER_TO_CONSUME]->getBytesField(),
                           serializedDownData[SketchIndex::CONSUME_TO_FINISH]->getBytesField(),
                           serializedDownData[SketchIndex::CONSUME_TO_GPU_COMPLETE]
                                   ->getBytesField(),
                           serializedDownData[SketchIndex::GPU_COMPLETE_TO_PRESENT]
                                   ->getBytesField(),
                           serializedDownData[SketchIndex::END_TO_END]->getBytesField(),
                           // MOVE sketches
                           serializedMoveData[SketchIndex::EVENT_TO_READ]->getBytesField(),
                           serializedMoveData[SketchIndex::READ_TO_DELIVER]->getBytesField(),
                           serializedMoveData[SketchIndex::DELIVER_TO_CONSUME]->getBytesField(),
                           serializedMoveData[SketchIndex::CONSUME_TO_FINISH]->getBytesField(),
                           serializedMoveData[SketchIndex::CONSUME_TO_GPU_COMPLETE]
                                   ->getBytesField(),
                           serializedMoveData[SketchIndex::GPU_COMPLETE_TO_PRESENT]
                                   ->getBytesField(),
                           serializedMoveData[SketchIndex::END_TO_END]->getBytesField());

    // The atom has been emitted; discard the aggregated data.
    for (size_t i = 0; i < SketchIndex::SIZE; i++) {
        mDownSketches[i]->Reset();
        mMoveSketches[i]->Reset();
    }
    // Start new aggregations
    mNumSketchEventsProcessed = 0;
    return AStatsManager_PULL_SUCCESS;
}
218
processSlowEvent(const InputEventTimeline & timeline)219 void LatencyAggregator::processSlowEvent(const InputEventTimeline& timeline) {
220 static const std::chrono::duration sSlowEventThreshold = getSlowEventMinReportingLatency();
221 static const std::chrono::duration sSlowEventReportingInterval =
222 getSlowEventMinReportingInterval();
223 for (const auto& [token, connectionTimeline] : timeline.connectionTimelines) {
224 if (!connectionTimeline.isComplete()) {
225 continue;
226 }
227 mNumEventsSinceLastSlowEventReport++;
228 const nsecs_t presentTime =
229 connectionTimeline.graphicsTimeline[GraphicsTimeline::PRESENT_TIME];
230 const std::chrono::nanoseconds endToEndLatency =
231 std::chrono::nanoseconds(presentTime - timeline.eventTime);
232 if (endToEndLatency < sSlowEventThreshold) {
233 continue;
234 }
235 // This is a slow event. Before we report it, check if we are reporting too often
236 const std::chrono::duration elapsedSinceLastReport =
237 std::chrono::nanoseconds(timeline.eventTime - mLastSlowEventTime);
238 if (elapsedSinceLastReport < sSlowEventReportingInterval) {
239 mNumSkippedSlowEvents++;
240 continue;
241 }
242
243 const nsecs_t eventToRead = timeline.readTime - timeline.eventTime;
244 const nsecs_t readToDeliver = connectionTimeline.deliveryTime - timeline.readTime;
245 const nsecs_t deliverToConsume =
246 connectionTimeline.consumeTime - connectionTimeline.deliveryTime;
247 const nsecs_t consumeToFinish =
248 connectionTimeline.finishTime - connectionTimeline.consumeTime;
249 const nsecs_t gpuCompletedTime =
250 connectionTimeline.graphicsTimeline[GraphicsTimeline::GPU_COMPLETED_TIME];
251 const nsecs_t consumeToGpuComplete = gpuCompletedTime - connectionTimeline.consumeTime;
252 const nsecs_t gpuCompleteToPresent = presentTime - gpuCompletedTime;
253
254 android::util::stats_write(android::util::SLOW_INPUT_EVENT_REPORTED,
255 timeline.inputEventActionType ==
256 InputEventActionType::MOTION_ACTION_DOWN,
257 static_cast<int32_t>(ns2us(eventToRead)),
258 static_cast<int32_t>(ns2us(readToDeliver)),
259 static_cast<int32_t>(ns2us(deliverToConsume)),
260 static_cast<int32_t>(ns2us(consumeToFinish)),
261 static_cast<int32_t>(ns2us(consumeToGpuComplete)),
262 static_cast<int32_t>(ns2us(gpuCompleteToPresent)),
263 static_cast<int32_t>(ns2us(endToEndLatency.count())),
264 static_cast<int32_t>(mNumEventsSinceLastSlowEventReport),
265 static_cast<int32_t>(mNumSkippedSlowEvents));
266 mNumEventsSinceLastSlowEventReport = 0;
267 mNumSkippedSlowEvents = 0;
268 mLastSlowEventTime = timeline.readTime;
269 }
270 }
271
dump(const char * prefix) const272 std::string LatencyAggregator::dump(const char* prefix) const {
273 std::scoped_lock lock(mLock);
274 std::string sketchDump = StringPrintf("%s Sketches:\n", prefix);
275 for (size_t i = 0; i < SketchIndex::SIZE; i++) {
276 const int64_t numDown = mDownSketches[i]->num_values();
277 SafeBytesField downBytesField(*mDownSketches[i]);
278 const float downBytesKb = downBytesField.getBytesField().arg_length * 1E-3;
279 const int64_t numMove = mMoveSketches[i]->num_values();
280 SafeBytesField moveBytesField(*mMoveSketches[i]);
281 const float moveBytesKb = moveBytesField.getBytesField().arg_length * 1E-3;
282 sketchDump +=
283 StringPrintf("%s mDownSketches[%zu]->num_values = %" PRId64 " size = %.1fKB"
284 " mMoveSketches[%zu]->num_values = %" PRId64 " size = %.1fKB\n",
285 prefix, i, numDown, downBytesKb, i, numMove, moveBytesKb);
286 }
287
288 return StringPrintf("%sLatencyAggregator:\n", prefix) + sketchDump +
289 StringPrintf("%s mNumSketchEventsProcessed=%zu\n", prefix, mNumSketchEventsProcessed) +
290 StringPrintf("%s mLastSlowEventTime=%" PRId64 "\n", prefix, mLastSlowEventTime) +
291 StringPrintf("%s mNumEventsSinceLastSlowEventReport = %zu\n", prefix,
292 mNumEventsSinceLastSlowEventReport) +
293 StringPrintf("%s mNumSkippedSlowEvents = %zu\n", prefix, mNumSkippedSlowEvents);
294 }
295
296 } // namespace android::inputdispatcher
297