1 /*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define STATSD_DEBUG false // STOPSHIP if true
18 #include "Log.h"
19
20 #include "StatsLogProcessor.h"
21
22 #include <android-base/file.h>
23 #include <cutils/multiuser.h>
24 #include <src/active_config_list.pb.h>
25 #include <src/experiment_ids.pb.h>
26
27 #include "StatsService.h"
28 #include "android-base/stringprintf.h"
29 #include "external/StatsPullerManager.h"
30 #include "flags/FlagProvider.h"
31 #include "guardrail/StatsdStats.h"
32 #include "logd/LogEvent.h"
33 #include "metrics/CountMetricProducer.h"
34 #include "state/StateManager.h"
35 #include "stats_log_util.h"
36 #include "stats_util.h"
37 #include "statslog_statsd.h"
38 #include "storage/StorageManager.h"
39 #include "utils/api_tracing.h"
40
41 using namespace android;
42 using android::base::StringPrintf;
43 using android::util::FIELD_COUNT_REPEATED;
44 using android::util::FIELD_TYPE_BOOL;
45 using android::util::FIELD_TYPE_FLOAT;
46 using android::util::FIELD_TYPE_INT32;
47 using android::util::FIELD_TYPE_INT64;
48 using android::util::FIELD_TYPE_MESSAGE;
49 using android::util::FIELD_TYPE_STRING;
50 using android::util::ProtoOutputStream;
51 using std::vector;
52
53 namespace android {
54 namespace os {
55 namespace statsd {
56
57 using aidl::android::os::IStatsQueryCallback;
58
59 // for ConfigMetricsReportList
60 const int FIELD_ID_CONFIG_KEY = 1;
61 const int FIELD_ID_REPORTS = 2;
62 // for ConfigKey
63 const int FIELD_ID_UID = 1;
64 const int FIELD_ID_ID = 2;
65 const int FIELD_ID_REPORT_NUMBER = 3;
66 const int FIELD_ID_STATSD_STATS_ID = 4;
67 // for ConfigMetricsReport
68 // const int FIELD_ID_METRICS = 1; // written in MetricsManager.cpp
69 const int FIELD_ID_UID_MAP = 2;
70 const int FIELD_ID_LAST_REPORT_ELAPSED_NANOS = 3;
71 const int FIELD_ID_CURRENT_REPORT_ELAPSED_NANOS = 4;
72 const int FIELD_ID_LAST_REPORT_WALL_CLOCK_NANOS = 5;
73 const int FIELD_ID_CURRENT_REPORT_WALL_CLOCK_NANOS = 6;
74 const int FIELD_ID_DUMP_REPORT_REASON = 8;
75 const int FIELD_ID_STRINGS = 9;
76 const int FIELD_ID_DATA_CORRUPTED_REASON = 11;
77 const int FIELD_ID_ESTIMATED_DATA_BYTES = 12;
78
79 // for ActiveConfigList
80 const int FIELD_ID_ACTIVE_CONFIG_LIST_CONFIG = 1;
81
82 // for permissions checks
83 constexpr const char* kPermissionDump = "android.permission.DUMP";
84 constexpr const char* kPermissionUsage = "android.permission.PACKAGE_USAGE_STATS";
85
#define NS_PER_HOUR (3600 * NS_PER_SEC)
87
88 #define STATS_ACTIVE_METRIC_DIR "/data/misc/stats-active-metric"
89 #define STATS_METADATA_DIR "/data/misc/stats-metadata"
90
91 // Cool down period for writing data to disk to avoid overwriting files.
92 #define WRITE_DATA_COOL_DOWN_SEC 15
93
94 namespace {
95
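// Builds the trace section name for an incoming atom, e.g. "OnLogEvent-27".
// Note that the returned pointer aliases a function-local static string, so it is only
// valid until the next call; this is presumably fine since events are processed
// sequentially.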
const char* getOnLogEventCallName(int32_t tagId) {
97 static std::string name;
98 // to avoid new string allocation on each call
99 name.reserve(30);
100 name = "OnLogEvent-" + std::to_string(tagId);
101 return name.c_str();
102 }
103
104 } // namespace
105
StatsLogProcessor::StatsLogProcessor(
107 const sp<UidMap>& uidMap, const sp<StatsPullerManager>& pullerManager,
108 const sp<AlarmMonitor>& anomalyAlarmMonitor, const sp<AlarmMonitor>& periodicAlarmMonitor,
109 const int64_t timeBaseNs, const std::function<bool(const ConfigKey&)>& sendBroadcast,
110 const std::function<bool(const int&, const vector<int64_t>&)>& activateBroadcast,
111 const std::function<void(const ConfigKey&, const string&, const vector<int64_t>&)>&
112 sendRestrictedMetricsBroadcast,
113 const std::shared_ptr<LogEventFilter>& logEventFilter)
114 : mLastTtlTime(0),
115 mLastFlushRestrictedTime(0),
116 mLastDbGuardrailEnforcementTime(0),
117 mUidMap(uidMap),
118 mPullerManager(pullerManager),
119 mAnomalyAlarmMonitor(anomalyAlarmMonitor),
120 mPeriodicAlarmMonitor(periodicAlarmMonitor),
121 mLogEventFilter(logEventFilter),
122 mSendBroadcast(sendBroadcast),
123 mSendActivationBroadcast(activateBroadcast),
124 mSendRestrictedMetricsBroadcast(sendRestrictedMetricsBroadcast),
125 mTimeBaseNs(timeBaseNs),
126 mLargestTimestampSeen(0),
127 mLastTimestampSeen(0) {
128 mPullerManager->ForceClearPullerCache();
129 StateManager::getInstance().updateLogSources(uidMap);
    // It is safe to call the locked version here - no concurrent access is possible
    // during construction.
131 updateLogEventFilterLocked();
132 }
133
StatsLogProcessor::~StatsLogProcessor() {
135 }
136
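// Copies the serialized contents of a ProtoOutputStream, chunk by chunk, into a single
// contiguous byte buffer.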
static void flushProtoToBuffer(ProtoOutputStream& proto, vector<uint8_t>* outData) {
138 outData->clear();
139 outData->resize(proto.size());
140 size_t pos = 0;
141 sp<android::util::ProtoReader> reader = proto.data();
142 while (reader->readBuffer() != NULL) {
143 size_t toRead = reader->currentToRead();
144 std::memcpy(&((*outData)[pos]), reader->readBuffer(), toRead);
145 pos += toRead;
146 reader->move(toRead);
147 }
148 }
149
void StatsLogProcessor::processFiredAnomalyAlarmsLocked(
151 const int64_t timestampNs,
152 unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet) {
153 for (const auto& itr : mMetricsManagers) {
154 itr.second->onAnomalyAlarmFired(timestampNs, alarmSet);
155 }
156 }
void StatsLogProcessor::onPeriodicAlarmFired(
158 const int64_t timestampNs,
159 unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet) {
160 std::lock_guard<std::mutex> lock(mMetricsMutex);
161 for (const auto& itr : mMetricsManagers) {
162 itr.second->onPeriodicAlarmFired(timestampNs, alarmSet);
163 }
164 }
165
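// Remaps isolated uids in the event to their host (app) uid so the data is attributed to
// the owning app. Events carrying an attribution chain are rewritten field by field; all
// other events go through the generic uid-field remapping helper.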
void StatsLogProcessor::mapIsolatedUidToHostUidIfNecessaryLocked(LogEvent* event) const {
167 if (std::pair<size_t, size_t> indexRange; event->hasAttributionChain(&indexRange)) {
168 vector<FieldValue>* const fieldValues = event->getMutableValues();
169 for (size_t i = indexRange.first; i <= indexRange.second; i++) {
170 FieldValue& fieldValue = fieldValues->at(i);
171 if (isAttributionUidField(fieldValue)) {
172 const int hostUid = mUidMap->getHostUidOrSelf(fieldValue.mValue.int_value);
173 fieldValue.mValue.setInt(hostUid);
174 }
175 }
176 } else {
177 mapIsolatedUidsToHostUidInLogEvent(mUidMap, *event);
178 }
179 }
180
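// Keeps the UidMap's isolated-uid bookkeeping in sync: registers the isolated-to-host
// mapping on creation events and removes it on removal events.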
void StatsLogProcessor::onIsolatedUidChangedEventLocked(const LogEvent& event) {
182 status_t err = NO_ERROR, err2 = NO_ERROR, err3 = NO_ERROR;
183 bool is_create = event.GetBool(3, &err);
184 auto parent_uid = int(event.GetLong(1, &err2));
185 auto isolated_uid = int(event.GetLong(2, &err3));
186 if (err == NO_ERROR && err2 == NO_ERROR && err3 == NO_ERROR) {
187 if (is_create) {
188 mUidMap->assignIsolatedUid(isolated_uid, parent_uid);
189 } else {
190 mUidMap->removeIsolatedUid(isolated_uid);
191 }
192 } else {
193 ALOGE("Failed to parse uid in the isolated uid change event.");
194 }
195 }
196
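// Handles BINARY_PUSH_STATE_CHANGED: after checking the logging process's permissions,
// reconciles the event with the train info stored on disk and rewrites any fields the
// event itself was missing (e.g. for rollback events).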
void StatsLogProcessor::onBinaryPushStateChangedEventLocked(LogEvent* event) {
198 pid_t pid = event->GetPid();
199 uid_t uid = event->GetUid();
200 if (!checkPermissionForIds(kPermissionDump, pid, uid) ||
201 !checkPermissionForIds(kPermissionUsage, pid, uid)) {
202 return;
203 }
204 // The Get* functions don't modify the status on success, they only write in
205 // failure statuses, so we can use one status variable for all calls then
206 // check if it is no longer NO_ERROR.
207 status_t err = NO_ERROR;
208 InstallTrainInfo trainInfo;
209 trainInfo.trainName = string(event->GetString(1 /*train name field id*/, &err));
210 trainInfo.trainVersionCode = event->GetLong(2 /*train version field id*/, &err);
211 trainInfo.requiresStaging = event->GetBool(3 /*requires staging field id*/, &err);
212 trainInfo.rollbackEnabled = event->GetBool(4 /*rollback enabled field id*/, &err);
213 trainInfo.requiresLowLatencyMonitor =
214 event->GetBool(5 /*requires low latency monitor field id*/, &err);
215 trainInfo.status = int32_t(event->GetLong(6 /*state field id*/, &err));
216 std::vector<uint8_t> trainExperimentIdBytes =
217 event->GetStorage(7 /*experiment ids field id*/, &err);
218 bool is_rollback = event->GetBool(10 /*is rollback field id*/, &err);
219
220 if (err != NO_ERROR) {
221 ALOGE("Failed to parse fields in binary push state changed log event");
222 return;
223 }
224 ExperimentIds trainExperimentIds;
225 if (!trainExperimentIds.ParseFromArray(trainExperimentIdBytes.data(),
226 trainExperimentIdBytes.size())) {
        ALOGE("Failed to parse experiment ids in binary push state changed.");
228 return;
229 }
230 trainInfo.experimentIds = {trainExperimentIds.experiment_id().begin(),
231 trainExperimentIds.experiment_id().end()};
232
233 // Update the train info on disk and get any data the logevent is missing.
234 getAndUpdateTrainInfoOnDisk(is_rollback, &trainInfo);
235
236 std::vector<uint8_t> trainExperimentIdProto;
237 writeExperimentIdsToProto(trainInfo.experimentIds, &trainExperimentIdProto);
238 int32_t userId = multiuser_get_user_id(uid);
239
240 event->updateValue(2 /*train version field id*/, trainInfo.trainVersionCode, LONG);
241 event->updateValue(7 /*experiment ids field id*/, trainExperimentIdProto, STORAGE);
242 event->updateValue(8 /*user id field id*/, userId, INT);
243
244 // If this event is a rollback event, then the following bits in the event
245 // are invalid and we will need to update them with the values we pulled
246 // from disk.
247 if (is_rollback) {
248 int bit = trainInfo.requiresStaging ? 1 : 0;
249 event->updateValue(3 /*requires staging field id*/, bit, INT);
250 bit = trainInfo.rollbackEnabled ? 1 : 0;
251 event->updateValue(4 /*rollback enabled field id*/, bit, INT);
252 bit = trainInfo.requiresLowLatencyMonitor ? 1 : 0;
253 event->updateValue(5 /*requires low latency monitor field id*/, bit, INT);
254 }
255 }
256
void StatsLogProcessor::getAndUpdateTrainInfoOnDisk(bool is_rollback,
258 InstallTrainInfo* trainInfo) {
259 // If the train name is empty, we don't know which train to attribute the
260 // event to, so return early.
261 if (trainInfo->trainName.empty()) {
262 return;
263 }
264 bool readTrainInfoSuccess = false;
265 InstallTrainInfo trainInfoOnDisk;
266 readTrainInfoSuccess = StorageManager::readTrainInfo(trainInfo->trainName, trainInfoOnDisk);
267
268 bool resetExperimentIds = false;
269 if (readTrainInfoSuccess) {
270 // Keep the old train version if we received an empty version.
271 if (trainInfo->trainVersionCode == -1) {
272 trainInfo->trainVersionCode = trainInfoOnDisk.trainVersionCode;
273 } else if (trainInfo->trainVersionCode != trainInfoOnDisk.trainVersionCode) {
274 // Reset experiment ids if we receive a new non-empty train version.
275 resetExperimentIds = true;
276 }
277
278 // Reset if we received a different experiment id.
279 if (!trainInfo->experimentIds.empty() &&
280 (trainInfoOnDisk.experimentIds.empty() ||
281 trainInfo->experimentIds.at(0) != trainInfoOnDisk.experimentIds[0])) {
282 resetExperimentIds = true;
283 }
284 }
285
286 // Find the right experiment IDs
287 if ((!resetExperimentIds || is_rollback) && readTrainInfoSuccess) {
288 trainInfo->experimentIds = trainInfoOnDisk.experimentIds;
289 }
290
291 if (!trainInfo->experimentIds.empty()) {
292 int64_t firstId = trainInfo->experimentIds.at(0);
293 auto& ids = trainInfo->experimentIds;
294 switch (trainInfo->status) {
295 case util::BINARY_PUSH_STATE_CHANGED__STATE__INSTALL_SUCCESS:
296 if (find(ids.begin(), ids.end(), firstId + 1) == ids.end()) {
297 ids.push_back(firstId + 1);
298 }
299 break;
300 case util::BINARY_PUSH_STATE_CHANGED__STATE__INSTALLER_ROLLBACK_INITIATED:
301 if (find(ids.begin(), ids.end(), firstId + 2) == ids.end()) {
302 ids.push_back(firstId + 2);
303 }
304 break;
305 case util::BINARY_PUSH_STATE_CHANGED__STATE__INSTALLER_ROLLBACK_SUCCESS:
306 if (find(ids.begin(), ids.end(), firstId + 3) == ids.end()) {
307 ids.push_back(firstId + 3);
308 }
309 break;
310 }
311 }
312
313 // If this event is a rollback event, the following fields are invalid and
314 // need to be replaced by the fields stored to disk.
315 if (is_rollback) {
316 trainInfo->requiresStaging = trainInfoOnDisk.requiresStaging;
317 trainInfo->rollbackEnabled = trainInfoOnDisk.rollbackEnabled;
318 trainInfo->requiresLowLatencyMonitor = trainInfoOnDisk.requiresLowLatencyMonitor;
319 }
320
321 StorageManager::writeTrainInfo(*trainInfo);
322 }
323
void StatsLogProcessor::onWatchdogRollbackOccurredLocked(LogEvent* event) {
325 pid_t pid = event->GetPid();
326 uid_t uid = event->GetUid();
327 if (!checkPermissionForIds(kPermissionDump, pid, uid) ||
328 !checkPermissionForIds(kPermissionUsage, pid, uid)) {
329 return;
330 }
331 // The Get* functions don't modify the status on success, they only write in
332 // failure statuses, so we can use one status variable for all calls then
333 // check if it is no longer NO_ERROR.
334 status_t err = NO_ERROR;
335 int32_t rollbackType = int32_t(event->GetInt(1 /*rollback type field id*/, &err));
336 string packageName = string(event->GetString(2 /*package name field id*/, &err));
337
338 if (err != NO_ERROR) {
339 ALOGE("Failed to parse fields in watchdog rollback occurred log event");
340 return;
341 }
342
343 vector<int64_t> experimentIds =
344 processWatchdogRollbackOccurred(rollbackType, packageName);
345 vector<uint8_t> experimentIdProto;
346 writeExperimentIdsToProto(experimentIds, &experimentIdProto);
347
348 event->updateValue(6 /*experiment ids field id*/, experimentIdProto, STORAGE);
349 }
350
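// Looks up the train info stored under the rolled-back package name, appends the
// experiment id corresponding to the rollback stage if it is not already present,
// persists the update, and returns the resulting experiment ids.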
vector<int64_t> StatsLogProcessor::processWatchdogRollbackOccurred(const int32_t rollbackTypeIn,
352 const string& packageNameIn) {
353 // If the package name is empty, we can't attribute it to any train, so
354 // return early.
355 if (packageNameIn.empty()) {
356 return vector<int64_t>();
357 }
358 bool readTrainInfoSuccess = false;
359 InstallTrainInfo trainInfoOnDisk;
360 // We use the package name of the event as the train name.
361 readTrainInfoSuccess = StorageManager::readTrainInfo(packageNameIn, trainInfoOnDisk);
362
363 if (!readTrainInfoSuccess) {
364 return vector<int64_t>();
365 }
366
367 if (trainInfoOnDisk.experimentIds.empty()) {
368 return vector<int64_t>();
369 }
370
371 int64_t firstId = trainInfoOnDisk.experimentIds[0];
372 auto& ids = trainInfoOnDisk.experimentIds;
373 switch (rollbackTypeIn) {
374 case util::WATCHDOG_ROLLBACK_OCCURRED__ROLLBACK_TYPE__ROLLBACK_INITIATE:
375 if (find(ids.begin(), ids.end(), firstId + 4) == ids.end()) {
376 ids.push_back(firstId + 4);
377 }
378 StorageManager::writeTrainInfo(trainInfoOnDisk);
379 break;
380 case util::WATCHDOG_ROLLBACK_OCCURRED__ROLLBACK_TYPE__ROLLBACK_SUCCESS:
381 if (find(ids.begin(), ids.end(), firstId + 5) == ids.end()) {
382 ids.push_back(firstId + 5);
383 }
384 StorageManager::writeTrainInfo(trainInfoOnDisk);
385 break;
386 }
387
388 return trainInfoOnDisk.experimentIds;
389 }
390
void StatsLogProcessor::resetConfigs() {
392 std::lock_guard<std::mutex> lock(mMetricsMutex);
393 resetConfigsLocked(getElapsedRealtimeNs());
394 }
395
void StatsLogProcessor::resetConfigsLocked(const int64_t timestampNs) {
397 std::vector<ConfigKey> configKeys;
398 for (auto it = mMetricsManagers.begin(); it != mMetricsManagers.end(); it++) {
399 configKeys.push_back(it->first);
400 }
401 resetConfigsLocked(timestampNs, configKeys);
402 }
403
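// Trace-annotated entry point; stamps the event with the current elapsed-realtime clock
// before processing it.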
void StatsLogProcessor::OnLogEvent(LogEvent* event) {
405 ATRACE_NAME(getOnLogEventCallName(event->GetTagId()));
406 OnLogEvent(event, getElapsedRealtimeNs());
407 }
408
void StatsLogProcessor::OnLogEvent(LogEvent* event, int64_t elapsedRealtimeNs) {
410 std::lock_guard<std::mutex> lock(mMetricsMutex);
411
412 // Tell StatsdStats about new event
413 const int64_t eventElapsedTimeNs = event->GetElapsedTimestampNs();
414 const int atomId = event->GetTagId();
415 StatsdStats::getInstance().noteAtomLogged(atomId, eventElapsedTimeNs / NS_PER_SEC,
416 event->isParsedHeaderOnly());
417 if (!event->isValid()) {
418 StatsdStats::getInstance().noteAtomError(atomId);
419 return;
420 }
421
422 // Hard-coded logic to update train info on disk and fill in any information
423 // this log event may be missing.
424 if (atomId == util::BINARY_PUSH_STATE_CHANGED) {
425 onBinaryPushStateChangedEventLocked(event);
426 }
427
428 // Hard-coded logic to update experiment ids on disk for certain rollback
429 // types and fill the rollback atom with experiment ids
430 if (atomId == util::WATCHDOG_ROLLBACK_OCCURRED) {
431 onWatchdogRollbackOccurredLocked(event);
432 }
433
434 if (mPrintAllLogs) {
435 ALOGI("%s", event->ToString().c_str());
436 }
437 resetIfConfigTtlExpiredLocked(eventElapsedTimeNs);
438
439 // Hard-coded logic to update the isolated uid's in the uid-map.
    // The field numbers currently need to be updated by hand to match atoms.proto.
441 if (atomId == util::ISOLATED_UID_CHANGED) {
442 onIsolatedUidChangedEventLocked(*event);
443 } else {
444 // Map the isolated uid to host uid if necessary.
445 mapIsolatedUidToHostUidIfNecessaryLocked(event);
446 }
447
448 StateManager::getInstance().onLogEvent(*event);
449
450 if (mMetricsManagers.empty()) {
451 return;
452 }
453
454 bool fireAlarm = false;
455 {
456 std::lock_guard<std::mutex> anomalyLock(mAnomalyAlarmMutex);
457 if (mNextAnomalyAlarmTime != 0 &&
458 MillisToNano(mNextAnomalyAlarmTime) <= elapsedRealtimeNs) {
459 mNextAnomalyAlarmTime = 0;
460 VLOG("informing anomaly alarm at time %lld", (long long)elapsedRealtimeNs);
461 fireAlarm = true;
462 }
463 }
464 if (fireAlarm) {
465 informAnomalyAlarmFiredLocked(NanoToMillis(elapsedRealtimeNs));
466 }
467
468 const int64_t curTimeSec = NanoToSeconds(elapsedRealtimeNs);
469 if (curTimeSec - mLastPullerCacheClearTimeSec > StatsdStats::kPullerCacheClearIntervalSec) {
470 mPullerManager->ClearPullerCacheIfNecessary(curTimeSec * NS_PER_SEC);
471 mLastPullerCacheClearTimeSec = curTimeSec;
472 }
473
474 flushRestrictedDataIfNecessaryLocked(elapsedRealtimeNs);
475 enforceDataTtlsIfNecessaryLocked(getWallClockNs(), elapsedRealtimeNs);
476 enforceDbGuardrailsIfNecessaryLocked(getWallClockNs(), elapsedRealtimeNs);
477
478 if (!validateAppBreadcrumbEvent(*event)) {
479 return;
480 }
481
482 std::unordered_set<int> uidsWithActiveConfigsChanged;
483 std::unordered_map<int, std::vector<int64_t>> activeConfigsPerUid;
484
485 // pass the event to metrics managers.
486 for (auto& pair : mMetricsManagers) {
487 if (event->isRestricted() && !pair.second->hasRestrictedMetricsDelegate()) {
488 continue;
489 }
490 int uid = pair.first.GetUid();
491 int64_t configId = pair.first.GetId();
492 bool isPrevActive = pair.second->isActive();
493 pair.second->onLogEvent(*event);
494 bool isCurActive = pair.second->isActive();
495 // Map all active configs by uid.
496 if (isCurActive) {
497 auto activeConfigs = activeConfigsPerUid.find(uid);
498 if (activeConfigs != activeConfigsPerUid.end()) {
499 activeConfigs->second.push_back(configId);
500 } else {
501 vector<int64_t> newActiveConfigs;
502 newActiveConfigs.push_back(configId);
503 activeConfigsPerUid[uid] = newActiveConfigs;
504 }
505 }
506 // The activation state of this config changed.
507 if (isPrevActive != isCurActive) {
508 VLOG("Active status changed for uid %d", uid);
509 uidsWithActiveConfigsChanged.insert(uid);
510 StatsdStats::getInstance().noteActiveStatusChanged(pair.first, isCurActive);
511 }
512 flushIfNecessaryLocked(pair.first, *(pair.second));
513 }
514
515 // Don't use the event timestamp for the guardrail.
516 for (int uid : uidsWithActiveConfigsChanged) {
517 // Send broadcast so that receivers can pull data.
518 auto lastBroadcastTime = mLastActivationBroadcastTimes.find(uid);
519 if (lastBroadcastTime != mLastActivationBroadcastTimes.end()) {
520 if (elapsedRealtimeNs - lastBroadcastTime->second <
521 StatsdStats::kMinActivationBroadcastPeriodNs) {
522 StatsdStats::getInstance().noteActivationBroadcastGuardrailHit(uid);
523 VLOG("StatsD would've sent an activation broadcast but the rate limit stopped us.");
524 return;
525 }
526 }
527 auto activeConfigs = activeConfigsPerUid.find(uid);
528 if (activeConfigs != activeConfigsPerUid.end()) {
529 if (mSendActivationBroadcast(uid, activeConfigs->second)) {
530 VLOG("StatsD sent activation notice for uid %d", uid);
531 mLastActivationBroadcastTimes[uid] = elapsedRealtimeNs;
532 }
533 } else {
534 std::vector<int64_t> emptyActiveConfigs;
535 if (mSendActivationBroadcast(uid, emptyActiveConfigs)) {
536 VLOG("StatsD sent EMPTY activation notice for uid %d", uid);
537 mLastActivationBroadcastTimes[uid] = elapsedRealtimeNs;
538 }
539 }
540 }
541 }
542
void StatsLogProcessor::GetActiveConfigs(const int uid, vector<int64_t>& outActiveConfigs) {
544 std::lock_guard<std::mutex> lock(mMetricsMutex);
545 GetActiveConfigsLocked(uid, outActiveConfigs);
546 }
547
void StatsLogProcessor::GetActiveConfigsLocked(const int uid, vector<int64_t>& outActiveConfigs) {
549 outActiveConfigs.clear();
550 for (auto& pair : mMetricsManagers) {
551 if (pair.first.GetUid() == uid && pair.second->isActive()) {
552 outActiveConfigs.push_back(pair.first.GetId());
553 }
554 }
555 }
556
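// Applies a new or updated config. Any data already collected for this config key is
// first written to disk (when applicable) so it is not lost by the update.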
void StatsLogProcessor::OnConfigUpdated(const int64_t timestampNs, const int64_t wallClockNs,
558 const ConfigKey& key, const StatsdConfig& config,
559 bool modularUpdate) {
560 std::lock_guard<std::mutex> lock(mMetricsMutex);
561 WriteDataToDiskLocked(key, timestampNs, wallClockNs, CONFIG_UPDATED, NO_TIME_CONSTRAINTS);
562 OnConfigUpdatedLocked(timestampNs, key, config, modularUpdate);
563 }
564
void StatsLogProcessor::OnConfigUpdated(const int64_t timestampNs, const ConfigKey& key,
566 const StatsdConfig& config, bool modularUpdate) {
567 OnConfigUpdated(timestampNs, getWallClockNs(), key, config, modularUpdate);
568 }
569
void StatsLogProcessor::OnConfigUpdatedLocked(const int64_t timestampNs, const ConfigKey& key,
571 const StatsdConfig& config, bool modularUpdate) {
572 VLOG("Updated configuration for key %s", key.ToString().c_str());
573 const auto& it = mMetricsManagers.find(key);
574 bool configValid = false;
575 if (isAtLeastU() && it != mMetricsManagers.end()) {
576 if (it->second->hasRestrictedMetricsDelegate() !=
577 config.has_restricted_metrics_delegate_package_name()) {
578 // Not a modular update if has_restricted_metrics_delegate changes
579 modularUpdate = false;
580 }
581 if (!modularUpdate && it->second->hasRestrictedMetricsDelegate()) {
582 StatsdStats::getInstance().noteDbDeletionConfigUpdated(key);
583 // Always delete the old db if restricted metrics config is not a
584 // modular update.
585 dbutils::deleteDb(key);
586 }
587 }
588 // Create new config if this is not a modular update or if this is a new config.
589 if (!modularUpdate || it == mMetricsManagers.end()) {
590 sp<MetricsManager> newMetricsManager =
591 new MetricsManager(key, config, mTimeBaseNs, timestampNs, mUidMap, mPullerManager,
592 mAnomalyAlarmMonitor, mPeriodicAlarmMonitor);
593 configValid = newMetricsManager->isConfigValid();
594 if (configValid) {
595 newMetricsManager->init();
596 newMetricsManager->refreshTtl(timestampNs);
            // An sdk check for U+ is unnecessary because a config with a restricted metrics
            // delegate will be invalid on non-U+ devices.
599 if (newMetricsManager->hasRestrictedMetricsDelegate()) {
600 mSendRestrictedMetricsBroadcast(key,
601 newMetricsManager->getRestrictedMetricsDelegate(),
602 newMetricsManager->getAllMetricIds());
603 string err;
604 if (!dbutils::updateDeviceInfoTable(key, err)) {
605 ALOGE("Failed to create device_info table for configKey %s, err: %s",
606 key.ToString().c_str(), err.c_str());
607 StatsdStats::getInstance().noteDeviceInfoTableCreationFailed(key);
608 }
609 } else if (it != mMetricsManagers.end() && it->second->hasRestrictedMetricsDelegate()) {
610 mSendRestrictedMetricsBroadcast(key, it->second->getRestrictedMetricsDelegate(),
611 {});
612 }
613 mMetricsManagers[key] = newMetricsManager;
614 VLOG("StatsdConfig valid");
615 }
616 } else {
617 // Preserve the existing MetricsManager, update necessary components and metadata in place.
618 configValid = it->second->updateConfig(config, mTimeBaseNs, timestampNs,
619 mAnomalyAlarmMonitor, mPeriodicAlarmMonitor);
620 if (configValid && it->second->hasRestrictedMetricsDelegate()) {
621 mSendRestrictedMetricsBroadcast(key, it->second->getRestrictedMetricsDelegate(),
622 it->second->getAllMetricIds());
623 }
624 }
625
626 if (configValid && !config.has_restricted_metrics_delegate_package_name()) {
627 // We do not need to track uid map changes for restricted metrics since the uidmap is not
628 // stored in the sqlite db.
629 mUidMap->OnConfigUpdated(key);
630 } else if (configValid && config.has_restricted_metrics_delegate_package_name()) {
631 mUidMap->OnConfigRemoved(key);
632 }
633 if (!configValid) {
634 // If there is any error in the config, don't use it.
635 // Remove any existing config with the same key.
636 ALOGE("StatsdConfig NOT valid");
637 // Send an empty restricted metrics broadcast if the previous config was restricted.
638 if (isAtLeastU() && it != mMetricsManagers.end() &&
639 it->second->hasRestrictedMetricsDelegate()) {
640 mSendRestrictedMetricsBroadcast(key, it->second->getRestrictedMetricsDelegate(), {});
641 StatsdStats::getInstance().noteDbConfigInvalid(key);
642 dbutils::deleteDb(key);
643 }
644 mMetricsManagers.erase(key);
645 mUidMap->OnConfigRemoved(key);
646 }
647
648 updateLogEventFilterLocked();
649 }
650
size_t StatsLogProcessor::GetMetricsSize(const ConfigKey& key) const {
652 std::lock_guard<std::mutex> lock(mMetricsMutex);
653 auto it = mMetricsManagers.find(key);
654 if (it == mMetricsManagers.end()) {
655 ALOGW("Config source %s does not exist", key.ToString().c_str());
656 return 0;
657 }
658 return it->second->byteSize();
659 }
660
void StatsLogProcessor::dumpStates(int out, bool verbose) const {
662 std::lock_guard<std::mutex> lock(mMetricsMutex);
663 dprintf(out, "MetricsManager count: %lu\n", (unsigned long)mMetricsManagers.size());
664 for (const auto& metricsManager : mMetricsManagers) {
665 metricsManager.second->dumpStates(out, verbose);
666 }
667 }
668
669 /*
670 * onDumpReport dumps serialized ConfigMetricsReportList into proto.
671 */
void StatsLogProcessor::onDumpReport(const ConfigKey& key, const int64_t dumpTimeStampNs,
673 const int64_t wallClockNs,
674 const bool include_current_partial_bucket,
675 const bool erase_data, const DumpReportReason dumpReportReason,
676 const DumpLatency dumpLatency, ProtoOutputStream* proto) {
677 std::lock_guard<std::mutex> lock(mMetricsMutex);
678
679 auto it = mMetricsManagers.find(key);
680 if (it != mMetricsManagers.end() && it->second->hasRestrictedMetricsDelegate()) {
681 VLOG("Unexpected call to StatsLogProcessor::onDumpReport for restricted metrics.");
682 return;
683 }
684
685 // Start of ConfigKey.
686 uint64_t configKeyToken = proto->start(FIELD_TYPE_MESSAGE | FIELD_ID_CONFIG_KEY);
687 proto->write(FIELD_TYPE_INT32 | FIELD_ID_UID, key.GetUid());
688 proto->write(FIELD_TYPE_INT64 | FIELD_ID_ID, (long long)key.GetId());
689 proto->end(configKeyToken);
690 // End of ConfigKey.
691
692 bool keepFile = false;
693 if (it != mMetricsManagers.end() && it->second->shouldPersistLocalHistory()) {
694 keepFile = true;
695 }
696
    // Then, check the stats-data directory to see if there's any file containing
698 // ConfigMetricsReport from previous shutdowns to concatenate to reports.
699 StorageManager::appendConfigMetricsReport(
700 key, proto, erase_data && !keepFile /* should remove file after appending it */,
701 dumpReportReason == ADB_DUMP /*if caller is adb*/);
702
703 if (it != mMetricsManagers.end()) {
704 // This allows another broadcast to be sent within the rate-limit period if we get close to
705 // filling the buffer again soon.
706 mLastBroadcastTimes.erase(key);
707
708 vector<uint8_t> buffer;
709 onConfigMetricsReportLocked(key, dumpTimeStampNs, wallClockNs,
710 include_current_partial_bucket, erase_data, dumpReportReason,
711 dumpLatency, false /* is this data going to be saved on disk */,
712 &buffer);
713 proto->write(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_REPORTS,
714 reinterpret_cast<char*>(buffer.data()), buffer.size());
715 } else {
716 ALOGW("Config source %s does not exist", key.ToString().c_str());
717 }
718
719 if (erase_data) {
720 ++mDumpReportNumbers[key];
721 }
722 proto->write(FIELD_TYPE_INT32 | FIELD_ID_REPORT_NUMBER, mDumpReportNumbers[key]);
723
724 proto->write(FIELD_TYPE_INT32 | FIELD_ID_STATSD_STATS_ID,
725 StatsdStats::getInstance().getStatsdStatsId());
726 if (erase_data) {
727 StatsdStats::getInstance().noteMetricsReportSent(key, proto->size(),
728 mDumpReportNumbers[key]);
729 }
730 }
731
732 /*
733 * onDumpReport dumps serialized ConfigMetricsReportList into outData.
734 */
void StatsLogProcessor::onDumpReport(const ConfigKey& key, const int64_t dumpTimeStampNs,
736 const int64_t wallClockNs,
737 const bool include_current_partial_bucket,
738 const bool erase_data, const DumpReportReason dumpReportReason,
739 const DumpLatency dumpLatency, vector<uint8_t>* outData) {
740 ProtoOutputStream proto;
741 onDumpReport(key, dumpTimeStampNs, wallClockNs, include_current_partial_bucket, erase_data,
742 dumpReportReason, dumpLatency, &proto);
743
744 if (outData != nullptr) {
745 flushProtoToBuffer(proto, outData);
746 VLOG("output data size %zu", outData->size());
747 }
748 }
749
750 /*
751 * For test use only. Excludes wallclockNs.
752 * onDumpReport dumps serialized ConfigMetricsReportList into outData.
753 */
void StatsLogProcessor::onDumpReport(const ConfigKey& key, const int64_t dumpTimeStampNs,
755 const bool include_current_partial_bucket,
756 const bool erase_data, const DumpReportReason dumpReportReason,
757 const DumpLatency dumpLatency, vector<uint8_t>* outData) {
758 onDumpReport(key, dumpTimeStampNs, getWallClockNs(), include_current_partial_bucket, erase_data,
759 dumpReportReason, dumpLatency, outData);
760 }
761
762 /*
 * onConfigMetricsReportLocked dumps a serialized ConfigMetricsReport into the provided buffer.
764 */
void StatsLogProcessor::onConfigMetricsReportLocked(
766 const ConfigKey& key, const int64_t dumpTimeStampNs, const int64_t wallClockNs,
767 const bool include_current_partial_bucket, const bool erase_data,
768 const DumpReportReason dumpReportReason, const DumpLatency dumpLatency,
769 const bool dataSavedOnDisk, vector<uint8_t>* buffer) {
770 // We already checked whether key exists in mMetricsManagers in
771 // WriteDataToDisk.
772 auto it = mMetricsManagers.find(key);
773 if (it == mMetricsManagers.end()) {
774 return;
775 }
776 if (it->second->hasRestrictedMetricsDelegate()) {
777 VLOG("Unexpected call to StatsLogProcessor::onConfigMetricsReportLocked for restricted "
778 "metrics.");
779 // Do not call onDumpReport for restricted metrics.
780 return;
781 }
782
    // Get and forward queue overflow stats to StateManager only when there is a
    // metric report to be collected; the data loss flags are not used otherwise.
786 processQueueOverflowStatsLocked();
787
788 int64_t lastReportTimeNs = it->second->getLastReportTimeNs();
789 int64_t lastReportWallClockNs = it->second->getLastReportWallClockNs();
790
791 std::set<string> strSet;
792 std::set<int32_t> usedUids;
793
794 int64_t totalSize = it->second->byteSize();
795
796 ProtoOutputStream tempProto;
797 // First, fill in ConfigMetricsReport using current data on memory, which
798 // starts from filling in StatsLogReport's.
799 it->second->onDumpReport(dumpTimeStampNs, wallClockNs, include_current_partial_bucket,
800 erase_data, dumpLatency, &strSet, usedUids, &tempProto);
801
802 // Fill in UidMap if there is at least one metric to report.
803 // This skips the uid map if it's an empty config.
804 if (it->second->getNumMetrics() > 0) {
805 uint64_t uidMapToken = tempProto.start(FIELD_TYPE_MESSAGE | FIELD_ID_UID_MAP);
806 UidMapOptions uidMapOptions = it->second->getUidMapOptions();
807 uidMapOptions.usedUids = std::move(usedUids);
808 mUidMap->appendUidMap(dumpTimeStampNs, key, uidMapOptions,
809 it->second->hashStringInReport() ? &strSet : nullptr, &tempProto);
810 tempProto.end(uidMapToken);
811 }
812
813 // Fill in the timestamps.
814 tempProto.write(FIELD_TYPE_INT64 | FIELD_ID_LAST_REPORT_ELAPSED_NANOS,
815 (long long)lastReportTimeNs);
816 tempProto.write(FIELD_TYPE_INT64 | FIELD_ID_CURRENT_REPORT_ELAPSED_NANOS,
817 (long long)dumpTimeStampNs);
818 tempProto.write(FIELD_TYPE_INT64 | FIELD_ID_LAST_REPORT_WALL_CLOCK_NANOS,
819 (long long)lastReportWallClockNs);
820 tempProto.write(FIELD_TYPE_INT64 | FIELD_ID_CURRENT_REPORT_WALL_CLOCK_NANOS,
821 (long long)wallClockNs);
822 // Dump report reason
823 tempProto.write(FIELD_TYPE_INT32 | FIELD_ID_DUMP_REPORT_REASON, dumpReportReason);
824
825 for (const auto& str : strSet) {
826 tempProto.write(FIELD_TYPE_STRING | FIELD_COUNT_REPEATED | FIELD_ID_STRINGS, str);
827 }
828
829 // Data corrupted reason
830 writeDataCorruptedReasons(tempProto, FIELD_ID_DATA_CORRUPTED_REASON,
831 StatsdStats::getInstance().hasEventQueueOverflow(),
832 StatsdStats::getInstance().hasSocketLoss());
833
834 // Estimated memory bytes
835 tempProto.write(FIELD_TYPE_INT64 | FIELD_ID_ESTIMATED_DATA_BYTES, totalSize);
836
837 flushProtoToBuffer(tempProto, buffer);
838
839 // save buffer to disk if needed
840 if (erase_data && !dataSavedOnDisk && it->second->shouldPersistLocalHistory()) {
841 VLOG("save history to disk");
842 string file_name = StorageManager::getDataHistoryFileName((long)getWallClockSec(),
843 key.GetUid(), key.GetId());
844 StorageManager::writeFile(file_name.c_str(), buffer->data(), buffer->size());
845 }
846 }
847
void StatsLogProcessor::resetConfigsLocked(const int64_t timestampNs,
849 const std::vector<ConfigKey>& configs) {
850 for (const auto& key : configs) {
851 StatsdConfig config;
852 if (StorageManager::readConfigFromDisk(key, &config)) {
853 // Force a full update when resetting a config.
854 OnConfigUpdatedLocked(timestampNs, key, config, /*modularUpdate=*/false);
855 StatsdStats::getInstance().noteConfigReset(key);
856 } else {
857 ALOGE("Failed to read backup config from disk for : %s", key.ToString().c_str());
858 auto it = mMetricsManagers.find(key);
859 if (it != mMetricsManagers.end()) {
860 it->second->refreshTtl(timestampNs);
861 }
862 }
863 }
864 }
865
void StatsLogProcessor::resetIfConfigTtlExpiredLocked(const int64_t eventTimeNs) {
867 std::vector<ConfigKey> configKeysTtlExpired;
868 for (auto it = mMetricsManagers.begin(); it != mMetricsManagers.end(); it++) {
869 if (it->second != nullptr && !it->second->isInTtl(eventTimeNs)) {
870 configKeysTtlExpired.push_back(it->first);
871 }
872 }
873 if (configKeysTtlExpired.size() > 0) {
874 WriteDataToDiskLocked(CONFIG_RESET, NO_TIME_CONSTRAINTS, getElapsedRealtimeNs(),
875 getWallClockNs());
876 resetConfigsLocked(eventTimeNs, configKeysTtlExpired);
877 }
878 }
879
void StatsLogProcessor::OnConfigRemoved(const ConfigKey& key) {
881 std::lock_guard<std::mutex> lock(mMetricsMutex);
882 auto it = mMetricsManagers.find(key);
883 if (it != mMetricsManagers.end()) {
884 WriteDataToDiskLocked(key, getElapsedRealtimeNs(), getWallClockNs(), CONFIG_REMOVED,
885 NO_TIME_CONSTRAINTS);
886 if (isAtLeastU() && it->second->hasRestrictedMetricsDelegate()) {
887 StatsdStats::getInstance().noteDbDeletionConfigRemoved(key);
888 dbutils::deleteDb(key);
889 mSendRestrictedMetricsBroadcast(key, it->second->getRestrictedMetricsDelegate(), {});
890 }
891 mMetricsManagers.erase(it);
892 mUidMap->OnConfigRemoved(key);
893 }
894 StatsdStats::getInstance().noteConfigRemoved(key);
895
896 mLastBroadcastTimes.erase(key);
897 mLastByteSizeTimes.erase(key);
898 mDumpReportNumbers.erase(key);
899
900 int uid = key.GetUid();
901 bool lastConfigForUid = true;
902 for (const auto& it : mMetricsManagers) {
903 if (it.first.GetUid() == uid) {
904 lastConfigForUid = false;
905 break;
906 }
907 }
908 if (lastConfigForUid) {
909 mLastActivationBroadcastTimes.erase(uid);
910 }
911
912 if (mMetricsManagers.empty()) {
913 mPullerManager->ForceClearPullerCache();
914 }
915
916 updateLogEventFilterLocked();
917 }
918
919 // TODO(b/267501143): Add unit tests when metric producer is ready
void StatsLogProcessor::enforceDataTtlsIfNecessaryLocked(const int64_t wallClockNs,
921 const int64_t elapsedRealtimeNs) {
922 if (!isAtLeastU()) {
923 return;
924 }
925 if (elapsedRealtimeNs - mLastTtlTime < StatsdStats::kMinTtlCheckPeriodNs) {
926 return;
927 }
928 enforceDataTtlsLocked(wallClockNs, elapsedRealtimeNs);
929 }
930
void StatsLogProcessor::flushRestrictedDataIfNecessaryLocked(const int64_t elapsedRealtimeNs) {
932 if (!isAtLeastU()) {
933 return;
934 }
935 if (elapsedRealtimeNs - mLastFlushRestrictedTime < StatsdStats::kMinFlushRestrictedPeriodNs) {
936 return;
937 }
938 flushRestrictedDataLocked(elapsedRealtimeNs);
939 }
940
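// Runs an SQL query against the restricted-metrics database belonging to
// (configPackage, configId), after validating the sdk level, the sqlite client version,
// and the caller's delegate permissions. Results (or a failure message) are delivered
// through the supplied callback.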
void StatsLogProcessor::querySql(const string& sqlQuery, const int32_t minSqlClientVersion,
942 const optional<vector<uint8_t>>& policyConfig,
943 const shared_ptr<IStatsQueryCallback>& callback,
944 const int64_t configId, const string& configPackage,
945 const int32_t callingUid) {
946 std::lock_guard<std::mutex> lock(mMetricsMutex);
947 string err = "";
948
949 if (!isAtLeastU()) {
950 ALOGW("Restricted metrics query invoked on U- device");
951 StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
952 configId, configPackage, std::nullopt, callingUid,
953 InvalidQueryReason(FLAG_DISABLED));
954 return;
955 }
956
957 const int64_t elapsedRealtimeNs = getElapsedRealtimeNs();
958
959 // TODO(b/268416460): validate policyConfig here
960
961 if (minSqlClientVersion > dbutils::getDbVersion()) {
962 callback->sendFailure(StringPrintf(
963 "Unsupported sqlite version. Installed Version: %d, Requested Version: %d.",
964 dbutils::getDbVersion(), minSqlClientVersion));
965 StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
966 configId, configPackage, std::nullopt, callingUid,
967 InvalidQueryReason(UNSUPPORTED_SQLITE_VERSION));
968 return;
969 }
970
971 set<int32_t> configPackageUids;
972 const auto& uidMapItr = UidMap::sAidToUidMapping.find(configPackage);
973 if (uidMapItr != UidMap::sAidToUidMapping.end()) {
974 configPackageUids.insert(uidMapItr->second);
975 } else {
976 configPackageUids = mUidMap->getAppUid(configPackage);
977 }
978
979 InvalidQueryReason invalidQueryReason;
980 set<ConfigKey> keysToQuery = getRestrictedConfigKeysToQueryLocked(
981 callingUid, configId, configPackageUids, err, invalidQueryReason);
982
983 if (keysToQuery.empty()) {
984 callback->sendFailure(err);
985 StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
986 configId, configPackage, std::nullopt, callingUid,
987 InvalidQueryReason(invalidQueryReason));
988 return;
989 }
990
991 if (keysToQuery.size() > 1) {
992 err = "Ambiguous ConfigKey";
993 callback->sendFailure(err);
994 StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
995 configId, configPackage, std::nullopt, callingUid,
996 InvalidQueryReason(AMBIGUOUS_CONFIG_KEY));
997 return;
998 }
999
1000 flushRestrictedDataLocked(elapsedRealtimeNs);
1001 enforceDataTtlsLocked(getWallClockNs(), elapsedRealtimeNs);
1002
1003 std::vector<std::vector<std::string>> rows;
1004 std::vector<int32_t> columnTypes;
1005 std::vector<string> columnNames;
1006 if (!dbutils::query(*(keysToQuery.begin()), sqlQuery, rows, columnTypes, columnNames, err)) {
1007 callback->sendFailure(StringPrintf("failed to query db %s:", err.c_str()));
1008 StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
1009 configId, configPackage, keysToQuery.begin()->GetUid(), callingUid,
1010 InvalidQueryReason(QUERY_FAILURE), err.c_str());
1011 return;
1012 }
1013
1014 vector<string> queryData;
1015 queryData.reserve(rows.size() * columnNames.size());
1016 // TODO(b/268415904): avoid this vector transformation.
1017 if (columnNames.size() != columnTypes.size()) {
1018 callback->sendFailure("Inconsistent row sizes");
1019 StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
1020 configId, configPackage, keysToQuery.begin()->GetUid(), callingUid,
1021 InvalidQueryReason(INCONSISTENT_ROW_SIZE));
1022 }
1023 for (size_t i = 0; i < rows.size(); ++i) {
1024 if (rows[i].size() != columnNames.size()) {
1025 callback->sendFailure("Inconsistent row sizes");
1026 StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
1027 configId, configPackage, keysToQuery.begin()->GetUid(), callingUid,
1028 InvalidQueryReason(INCONSISTENT_ROW_SIZE));
1029 return;
1030 }
1031 queryData.insert(std::end(queryData), std::make_move_iterator(std::begin(rows[i])),
1032 std::make_move_iterator(std::end(rows[i])));
1033 }
1034 callback->sendResults(queryData, columnNames, columnTypes, rows.size());
1035 StatsdStats::getInstance().noteQueryRestrictedMetricSucceed(
1036 configId, configPackage, keysToQuery.begin()->GetUid(), callingUid,
1037 /*queryLatencyNs=*/getElapsedRealtimeNs() - elapsedRealtimeNs);
1038 }
1039
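// Returns the config keys that match configId and one of configPackageUids and whose
// restricted-metrics delegate accepts the calling uid; otherwise fills in err and
// invalidQueryReason.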
set<ConfigKey> StatsLogProcessor::getRestrictedConfigKeysToQueryLocked(
1041 const int32_t callingUid, const int64_t configId, const set<int32_t>& configPackageUids,
1042 string& err, InvalidQueryReason& invalidQueryReason) {
1043 set<ConfigKey> matchedConfigKeys;
1044 for (auto uid : configPackageUids) {
1045 ConfigKey configKey(uid, configId);
1046 if (mMetricsManagers.find(configKey) != mMetricsManagers.end()) {
1047 matchedConfigKeys.insert(configKey);
1048 }
1049 }
1050
1051 set<ConfigKey> excludedKeys;
1052 for (auto& configKey : matchedConfigKeys) {
1053 auto it = mMetricsManagers.find(configKey);
1054 if (!it->second->validateRestrictedMetricsDelegate(callingUid)) {
1055 excludedKeys.insert(configKey);
        }
1057 }
1058
1059 set<ConfigKey> result;
1060 std::set_difference(matchedConfigKeys.begin(), matchedConfigKeys.end(), excludedKeys.begin(),
1061 excludedKeys.end(), std::inserter(result, result.end()));
1062 if (matchedConfigKeys.empty()) {
1063 err = "No configs found matching the config key";
1064 invalidQueryReason = InvalidQueryReason(CONFIG_KEY_NOT_FOUND);
1065 } else if (result.empty()) {
1066 err = "No matching configs for restricted metrics delegate";
1067 invalidQueryReason = InvalidQueryReason(CONFIG_KEY_WITH_UNMATCHED_DELEGATE);
1068 }
1069
1070 return result;
1071 }
1072
void StatsLogProcessor::EnforceDataTtls(const int64_t wallClockNs,
1074 const int64_t elapsedRealtimeNs) {
1075 if (!isAtLeastU()) {
1076 return;
1077 }
1078 std::lock_guard<std::mutex> lock(mMetricsMutex);
1079 enforceDataTtlsLocked(wallClockNs, elapsedRealtimeNs);
1080 }
1081
void StatsLogProcessor::enforceDataTtlsLocked(const int64_t wallClockNs,
1083 const int64_t elapsedRealtimeNs) {
1084 for (const auto& itr : mMetricsManagers) {
1085 itr.second->enforceRestrictedDataTtls(wallClockNs);
1086 }
1087 mLastTtlTime = elapsedRealtimeNs;
1088 }
1089
void StatsLogProcessor::enforceDbGuardrailsIfNecessaryLocked(const int64_t wallClockNs,
1091 const int64_t elapsedRealtimeNs) {
1092 if (elapsedRealtimeNs - mLastDbGuardrailEnforcementTime <
1093 StatsdStats::kMinDbGuardrailEnforcementPeriodNs) {
1094 return;
1095 }
1096 StorageManager::enforceDbGuardrails(STATS_RESTRICTED_DATA_DIR, wallClockNs / NS_PER_SEC,
1097 StatsdStats::kMaxFileSize);
1098 mLastDbGuardrailEnforcementTime = elapsedRealtimeNs;
1099 }
1100
void StatsLogProcessor::fillRestrictedMetrics(const int64_t configId, const string& configPackage,
1102 const int32_t delegateUid, vector<int64_t>* output) {
1103 std::lock_guard<std::mutex> lock(mMetricsMutex);
1104
1105 set<int32_t> configPackageUids;
1106 const auto& uidMapItr = UidMap::sAidToUidMapping.find(configPackage);
1107 if (uidMapItr != UidMap::sAidToUidMapping.end()) {
1108 configPackageUids.insert(uidMapItr->second);
1109 } else {
1110 configPackageUids = mUidMap->getAppUid(configPackage);
1111 }
1112 string err;
1113 InvalidQueryReason invalidQueryReason;
1114 set<ConfigKey> keysToGetMetrics = getRestrictedConfigKeysToQueryLocked(
1115 delegateUid, configId, configPackageUids, err, invalidQueryReason);
1116
1117 for (const ConfigKey& key : keysToGetMetrics) {
1118 vector<int64_t> metricIds = mMetricsManagers[key]->getAllMetricIds();
1119 output->insert(output->end(), metricIds.begin(), metricIds.end());
1120 }
1121 }
1122
void StatsLogProcessor::flushRestrictedDataLocked(const int64_t elapsedRealtimeNs) {
1124 for (const auto& it : mMetricsManagers) {
1125 // no-op if metricsManager is not restricted
1126 it.second->flushRestrictedData();
1127 }
1128
1129 mLastFlushRestrictedTime = elapsedRealtimeNs;
1130 }
1131
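// Rate-limited memory check for a single config: computes the in-memory report size,
// drops the data outright if it exceeds the hard limit, and otherwise requests a
// data-fetch broadcast when the soft threshold is crossed or an older report is already
// sitting on disk.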
void StatsLogProcessor::flushIfNecessaryLocked(const ConfigKey& key,
1133 MetricsManager& metricsManager) {
1134 int64_t elapsedRealtimeNs = getElapsedRealtimeNs();
1135 auto lastCheckTime = mLastByteSizeTimes.find(key);
1136 int64_t minCheckPeriodNs = metricsManager.useV2SoftMemoryCalculation()
1137 ? StatsdStats::kMinByteSizeV2CheckPeriodNs
1138 : StatsdStats::kMinByteSizeCheckPeriodNs;
1139 if (lastCheckTime != mLastByteSizeTimes.end()) {
1140 if (elapsedRealtimeNs - lastCheckTime->second < minCheckPeriodNs) {
1141 return;
1142 }
1143 }
1144
1145 // We suspect that the byteSize() computation is expensive, so we set a rate limit.
1146 size_t totalBytes = metricsManager.byteSize();
1147
1148 mLastByteSizeTimes[key] = elapsedRealtimeNs;
1149 const size_t kBytesPerConfig = metricsManager.hasRestrictedMetricsDelegate()
1150 ? StatsdStats::kBytesPerRestrictedConfigTriggerFlush
1151 : metricsManager.getTriggerGetDataBytes();
1152 bool requestDump = false;
1153 if (totalBytes > metricsManager.getMaxMetricsBytes()) {
1154 // Too late. We need to start clearing data.
1155 metricsManager.dropData(elapsedRealtimeNs);
1156 StatsdStats::getInstance().noteDataDropped(key, totalBytes);
1157 VLOG("StatsD had to toss out metrics for %s", key.ToString().c_str());
1158 } else if ((totalBytes > kBytesPerConfig) ||
1159 (mOnDiskDataConfigs.find(key) != mOnDiskDataConfigs.end())) {
1160 // Request to dump if:
1161 // 1. in memory data > threshold OR
1162 // 2. config has old data report on disk.
1163 requestDump = true;
1164 }
1165
1166 if (requestDump) {
1167 if (metricsManager.hasRestrictedMetricsDelegate()) {
1168 metricsManager.flushRestrictedData();
1169 // No need to send broadcast for restricted metrics.
1170 return;
1171 }
1172 // Send broadcast so that receivers can pull data.
1173 auto lastBroadcastTime = mLastBroadcastTimes.find(key);
1174 if (lastBroadcastTime != mLastBroadcastTimes.end()) {
1175 if (elapsedRealtimeNs - lastBroadcastTime->second <
1176 StatsdStats::kMinBroadcastPeriodNs) {
1177 VLOG("StatsD would've sent a broadcast but the rate limit stopped us.");
1178 return;
1179 }
1180 }
1181 if (mSendBroadcast(key)) {
1182 mOnDiskDataConfigs.erase(key);
1183 VLOG("StatsD triggered data fetch for %s", key.ToString().c_str());
1184 mLastBroadcastTimes[key] = elapsedRealtimeNs;
1185 StatsdStats::getInstance().noteBroadcastSent(key);
1186 }
1187 }
1188 }
1189
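// Serializes the current report for this config and writes it to the stats-data
// directory; restricted configs flush to their sqlite database instead.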
void StatsLogProcessor::WriteDataToDiskLocked(const ConfigKey& key, const int64_t timestampNs,
1191 const int64_t wallClockNs,
1192 const DumpReportReason dumpReportReason,
1193 const DumpLatency dumpLatency) {
1194 if (mMetricsManagers.find(key) == mMetricsManagers.end() ||
1195 !mMetricsManagers.find(key)->second->shouldWriteToDisk()) {
1196 return;
1197 }
1198 if (mMetricsManagers.find(key)->second->hasRestrictedMetricsDelegate()) {
1199 mMetricsManagers.find(key)->second->flushRestrictedData();
1200 return;
1201 }
1202 vector<uint8_t> buffer;
1203 onConfigMetricsReportLocked(key, timestampNs, wallClockNs,
1204 true /* include_current_partial_bucket*/, true /* erase_data */,
1205 dumpReportReason, dumpLatency, true, &buffer);
1206 string file_name =
1207 StorageManager::getDataFileName((long)getWallClockSec(), key.GetUid(), key.GetId());
1208 StorageManager::writeFile(file_name.c_str(), buffer.data(), buffer.size());
1209
1210 // We were able to write the ConfigMetricsReport to disk, so we should trigger collection ASAP.
1211 mOnDiskDataConfigs.insert(key);
1212 }
1213
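// Persists the activation state of all configs (throttled by WRITE_DATA_COOL_DOWN_SEC)
// so it can later be restored by LoadActiveConfigsFromDisk().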
void StatsLogProcessor::SaveActiveConfigsToDisk(int64_t currentTimeNs) {
1215 std::lock_guard<std::mutex> lock(mMetricsMutex);
1216 const int64_t timeNs = getElapsedRealtimeNs();
    // Do not write to disk if we have already done so in the last few seconds.
1218 if (static_cast<unsigned long long> (timeNs) <
1219 mLastActiveMetricsWriteNs + WRITE_DATA_COOL_DOWN_SEC * NS_PER_SEC) {
1220 ALOGI("Statsd skipping writing active metrics to disk. Already wrote data in last %d seconds",
1221 WRITE_DATA_COOL_DOWN_SEC);
1222 return;
1223 }
1224 mLastActiveMetricsWriteNs = timeNs;
1225
1226 ProtoOutputStream proto;
1227 WriteActiveConfigsToProtoOutputStreamLocked(currentTimeNs, DEVICE_SHUTDOWN, &proto);
1228
1229 string file_name = StringPrintf("%s/active_metrics", STATS_ACTIVE_METRIC_DIR);
1230 StorageManager::deleteFile(file_name.c_str());
1231 android::base::unique_fd fd(
1232 open(file_name.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, S_IRUSR | S_IWUSR));
1233 if (fd == -1) {
1234 ALOGE("Attempt to write %s but failed", file_name.c_str());
1235 return;
1236 }
1237 proto.flush(fd.get());
1238 }
1239
void StatsLogProcessor::SaveMetadataToDisk(int64_t currentWallClockTimeNs,
1241 int64_t systemElapsedTimeNs) {
1242 std::lock_guard<std::mutex> lock(mMetricsMutex);
    // Do not write to disk if we have already done so in the last few seconds.
1244 if (static_cast<unsigned long long> (systemElapsedTimeNs) <
1245 mLastMetadataWriteNs + WRITE_DATA_COOL_DOWN_SEC * NS_PER_SEC) {
1246 ALOGI("Statsd skipping writing metadata to disk. Already wrote data in last %d seconds",
1247 WRITE_DATA_COOL_DOWN_SEC);
1248 return;
1249 }
1250 mLastMetadataWriteNs = systemElapsedTimeNs;
1251
1252 metadata::StatsMetadataList metadataList;
1253 WriteMetadataToProtoLocked(
1254 currentWallClockTimeNs, systemElapsedTimeNs, &metadataList);
1255
1256 string file_name = StringPrintf("%s/metadata", STATS_METADATA_DIR);
1257 StorageManager::deleteFile(file_name.c_str());
1258
1259 if (metadataList.stats_metadata_size() == 0) {
1260 // Skip the write if we have nothing to write.
1261 return;
1262 }
1263
1264 std::string data;
1265 metadataList.SerializeToString(&data);
1266 StorageManager::writeFile(file_name.c_str(), data.c_str(), data.size());
1267 }
1268
void StatsLogProcessor::WriteMetadataToProto(int64_t currentWallClockTimeNs,
1270 int64_t systemElapsedTimeNs,
1271 metadata::StatsMetadataList* metadataList) {
1272 std::lock_guard<std::mutex> lock(mMetricsMutex);
1273 WriteMetadataToProtoLocked(currentWallClockTimeNs, systemElapsedTimeNs, metadataList);
1274 }
1275
void StatsLogProcessor::WriteMetadataToProtoLocked(int64_t currentWallClockTimeNs,
1277 int64_t systemElapsedTimeNs,
1278 metadata::StatsMetadataList* metadataList) {
1279 for (const auto& pair : mMetricsManagers) {
1280 const sp<MetricsManager>& metricsManager = pair.second;
1281 metadata::StatsMetadata* statsMetadata = metadataList->add_stats_metadata();
1282 bool metadataWritten = metricsManager->writeMetadataToProto(currentWallClockTimeNs,
1283 systemElapsedTimeNs, statsMetadata);
1284 if (!metadataWritten) {
1285 metadataList->mutable_stats_metadata()->RemoveLast();
1286 }
1287 }
1288 }
1289
void StatsLogProcessor::LoadMetadataFromDisk(int64_t currentWallClockTimeNs,
1291 int64_t systemElapsedTimeNs) {
1292 std::lock_guard<std::mutex> lock(mMetricsMutex);
1293 string file_name = StringPrintf("%s/metadata", STATS_METADATA_DIR);
1294 int fd = open(file_name.c_str(), O_RDONLY | O_CLOEXEC);
1295 if (-1 == fd) {
1296 VLOG("Attempt to read %s but failed", file_name.c_str());
1297 StorageManager::deleteFile(file_name.c_str());
1298 return;
1299 }
1300 string content;
1301 if (!android::base::ReadFdToString(fd, &content)) {
1302 ALOGE("Attempt to read %s but failed", file_name.c_str());
1303 close(fd);
1304 StorageManager::deleteFile(file_name.c_str());
1305 return;
1306 }
1307
1308 close(fd);
1309
1310 metadata::StatsMetadataList statsMetadataList;
1311 if (!statsMetadataList.ParseFromString(content)) {
1312 ALOGE("Attempt to read %s but failed; failed to parse metadata", file_name.c_str());
1313 StorageManager::deleteFile(file_name.c_str());
1314 return;
1315 }
1316 SetMetadataStateLocked(statsMetadataList, currentWallClockTimeNs, systemElapsedTimeNs);
1317 StorageManager::deleteFile(file_name.c_str());
1318 }
1319
1320 void StatsLogProcessor::SetMetadataState(const metadata::StatsMetadataList& statsMetadataList,
1321 int64_t currentWallClockTimeNs,
1322 int64_t systemElapsedTimeNs) {
1323 std::lock_guard<std::mutex> lock(mMetricsMutex);
1324 SetMetadataStateLocked(statsMetadataList, currentWallClockTimeNs, systemElapsedTimeNs);
1325 }
1326
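// Routes each persisted metadata entry to the MetricsManager for its (uid, config id)
// key; entries whose config is no longer present are skipped.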
1327 void StatsLogProcessor::SetMetadataStateLocked(
1328 const metadata::StatsMetadataList& statsMetadataList,
1329 int64_t currentWallClockTimeNs,
1330 int64_t systemElapsedTimeNs) {
1331 for (const metadata::StatsMetadata& metadata : statsMetadataList.stats_metadata()) {
1332 ConfigKey key(metadata.config_key().uid(), metadata.config_key().config_id());
1333 auto it = mMetricsManagers.find(key);
1334 if (it == mMetricsManagers.end()) {
1335 ALOGE("No config found for configKey %s", key.ToString().c_str());
1336 continue;
1337 }
1338 VLOG("Setting metadata %s", key.ToString().c_str());
1339 it->second->loadMetadata(metadata, currentWallClockTimeNs, systemElapsedTimeNs);
1340 }
1341 VLOG("Successfully loaded %d metadata.", statsMetadataList.stats_metadata_size());
1342 }
1343
1344 void StatsLogProcessor::WriteActiveConfigsToProtoOutputStream(
1345 int64_t currentTimeNs, const DumpReportReason reason, ProtoOutputStream* proto) {
1346 std::lock_guard<std::mutex> lock(mMetricsMutex);
1347 WriteActiveConfigsToProtoOutputStreamLocked(currentTimeNs, reason, proto);
1348 }
1349
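// Serializes the activation state of every config as a repeated config message in the
// ActiveConfigList proto, delegating the per-config payload to its MetricsManager.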
1350 void StatsLogProcessor::WriteActiveConfigsToProtoOutputStreamLocked(
1351 int64_t currentTimeNs, const DumpReportReason reason, ProtoOutputStream* proto) {
1352 for (const auto& pair : mMetricsManagers) {
1353 const sp<MetricsManager>& metricsManager = pair.second;
1354 uint64_t configToken = proto->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED |
1355 FIELD_ID_ACTIVE_CONFIG_LIST_CONFIG);
1356 metricsManager->writeActiveConfigToProtoOutputStream(currentTimeNs, reason, proto);
1357 proto->end(configToken);
1358 }
1359 }
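
// Restores previously persisted config activation state: reads and parses the on-disk
// ActiveConfigList, reactivates matching configs, and deletes the file afterwards
// (including on read or parse failure) so it is never replayed twice.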
1360 void StatsLogProcessor::LoadActiveConfigsFromDisk() {
1361 std::lock_guard<std::mutex> lock(mMetricsMutex);
1362 string file_name = StringPrintf("%s/active_metrics", STATS_ACTIVE_METRIC_DIR);
1363 int fd = open(file_name.c_str(), O_RDONLY | O_CLOEXEC);
1364 if (-1 == fd) {
1365 VLOG("Attempt to read %s but failed", file_name.c_str());
1366 StorageManager::deleteFile(file_name.c_str());
1367 return;
1368 }
1369 string content;
1370 if (!android::base::ReadFdToString(fd, &content)) {
1371 ALOGE("Attempt to read %s but failed", file_name.c_str());
1372 close(fd);
1373 StorageManager::deleteFile(file_name.c_str());
1374 return;
1375 }
1376
1377 close(fd);
1378
1379 ActiveConfigList activeConfigList;
1380 if (!activeConfigList.ParseFromString(content)) {
1381 ALOGE("Attempt to read %s but failed; failed to load active configs", file_name.c_str());
1382 StorageManager::deleteFile(file_name.c_str());
1383 return;
1384 }
1385 // Passing in mTimeBaseNs only works because the only time we load from disk is when statsd starts.
1386 SetConfigsActiveStateLocked(activeConfigList, mTimeBaseNs);
1387 StorageManager::deleteFile(file_name.c_str());
1388 }
1389
1390 void StatsLogProcessor::SetConfigsActiveState(const ActiveConfigList& activeConfigList,
1391 int64_t currentTimeNs) {
1392 std::lock_guard<std::mutex> lock(mMetricsMutex);
1393 SetConfigsActiveStateLocked(activeConfigList, currentTimeNs);
1394 }
1395
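// Applies each persisted activation entry to the MetricsManager for its (uid, id) key;
// entries whose config is no longer present are skipped.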
1396 void StatsLogProcessor::SetConfigsActiveStateLocked(const ActiveConfigList& activeConfigList,
1397 int64_t currentTimeNs) {
1398 for (int i = 0; i < activeConfigList.config_size(); i++) {
1399 const auto& config = activeConfigList.config(i);
1400 ConfigKey key(config.uid(), config.id());
1401 auto it = mMetricsManagers.find(key);
1402 if (it == mMetricsManagers.end()) {
1403 ALOGE("No config found for config %s", key.ToString().c_str());
1404 continue;
1405 }
1406 VLOG("Setting active config %s", key.ToString().c_str());
1407 it->second->loadActiveConfig(config, currentTimeNs);
1408 }
1409 VLOG("Successfully loaded %d active configs.", activeConfigList.config_size());
1410 }
1411
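// Writes a report for every config to disk, rate-limited by WRITE_DATA_COOL_DOWN_SEC so
// that two dumps in the same second do not produce files with the same name.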
1412 void StatsLogProcessor::WriteDataToDiskLocked(const DumpReportReason dumpReportReason,
1413 const DumpLatency dumpLatency,
1414 const int64_t elapsedRealtimeNs,
1415 const int64_t wallClockNs) {
1416 // Do not write to disk if we have already written within the last few seconds.
1417 // This is to avoid overwriting files that would have the same name if we
1418 // write twice in the same second.
1419 if (static_cast<unsigned long long>(elapsedRealtimeNs) <
1420 mLastWriteTimeNs + WRITE_DATA_COOL_DOWN_SEC * NS_PER_SEC) {
1421 ALOGI("Statsd skipping writing data to disk. Already wrote data in last %d seconds",
1422 WRITE_DATA_COOL_DOWN_SEC);
1423 return;
1424 }
1425 mLastWriteTimeNs = elapsedRealtimeNs;
1426 for (auto& pair : mMetricsManagers) {
1427 WriteDataToDiskLocked(pair.first, elapsedRealtimeNs, wallClockNs, dumpReportReason,
1428 dumpLatency);
1429 }
1430 }
1431
1432 void StatsLogProcessor::WriteDataToDisk(const DumpReportReason dumpReportReason,
1433 const DumpLatency dumpLatency,
1434 const int64_t elapsedRealtimeNs,
1435 const int64_t wallClockNs) {
1436 std::lock_guard<std::mutex> lock(mMetricsMutex);
1437 WriteDataToDiskLocked(dumpReportReason, dumpLatency, elapsedRealtimeNs, wallClockNs);
1438 }
1439
1440 void StatsLogProcessor::informPullAlarmFired(const int64_t timestampNs) {
1441 std::lock_guard<std::mutex> lock(mMetricsMutex);
1442 mPullerManager->OnAlarmFired(timestampNs);
1443 }
1444
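// Returns the last report time for the given config, or 0 if the config is unknown.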
1445 int64_t StatsLogProcessor::getLastReportTimeNs(const ConfigKey& key) {
1446 auto it = mMetricsManagers.find(key);
1447 if (it == mMetricsManagers.end()) {
1448 return 0;
1449 } else {
1450 return it->second->getLastReportTimeNs();
1451 }
1452 }
1453
1454 void StatsLogProcessor::notifyAppUpgrade(const int64_t eventTimeNs, const string& apk,
1455 const int uid, const int64_t version) {
1456 std::lock_guard<std::mutex> lock(mMetricsMutex);
1457 VLOG("Received app upgrade");
1458 StateManager::getInstance().notifyAppChanged(apk, mUidMap);
1459 for (const auto& it : mMetricsManagers) {
1460 it.second->notifyAppUpgrade(eventTimeNs, apk, uid, version);
1461 }
1462 }
1463
1464 void StatsLogProcessor::notifyAppRemoved(const int64_t eventTimeNs, const string& apk,
1465 const int uid) {
1466 std::lock_guard<std::mutex> lock(mMetricsMutex);
1467 VLOG("Received app removed");
1468 StateManager::getInstance().notifyAppChanged(apk, mUidMap);
1469 for (const auto& it : mMetricsManagers) {
1470 it.second->notifyAppRemoved(eventTimeNs, apk, uid);
1471 }
1472 }
1473
1474 void StatsLogProcessor::onUidMapReceived(const int64_t eventTimeNs) {
1475 std::lock_guard<std::mutex> lock(mMetricsMutex);
1476 VLOG("Received uid map");
1477 StateManager::getInstance().updateLogSources(mUidMap);
1478 for (const auto& it : mMetricsManagers) {
1479 it.second->onUidMapReceived(eventTimeNs);
1480 }
1481 }
1482
1483 void StatsLogProcessor::onStatsdInitCompleted(const int64_t elapsedTimeNs) {
1484 ATRACE_CALL();
1485 std::lock_guard<std::mutex> lock(mMetricsMutex);
1486 VLOG("Received boot completed signal");
1487 for (const auto& it : mMetricsManagers) {
1488 it.second->onStatsdInitCompleted(elapsedTimeNs);
1489 }
1490 }
1491
1492 void StatsLogProcessor::noteOnDiskData(const ConfigKey& key) {
1493 std::lock_guard<std::mutex> lock(mMetricsMutex);
1494 mOnDiskDataConfigs.insert(key);
1495 }
1496
1497 void StatsLogProcessor::setAnomalyAlarm(const int64_t elapsedTimeMillis) {
1498 std::lock_guard<std::mutex> lock(mAnomalyAlarmMutex);
1499 mNextAnomalyAlarmTime = elapsedTimeMillis;
1500 }
1501
1502 void StatsLogProcessor::cancelAnomalyAlarm() {
1503 std::lock_guard<std::mutex> lock(mAnomalyAlarmMutex);
1504 mNextAnomalyAlarmTime = 0;
1505 }
1506
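// Pops every anomaly alarm scheduled at or before elapsedTimeMillis and processes the
// corresponding fired alerts; logs a warning if no such alarm is found.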
1507 void StatsLogProcessor::informAnomalyAlarmFiredLocked(const int64_t elapsedTimeMillis) {
1508 VLOG("StatsLogProcessor::informAnomalyAlarmFiredLocked was called");
1509 unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet =
1510 mAnomalyAlarmMonitor->popSoonerThan(static_cast<uint32_t>(elapsedTimeMillis / 1000));
1511 if (alarmSet.size() > 0) {
1512 VLOG("Found a periodic alarm that fired.");
1513 processFiredAnomalyAlarmsLocked(MillisToNano(elapsedTimeMillis), alarmSet);
1514 } else {
1515 ALOGW("Cannot find a periodic alarm that fired. Perhaps it was recently cancelled.");
1516 }
1517 }
1518
1519 LogEventFilter::AtomIdSet StatsLogProcessor::getDefaultAtomIdSet() {
1520 // Populate the hard-coded list of always-useful atoms. Atoms that statsd itself
1521 // can push (APP_BREADCRUMB_REPORTED & ANOMALY_DETECTED) are also included to
1522 // simplify the logic for handling metric config updates.
1523 LogEventFilter::AtomIdSet allAtomIds{
1524 util::BINARY_PUSH_STATE_CHANGED, util::ISOLATED_UID_CHANGED,
1525 util::APP_BREADCRUMB_REPORTED, util::WATCHDOG_ROLLBACK_OCCURRED,
1526 util::ANOMALY_DETECTED, util::STATS_SOCKET_LOSS_REPORTED};
1527 return allAtomIds;
1528 }
1529
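// Rebuilds the set of atom ids statsd is interested in from the default set plus all
// atoms referenced by configured metrics and state tracking, then pushes the result to
// the LogEventFilter.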
1530 void StatsLogProcessor::updateLogEventFilterLocked() const {
1531 VLOG("StatsLogProcessor: Updating allAtomIds");
1532 LogEventFilter::AtomIdSet allAtomIds = getDefaultAtomIdSet();
1533 for (const auto& metricsManager : mMetricsManagers) {
1534 metricsManager.second->addAllAtomIds(allAtomIds);
1535 }
1536 StateManager::getInstance().addAllAtomIds(allAtomIds);
1537 VLOG("StatsLogProcessor: Updating allAtomIds done. Total atoms %d", (int)allAtomIds.size());
1538 mLogEventFilter->setAtomIds(std::move(allAtomIds), this);
1539 }
1540
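// For APP_BREADCRUMB_REPORTED events, checks that the uid logged in the atom matches the
// (isolated-to-host mapped) caller uid unless the caller is statsd itself, and that the
// state field is within [0, 3]. All other atoms are accepted unchanged.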
1541 bool StatsLogProcessor::validateAppBreadcrumbEvent(const LogEvent& event) const {
1542 if (event.GetTagId() == util::APP_BREADCRUMB_REPORTED) {
1543 // Check that app breadcrumb reported fields are valid.
1544 status_t err = NO_ERROR;
1545
1546 // Uid is 3rd from last field and must match the caller's uid,
1547 // unless that caller is statsd itself (statsd is allowed to spoof uids).
1548 const long appHookUid = event.GetLong(event.size() - 2, &err);
1549 if (err != NO_ERROR) {
1550 VLOG("APP_BREADCRUMB_REPORTED had error when parsing the uid");
1551 return false;
1552 }
1553
1554 // Because the uid within the LogEvent may have been mapped from
1555 // isolated to host, map the loggerUid similarly before comparing.
1556 const int32_t loggerUid = mUidMap->getHostUidOrSelf(event.GetUid());
1557 if (loggerUid != appHookUid && loggerUid != AID_STATSD) {
1558 VLOG("APP_BREADCRUMB_REPORTED has invalid uid: claimed %ld but caller is %d",
1559 appHookUid, loggerUid);
1560 return false;
1561 }
1562
1563 // The state must be in the range [0, 3]. This check must be updated manually if new states are added.
1564 const long appHookState = event.GetLong(event.size(), &err);
1565 if (err != NO_ERROR) {
1566 VLOG("APP_BREADCRUMB_REPORTED had error when parsing the state field");
1567 return false;
1568 } else if (appHookState < 0 || appHookState > 3) {
1569 VLOG("APP_BREADCRUMB_REPORTED does not have valid state %ld", appHookState);
1570 return false;
1571 }
1572 }
1573
1574 return true;
1575 }
1576
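// Compares the current queue-overflow drop counts against the previously seen counts and
// notifies StateManager for every atom id with newly dropped events, then caches the new
// snapshot for the next comparison.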
1577 void StatsLogProcessor::processQueueOverflowStatsLocked() {
1578 auto queueOverflowStats = StatsdStats::getInstance().getQueueOverflowAtomsStats();
1579
1580 for (const auto [atomId, count] : queueOverflowStats) {
1581 // Check whether any new atoms were dropped due to queue overflow since the previous request.
1582 auto droppedAtomStatsIt = mQueueOverflowAtomsStats.find(atomId);
1583 if (droppedAtomStatsIt != mQueueOverflowAtomsStats.end() &&
1584 droppedAtomStatsIt->second == count) {
1585 // no new dropped atoms detected for the atomId
1586 continue;
1587 }
1588
1589 StateManager::getInstance().onLogEventLost(atomId, DATA_CORRUPTED_EVENT_QUEUE_OVERFLOW);
1590 }
1591 mQueueOverflowAtomsStats = std::move(queueOverflowStats);
1592 }
1593
1594 } // namespace statsd
1595 } // namespace os
1596 } // namespace android
1597