1 /*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define STATSD_DEBUG false // STOPSHIP if true
18 #include "Log.h"
19
20 #include "ValueMetricProducer.h"
21
22 #include <kll.h>
23 #include <limits.h>
24 #include <stdlib.h>
25
26 #include "FieldValue.h"
27 #include "HashableDimensionKey.h"
28 #include "guardrail/StatsdStats.h"
29 #include "metrics/NumericValue.h"
30 #include "metrics/parsing_utils/metrics_manager_util.h"
31 #include "stats_log_util.h"
32 #include "stats_util.h"
33
34 using android::util::FIELD_COUNT_REPEATED;
35 using android::util::FIELD_TYPE_BOOL;
36 using android::util::FIELD_TYPE_INT32;
37 using android::util::FIELD_TYPE_INT64;
38 using android::util::FIELD_TYPE_MESSAGE;
39 using android::util::ProtoOutputStream;
40 using dist_proc::aggregation::KllQuantile;
41 using std::optional;
42 using std::shared_ptr;
43 using std::unique_ptr;
44 using std::unordered_map;
45 using std::vector;
46
47 namespace android {
48 namespace os {
49 namespace statsd {
50
51 // for StatsLogReport
52 const int FIELD_ID_ID = 1;
53 const int FIELD_ID_TIME_BASE = 9;
54 const int FIELD_ID_BUCKET_SIZE = 10;
55 const int FIELD_ID_DIMENSION_PATH_IN_WHAT = 11;
56 const int FIELD_ID_IS_ACTIVE = 14;
57 const int FIELD_ID_DIMENSION_GUARDRAIL_HIT = 17;
58 const int FIELD_ID_ESTIMATED_MEMORY_BYTES = 18;
59 const int FIELD_ID_DATA_CORRUPTED_REASON = 19;
60 // for *MetricDataWrapper
61 const int FIELD_ID_DATA = 1;
62 const int FIELD_ID_SKIPPED = 2;
63 // for SkippedBuckets
64 const int FIELD_ID_SKIPPED_START_MILLIS = 3;
65 const int FIELD_ID_SKIPPED_END_MILLIS = 4;
66 const int FIELD_ID_SKIPPED_DROP_EVENT = 5;
67 // for DumpEvent Proto
68 const int FIELD_ID_BUCKET_DROP_REASON = 1;
69 const int FIELD_ID_DROP_TIME = 2;
70 // for *MetricData
71 const int FIELD_ID_DIMENSION_IN_WHAT = 1;
72 const int FIELD_ID_BUCKET_INFO = 3;
73 const int FIELD_ID_DIMENSION_LEAF_IN_WHAT = 4;
74 const int FIELD_ID_SLICE_BY_STATE = 6;
75
76 template <typename AggregatedValue, typename DimExtras>
77 ValueMetricProducer<AggregatedValue, DimExtras>::ValueMetricProducer(
78 const int64_t metricId, const ConfigKey& key, const uint64_t protoHash,
79 const PullOptions& pullOptions, const BucketOptions& bucketOptions,
80 const WhatOptions& whatOptions, const ConditionOptions& conditionOptions,
81 const StateOptions& stateOptions, const ActivationOptions& activationOptions,
82 const GuardrailOptions& guardrailOptions,
83 const wp<ConfigMetadataProvider> configMetadataProvider)
84 : MetricProducer(metricId, key, bucketOptions.timeBaseNs, conditionOptions.conditionIndex,
85 conditionOptions.initialConditionCache, conditionOptions.conditionWizard,
86 protoHash, activationOptions.eventActivationMap,
87 activationOptions.eventDeactivationMap, stateOptions.slicedStateAtoms,
88 stateOptions.stateGroupMap, bucketOptions.splitBucketForAppUpgrade,
89 configMetadataProvider),
90 mWhatMatcherIndex(whatOptions.whatMatcherIndex),
91 mEventMatcherWizard(whatOptions.matcherWizard),
92 mPullerManager(pullOptions.pullerManager),
93 mFieldMatchers(whatOptions.fieldMatchers),
94 mPullAtomId(pullOptions.pullAtomId),
95 mMinBucketSizeNs(bucketOptions.minBucketSizeNs),
96 mDimensionSoftLimit(guardrailOptions.dimensionSoftLimit),
97 mDimensionHardLimit(guardrailOptions.dimensionHardLimit),
98 mCurrentBucketIsSkipped(false),
99 mConditionCorrectionThresholdNs(bucketOptions.conditionCorrectionThresholdNs) {
100 // TODO(b/185722221): inject directly via initializer list in MetricProducer.
101 mBucketSizeNs = bucketOptions.bucketSizeNs;
102
103 // TODO(b/185770171): inject dimensionsInWhat related fields via constructor.
104 if (whatOptions.dimensionsInWhat.field() > 0) {
105 translateFieldMatcher(whatOptions.dimensionsInWhat, &mDimensionsInWhat);
106 }
107 mContainANYPositionInDimensionsInWhat = whatOptions.containsAnyPositionInDimensionsInWhat;
108 mShouldUseNestedDimensions = whatOptions.shouldUseNestedDimensions;
109
110 if (conditionOptions.conditionLinks.size() > 0) {
111 for (const auto& link : conditionOptions.conditionLinks) {
112 Metric2Condition mc;
113 mc.conditionId = link.condition();
114 translateFieldMatcher(link.fields_in_what(), &mc.metricFields);
115 translateFieldMatcher(link.fields_in_condition(), &mc.conditionFields);
116 mMetric2ConditionLinks.push_back(mc);
117 }
118
119 // TODO(b/185770739): use !mMetric2ConditionLinks.empty() instead
120 mConditionSliced = true;
121 }
122
123 for (const auto& stateLink : stateOptions.stateLinks) {
124 Metric2State ms;
125 ms.stateAtomId = stateLink.state_atom_id();
126 translateFieldMatcher(stateLink.fields_in_what(), &ms.metricFields);
127 translateFieldMatcher(stateLink.fields_in_state(), &ms.stateFields);
128 mMetric2StateLinks.push_back(ms);
129 }
130
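    // Descriptive note (added): account for buckets that have already elapsed between the
    // metric time base and startTimeNs so that mCurrentBucketNum starts at the bucket
    // containing startTimeNs.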
131 const int64_t numBucketsForward = calcBucketsForwardCount(bucketOptions.startTimeNs);
132 mCurrentBucketNum = numBucketsForward;
133
134 flushIfNeededLocked(bucketOptions.startTimeNs);
135
136 if (isPulled()) {
137 mPullerManager->RegisterReceiver(mPullAtomId, mConfigKey, this, getCurrentBucketEndTimeNs(),
138 mBucketSizeNs);
139 }
140
141     // Only do this for partial buckets, like the first bucket. All other buckets should use
142     // flushIfNeeded to adjust start and end to bucket boundaries.
143     // Adjust start for partial bucket.
144 mCurrentBucketStartTimeNs = bucketOptions.startTimeNs;
145 mConditionTimer.newBucketStart(mCurrentBucketStartTimeNs, mCurrentBucketStartTimeNs);
146
147 // Now that activations are processed, start the condition timer if needed.
148 mConditionTimer.onConditionChanged(mIsActive && mCondition == ConditionState::kTrue,
149 mCurrentBucketStartTimeNs);
150 }
151
152 template <typename AggregatedValue, typename DimExtras>
153 ValueMetricProducer<AggregatedValue, DimExtras>::~ValueMetricProducer() {
154 VLOG("~ValueMetricProducer() called");
155 if (isPulled()) {
156 mPullerManager->UnRegisterReceiver(mPullAtomId, mConfigKey, this);
157 }
158 }
159
160 template <typename AggregatedValue, typename DimExtras>
161 void ValueMetricProducer<AggregatedValue, DimExtras>::onStatsdInitCompleted(
162 const int64_t eventTimeNs) {
163 ATRACE_CALL();
164 lock_guard<mutex> lock(mMutex);
165
166 if (isPulled() && mCondition == ConditionState::kTrue && mIsActive) {
167 pullAndMatchEventsLocked(eventTimeNs);
168 }
169 flushCurrentBucketLocked(eventTimeNs, eventTimeNs);
170 }
171
172 template <typename AggregatedValue, typename DimExtras>
173 void ValueMetricProducer<AggregatedValue, DimExtras>::notifyAppUpgradeInternalLocked(
174 const int64_t eventTimeNs) {
175 if (isPulled() && mCondition == ConditionState::kTrue && mIsActive) {
176 pullAndMatchEventsLocked(eventTimeNs);
177 }
178 flushCurrentBucketLocked(eventTimeNs, eventTimeNs);
179 }
180
181 template <typename AggregatedValue, typename DimExtras>
182 optional<InvalidConfigReason>
183 ValueMetricProducer<AggregatedValue, DimExtras>::onConfigUpdatedLocked(
184 const StatsdConfig& config, const int configIndex, const int metricIndex,
185 const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
186 const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
187 const unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
188 const sp<EventMatcherWizard>& matcherWizard,
189 const vector<sp<ConditionTracker>>& allConditionTrackers,
190 const unordered_map<int64_t, int>& conditionTrackerMap, const sp<ConditionWizard>& wizard,
191 const unordered_map<int64_t, int>& metricToActivationMap,
192 unordered_map<int, vector<int>>& trackerToMetricMap,
193 unordered_map<int, vector<int>>& conditionToMetricMap,
194 unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
195 unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
196 vector<int>& metricsWithActivation) {
197 optional<InvalidConfigReason> invalidConfigReason = MetricProducer::onConfigUpdatedLocked(
198 config, configIndex, metricIndex, allAtomMatchingTrackers, oldAtomMatchingTrackerMap,
199 newAtomMatchingTrackerMap, matcherWizard, allConditionTrackers, conditionTrackerMap,
200 wizard, metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
201 activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
202 metricsWithActivation);
203 if (invalidConfigReason.has_value()) {
204 return invalidConfigReason;
205 }
206 // Update appropriate indices: mWhatMatcherIndex, mConditionIndex and MetricsManager maps.
207 const int64_t atomMatcherId = getWhatAtomMatcherIdForMetric(config, configIndex);
208 invalidConfigReason = handleMetricWithAtomMatchingTrackers(
209 atomMatcherId, mMetricId, metricIndex, /*enforceOneAtom=*/false,
210 allAtomMatchingTrackers, newAtomMatchingTrackerMap, trackerToMetricMap,
211 mWhatMatcherIndex);
212 if (invalidConfigReason.has_value()) {
213 return invalidConfigReason;
214 }
215 const optional<int64_t>& conditionIdOpt = getConditionIdForMetric(config, configIndex);
216 const ConditionLinks& conditionLinks = getConditionLinksForMetric(config, configIndex);
217 if (conditionIdOpt.has_value()) {
218 invalidConfigReason = handleMetricWithConditions(
219 conditionIdOpt.value(), mMetricId, metricIndex, conditionTrackerMap, conditionLinks,
220 allConditionTrackers, mConditionTrackerIndex, conditionToMetricMap);
221 if (invalidConfigReason.has_value()) {
222 return invalidConfigReason;
223 }
224 }
225 sp<EventMatcherWizard> tmpEventWizard = mEventMatcherWizard;
226 mEventMatcherWizard = matcherWizard;
227 return nullopt;
228 }
229
230 template <typename AggregatedValue, typename DimExtras>
231 size_t ValueMetricProducer<AggregatedValue, DimExtras>::computeValueBucketSizeLocked(
232 const bool isFullBucket, const MetricDimensionKey& dimKey, const bool isFirstBucket,
233 const PastBucket<AggregatedValue>& bucket) const {
234 size_t bucketSize =
235 MetricProducer::computeBucketSizeLocked(isFullBucket, dimKey, isFirstBucket);
236
237 for (const auto& value : bucket.aggregates) {
238 bucketSize += getAggregatedValueSize(value);
239 }
240
241 // ConditionTrueNanos
242 if (mConditionTrackerIndex >= 0 || !mSlicedStateAtoms.empty()) {
243 bucketSize += sizeof(int64_t);
244 }
245
246 // ConditionCorrectionNanos
247 if (getDumpProtoFields().conditionCorrectionNsFieldId.has_value() && isPulled() &&
248 mConditionCorrectionThresholdNs &&
249 (abs(bucket.mConditionCorrectionNs) >= mConditionCorrectionThresholdNs)) {
250 bucketSize += sizeof(int64_t);
251 }
252 return bucketSize;
253 }
254
255 template <typename AggregatedValue, typename DimExtras>
256 void ValueMetricProducer<AggregatedValue, DimExtras>::onStateChanged(
257 int64_t eventTimeNs, int32_t atomId, const HashableDimensionKey& primaryKey,
258 const FieldValue& oldState, const FieldValue& newState) {
259 std::lock_guard<std::mutex> lock(mMutex);
260 VLOG("ValueMetricProducer %lld onStateChanged time %lld, State %d, key %s, %d -> %d",
261 (long long)mMetricId, (long long)eventTimeNs, atomId, primaryKey.toString().c_str(),
262 oldState.mValue.int_value, newState.mValue.int_value);
263
264 FieldValue oldStateCopy = oldState;
265 FieldValue newStateCopy = newState;
266 mapStateValue(atomId, &oldStateCopy);
267 mapStateValue(atomId, &newStateCopy);
268
269 // If old and new states are in the same StateGroup, then we do not need to
270 // pull for this state change.
271 if (oldStateCopy == newStateCopy) {
272 return;
273 }
274
275 // If condition is not true or metric is not active, we do not need to pull
276 // for this state change.
277 if (mCondition != ConditionState::kTrue || !mIsActive) {
278 return;
279 }
280
281 if (isEventLateLocked(eventTimeNs)) {
282 VLOG("Skip event due to late arrival: %lld vs %lld", (long long)eventTimeNs,
283 (long long)mCurrentBucketStartTimeNs);
284 invalidateCurrentBucket(eventTimeNs, BucketDropReason::EVENT_IN_WRONG_BUCKET);
285 return;
286 }
287
288 if (isPulled()) {
289 mStateChangePrimaryKey.first = atomId;
290 mStateChangePrimaryKey.second = primaryKey;
291 // TODO(b/185796114): pass mStateChangePrimaryKey as an argument to
292 // pullAndMatchEventsLocked
293 pullAndMatchEventsLocked(eventTimeNs);
294 mStateChangePrimaryKey.first = 0;
295 mStateChangePrimaryKey.second = DEFAULT_DIMENSION_KEY;
296 }
297 flushIfNeededLocked(eventTimeNs);
298 }
299
300 template <typename AggregatedValue, typename DimExtras>
301 void ValueMetricProducer<AggregatedValue, DimExtras>::onSlicedConditionMayChangeLocked(
302 bool overallCondition, const int64_t eventTime) {
303 VLOG("Metric %lld onSlicedConditionMayChange", (long long)mMetricId);
304 }
305
306 template <typename AggregatedValue, typename DimExtras>
307 void ValueMetricProducer<AggregatedValue, DimExtras>::dropDataLocked(const int64_t dropTimeNs) {
308 StatsdStats::getInstance().noteBucketDropped(mMetricId);
309
310 // The current partial bucket is not flushed and does not require a pull,
311 // so the data is still valid.
312 flushIfNeededLocked(dropTimeNs);
313 clearPastBucketsLocked(dropTimeNs);
314 resetDataCorruptionFlagsLocked();
315 }
316
317 template <typename AggregatedValue, typename DimExtras>
318 void ValueMetricProducer<AggregatedValue, DimExtras>::clearPastBucketsLocked(
319 const int64_t dumpTimeNs) {
320 mPastBuckets.clear();
321 mSkippedBuckets.clear();
322 resetDataCorruptionFlagsLocked();
323 mTotalDataSize = 0;
324 }
325
326 template <typename AggregatedValue, typename DimExtras>
327 void ValueMetricProducer<AggregatedValue, DimExtras>::onDumpReportLocked(
328 const int64_t dumpTimeNs, const bool includeCurrentPartialBucket, const bool eraseData,
329 const DumpLatency dumpLatency, set<string>* strSet, set<int32_t>& usedUids,
330 ProtoOutputStream* protoOutput) {
331 VLOG("metric %lld dump report now...", (long long)mMetricId);
332
333 // Pulled metrics need to pull before flushing, which is why they do not call flushIfNeeded.
334     // TODO: b/249823426 see if we can pull and call flushIfNeeded for pulled value metrics.
335 if (!isPulled()) {
336 flushIfNeededLocked(dumpTimeNs);
337 }
338 if (includeCurrentPartialBucket) {
339         // For pulled metrics, we need to do a pull at bucket boundaries. If we do not do that,
340         // the current bucket will have incomplete data and the next will have the wrong snapshot
341         // to do a diff against. If the condition is false, we are fine since the base data is
342         // reset and we are not tracking anything.
343 if (isPulled() && mCondition == ConditionState::kTrue && mIsActive) {
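            // Descriptive note (added): a FAST dump cannot afford a blocking pull, so the
            // current partial bucket is invalidated instead of being finalized with stale data.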
344 switch (dumpLatency) {
345 case FAST:
346 invalidateCurrentBucket(dumpTimeNs, BucketDropReason::DUMP_REPORT_REQUESTED);
347 break;
348 case NO_TIME_CONSTRAINTS:
349 pullAndMatchEventsLocked(dumpTimeNs);
350 break;
351 }
352 }
353 flushCurrentBucketLocked(dumpTimeNs, dumpTimeNs);
354 }
355
356 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ID, (long long)mMetricId);
357 protoOutput->write(FIELD_TYPE_BOOL | FIELD_ID_IS_ACTIVE, isActiveLocked());
358
359 // Data corrupted reason
360 writeDataCorruptedReasons(*protoOutput, FIELD_ID_DATA_CORRUPTED_REASON,
361 mDataCorruptedDueToQueueOverflow != DataCorruptionSeverity::kNone,
362 mDataCorruptedDueToSocketLoss != DataCorruptionSeverity::kNone);
363
364 if (mPastBuckets.empty() && mSkippedBuckets.empty()) {
365 if (eraseData) {
366 resetDataCorruptionFlagsLocked();
367 }
368 return;
369 }
370
371 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ESTIMATED_MEMORY_BYTES,
372 (long long)byteSizeLocked());
373
374 if (StatsdStats::getInstance().hasHitDimensionGuardrail(mMetricId)) {
375 protoOutput->write(FIELD_TYPE_BOOL | FIELD_ID_DIMENSION_GUARDRAIL_HIT, true);
376 }
377 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_TIME_BASE, (long long)mTimeBaseNs);
378 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_BUCKET_SIZE, (long long)mBucketSizeNs);
379 // Fills the dimension path if not slicing by a primitive repeated field or position ALL.
380 if (!mShouldUseNestedDimensions) {
381 if (!mDimensionsInWhat.empty()) {
382 uint64_t dimenPathToken =
383 protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_ID_DIMENSION_PATH_IN_WHAT);
384 writeDimensionPathToProto(mDimensionsInWhat, protoOutput);
385 protoOutput->end(dimenPathToken);
386 }
387 }
388
389 const auto& [metricTypeFieldId, bucketNumFieldId, startBucketMsFieldId, endBucketMsFieldId,
390 conditionTrueNsFieldId,
391 conditionCorrectionNsFieldId] = getDumpProtoFields();
392
393 uint64_t protoToken = protoOutput->start(FIELD_TYPE_MESSAGE | metricTypeFieldId);
394
395 for (const auto& skippedBucket : mSkippedBuckets) {
396 uint64_t wrapperToken =
397 protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_SKIPPED);
398 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_SKIPPED_START_MILLIS,
399 (long long)(NanoToMillis(skippedBucket.bucketStartTimeNs)));
400 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_SKIPPED_END_MILLIS,
401 (long long)(NanoToMillis(skippedBucket.bucketEndTimeNs)));
402 for (const auto& dropEvent : skippedBucket.dropEvents) {
403 uint64_t dropEventToken = protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED |
404 FIELD_ID_SKIPPED_DROP_EVENT);
405 protoOutput->write(FIELD_TYPE_INT32 | FIELD_ID_BUCKET_DROP_REASON, dropEvent.reason);
406 protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_DROP_TIME,
407 (long long)(NanoToMillis(dropEvent.dropTimeNs)));
408 protoOutput->end(dropEventToken);
409 }
410 protoOutput->end(wrapperToken);
411 }
412
413 for (const auto& [metricDimensionKey, buckets] : mPastBuckets) {
414 VLOG(" dimension key %s", metricDimensionKey.toString().c_str());
415 uint64_t wrapperToken =
416 protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_DATA);
417
418 // First fill dimension.
419 if (mShouldUseNestedDimensions) {
420 uint64_t dimensionToken =
421 protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_ID_DIMENSION_IN_WHAT);
422 writeDimensionToProto(metricDimensionKey.getDimensionKeyInWhat(), mUidFields, strSet,
423 usedUids, protoOutput);
424 protoOutput->end(dimensionToken);
425 } else {
426 writeDimensionLeafNodesToProto(metricDimensionKey.getDimensionKeyInWhat(),
427 FIELD_ID_DIMENSION_LEAF_IN_WHAT, mUidFields, strSet,
428 usedUids, protoOutput);
429 }
430
431 // Then fill slice_by_state.
432 for (auto state : metricDimensionKey.getStateValuesKey().getValues()) {
433 uint64_t stateToken = protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED |
434 FIELD_ID_SLICE_BY_STATE);
435 writeStateToProto(state, protoOutput);
436 protoOutput->end(stateToken);
437 }
438
439 // Then fill bucket_info (*BucketInfo).
440 for (const auto& bucket : buckets) {
441 uint64_t bucketInfoToken = protoOutput->start(
442 FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_BUCKET_INFO);
443
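            // Descriptive note (added): partial buckets are written with explicit start/end
            // timestamps; full buckets only need the bucket number, from which both timestamps
            // can be derived.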
444 if (bucket.mBucketEndNs - bucket.mBucketStartNs != mBucketSizeNs) {
445 protoOutput->write(FIELD_TYPE_INT64 | startBucketMsFieldId,
446 (long long)NanoToMillis(bucket.mBucketStartNs));
447 protoOutput->write(FIELD_TYPE_INT64 | endBucketMsFieldId,
448 (long long)NanoToMillis(bucket.mBucketEndNs));
449 } else {
450 protoOutput->write(FIELD_TYPE_INT64 | bucketNumFieldId,
451 (long long)(getBucketNumFromEndTimeNs(bucket.mBucketEndNs)));
452 }
453 // We only write the condition timer value if the metric has a
454 // condition and/or is sliced by state.
455 // If the metric is sliced by state, the condition timer value is
456 // also sliced by state to reflect time spent in that state.
457 if (mConditionTrackerIndex >= 0 || !mSlicedStateAtoms.empty()) {
458 protoOutput->write(FIELD_TYPE_INT64 | conditionTrueNsFieldId,
459 (long long)bucket.mConditionTrueNs);
460 }
461
462 if (conditionCorrectionNsFieldId) {
463                 // We write the condition correction value when all of the conditions below are true:
464                 // - the metric is pulled
465                 // - it is enabled in the metric configuration via the dedicated field
466                 //   condition_correction_threshold_nanos
467                 // - abs(value) >= condition_correction_threshold_nanos
468
469 if (isPulled() && mConditionCorrectionThresholdNs &&
470 (abs(bucket.mConditionCorrectionNs) >= mConditionCorrectionThresholdNs)) {
471 protoOutput->write(FIELD_TYPE_INT64 | conditionCorrectionNsFieldId.value(),
472 (long long)bucket.mConditionCorrectionNs);
473 }
474 }
475
476 for (int i = 0; i < (int)bucket.aggIndex.size(); i++) {
477 VLOG("\t bucket [%lld - %lld]", (long long)bucket.mBucketStartNs,
478 (long long)bucket.mBucketEndNs);
479 int sampleSize = !bucket.sampleSizes.empty() ? bucket.sampleSizes[i] : 0;
480 writePastBucketAggregateToProto(bucket.aggIndex[i], bucket.aggregates[i],
481 sampleSize, protoOutput);
482 }
483 protoOutput->end(bucketInfoToken);
484 }
485 protoOutput->end(wrapperToken);
486 }
487 protoOutput->end(protoToken);
488
489 VLOG("metric %lld done with dump report...", (long long)mMetricId);
490 if (eraseData) {
491 mPastBuckets.clear();
492 mSkippedBuckets.clear();
493 resetDataCorruptionFlagsLocked();
494 mTotalDataSize = 0;
495 }
496 }
497
498 template <typename AggregatedValue, typename DimExtras>
499 void ValueMetricProducer<AggregatedValue, DimExtras>::invalidateCurrentBucket(
500 const int64_t dropTimeNs, const BucketDropReason reason) {
501 if (!mCurrentBucketIsSkipped) {
502 // Only report to StatsdStats once per invalid bucket.
503 StatsdStats::getInstance().noteInvalidatedBucket(mMetricId);
504 }
505
506 skipCurrentBucket(dropTimeNs, reason);
507 }
508
509 template <typename AggregatedValue, typename DimExtras>
510 void ValueMetricProducer<AggregatedValue, DimExtras>::skipCurrentBucket(
511 const int64_t dropTimeNs, const BucketDropReason reason) {
512 if (!mIsActive) {
513 // Don't keep track of skipped buckets if metric is not active.
514 return;
515 }
516
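    // Descriptive note (added): record the drop reason unless the per-bucket cap on drop
    // events has been reached.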
517 if (!maxDropEventsReached()) {
518 mCurrentSkippedBucket.dropEvents.push_back(buildDropEvent(dropTimeNs, reason));
519 }
520 mCurrentBucketIsSkipped = true;
521 }
522
523 // Handle active state change. Active state change is *mostly* treated like a condition change:
524 // - drop bucket if active state change event arrives too late
525 // - if condition is true, pull data on active state changes
526 // - ConditionTimer tracks changes based on AND of condition and active state.
527 template <typename AggregatedValue, typename DimExtras>
528 void ValueMetricProducer<AggregatedValue, DimExtras>::onActiveStateChangedLocked(
529 const int64_t eventTimeNs, const bool isActive) {
530 const bool eventLate = isEventLateLocked(eventTimeNs);
531 if (eventLate) {
532         // Drop bucket because the event arrived too late, i.e. we are missing data for this bucket.
533 StatsdStats::getInstance().noteLateLogEventSkipped(mMetricId);
534 invalidateCurrentBucket(eventTimeNs, BucketDropReason::EVENT_IN_WRONG_BUCKET);
535 }
536
537 if (ConditionState::kTrue != mCondition) {
538 // Call parent method before early return.
539 MetricProducer::onActiveStateChangedLocked(eventTimeNs, isActive);
540 return;
541 }
542
543 // Pull on active state changes.
544 if (!eventLate) {
545 if (isPulled()) {
546 pullAndMatchEventsLocked(eventTimeNs);
547 }
548
549 onActiveStateChangedInternalLocked(eventTimeNs, isActive);
550 }
551
552 // Once any pulls are processed, call through to parent method which might flush the current
553 // bucket.
554 MetricProducer::onActiveStateChangedLocked(eventTimeNs, isActive);
555
556 // Let condition timer know of new active state.
557 mConditionTimer.onConditionChanged(isActive, eventTimeNs);
558
559 updateCurrentSlicedBucketConditionTimers(isActive, eventTimeNs);
560 }
561
562 template <typename AggregatedValue, typename DimExtras>
563 void ValueMetricProducer<AggregatedValue, DimExtras>::onConditionChangedLocked(
564 const bool condition, const int64_t eventTimeNs) {
565 const ConditionState newCondition = condition ? ConditionState::kTrue : ConditionState::kFalse;
566 const ConditionState oldCondition = mCondition;
567
568 if (!mIsActive) {
569 mCondition = newCondition;
570 return;
571 }
572
573 // If the event arrived late, mark the bucket as invalid and skip the event.
574 if (isEventLateLocked(eventTimeNs)) {
575 VLOG("Skip event due to late arrival: %lld vs %lld", (long long)eventTimeNs,
576 (long long)mCurrentBucketStartTimeNs);
577 StatsdStats::getInstance().noteLateLogEventSkipped(mMetricId);
578 StatsdStats::getInstance().noteConditionChangeInNextBucket(mMetricId);
579 invalidateCurrentBucket(eventTimeNs, BucketDropReason::EVENT_IN_WRONG_BUCKET);
580 mCondition = newCondition;
581 mConditionTimer.onConditionChanged(newCondition, eventTimeNs);
582 updateCurrentSlicedBucketConditionTimers(newCondition, eventTimeNs);
583 return;
584 }
585
586 // If the previous condition was unknown, mark the bucket as invalid
587 // because the bucket will contain partial data. For example, the condition
588 // change might happen close to the end of the bucket and we might miss a
589 // lot of data.
590 // We still want to pull to set the base for diffed metrics.
591 if (oldCondition == ConditionState::kUnknown) {
592 invalidateCurrentBucket(eventTimeNs, BucketDropReason::CONDITION_UNKNOWN);
593 }
594
595 // Pull and match for the following condition change cases:
596 // unknown/false -> true - condition changed
597 // true -> false - condition changed
598 // true -> true - old condition was true so we can flush the bucket at the
599 // end if needed.
600 //
601 // We don’t need to pull for unknown -> false or false -> false.
602 //
603 // onConditionChangedLocked might happen on bucket boundaries if this is
604 // called before #onDataPulled.
605 if (isPulled() &&
606 (newCondition == ConditionState::kTrue || oldCondition == ConditionState::kTrue)) {
607 pullAndMatchEventsLocked(eventTimeNs);
608 }
609
610 onConditionChangedInternalLocked(oldCondition, newCondition, eventTimeNs);
611
612 // Update condition state after pulling.
613 mCondition = newCondition;
614
615 flushIfNeededLocked(eventTimeNs);
616
617 mConditionTimer.onConditionChanged(newCondition, eventTimeNs);
618 updateCurrentSlicedBucketConditionTimers(newCondition, eventTimeNs);
619 }
620
621 template <typename AggregatedValue, typename DimExtras>
622 void ValueMetricProducer<AggregatedValue, DimExtras>::updateCurrentSlicedBucketConditionTimers(
623 bool newCondition, int64_t eventTimeNs) {
624 if (mSlicedStateAtoms.empty()) {
625 return;
626 }
627
628 // Utilize the current state key of each DimensionsInWhat key to determine
629 // which condition timers to update.
630 //
631 // Assumes that the MetricDimensionKey exists in `mCurrentSlicedBucket`.
632 for (const auto& [dimensionInWhatKey, dimensionInWhatInfo] : mDimInfos) {
633 // If the new condition is true, turn ON the condition timer only if
634 // the DimensionInWhat key was present in the data.
635 mCurrentSlicedBucket[MetricDimensionKey(dimensionInWhatKey,
636 dimensionInWhatInfo.currentState)]
637 .conditionTimer.onConditionChanged(
638 newCondition && dimensionInWhatInfo.hasCurrentState, eventTimeNs);
639 }
640 }
641
642 template <typename AggregatedValue, typename DimExtras>
643 void ValueMetricProducer<AggregatedValue, DimExtras>::dumpStatesLocked(int out,
644 bool verbose) const {
645 if (mCurrentSlicedBucket.size() == 0) {
646 return;
647 }
648
649 dprintf(out, "ValueMetricProducer %lld dimension size %lu\n", (long long)mMetricId,
650 (unsigned long)mCurrentSlicedBucket.size());
651 if (verbose) {
652 for (const auto& [metricDimensionKey, currentBucket] : mCurrentSlicedBucket) {
653 for (const Interval& interval : currentBucket.intervals) {
654 dprintf(out, "\t(what)%s\t(states)%s (aggregate)%s\n",
655 metricDimensionKey.getDimensionKeyInWhat().toString().c_str(),
656 metricDimensionKey.getStateValuesKey().toString().c_str(),
657 aggregatedValueToString(interval.aggregate).c_str());
658 }
659 }
660 }
661 }
662
663 template <typename AggregatedValue, typename DimExtras>
664 bool ValueMetricProducer<AggregatedValue, DimExtras>::hasReachedGuardRailLimit() const {
665 return mCurrentSlicedBucket.size() >= mDimensionHardLimit;
666 }
667
668 template <typename AggregatedValue, typename DimExtras>
669 bool ValueMetricProducer<AggregatedValue, DimExtras>::hitGuardRailLocked(
670 const MetricDimensionKey& newKey) const {
671 // ===========GuardRail==============
672 // 1. Report the tuple count if the tuple count > soft limit
673 if (mCurrentSlicedBucket.find(newKey) != mCurrentSlicedBucket.end()) {
674 return false;
675 }
676 if (mCurrentSlicedBucket.size() > mDimensionSoftLimit - 1) {
677 size_t newTupleCount = mCurrentSlicedBucket.size() + 1;
678 StatsdStats::getInstance().noteMetricDimensionSize(mConfigKey, mMetricId, newTupleCount);
679 // 2. Don't add more tuples, we are above the allowed threshold. Drop the data.
680 if (hasReachedGuardRailLimit()) {
681 if (!mHasHitGuardrail) {
682 ALOGE("ValueMetricProducer %lld dropping data for dimension key %s",
683 (long long)mMetricId, newKey.toString().c_str());
684 mHasHitGuardrail = true;
685 }
686 StatsdStats::getInstance().noteHardDimensionLimitReached(mMetricId);
687 return true;
688 }
689 }
690
691 return false;
692 }
693
694 template <typename AggregatedValue, typename DimExtras>
695 void ValueMetricProducer<AggregatedValue, DimExtras>::onMatchedLogEventInternalLocked(
696 const size_t matcherIndex, const MetricDimensionKey& eventKey,
697 const ConditionKey& conditionKey, bool condition, const LogEvent& event,
698 const map<int, HashableDimensionKey>& statePrimaryKeys) {
699 // Skip this event if a state change occurred for a different primary key.
700 auto it = statePrimaryKeys.find(mStateChangePrimaryKey.first);
701 // Check that both the atom id and the primary key are equal.
702 if (it != statePrimaryKeys.end() && it->second != mStateChangePrimaryKey.second) {
703 VLOG("ValueMetric skip event with primary key %s because state change primary key "
704 "is %s",
705 it->second.toString().c_str(), mStateChangePrimaryKey.second.toString().c_str());
706 return;
707 }
708
709 const int64_t eventTimeNs = event.GetElapsedTimestampNs();
710 if (isEventLateLocked(eventTimeNs)) {
711 VLOG("Skip event due to late arrival: %lld vs %lld", (long long)eventTimeNs,
712 (long long)mCurrentBucketStartTimeNs);
713 return;
714 }
715
716 const auto whatKey = eventKey.getDimensionKeyInWhat();
717 mMatchedMetricDimensionKeys.insert(whatKey);
718
719 if (!isPulled()) {
720         // Only flush for pushed metrics; pulled metrics need to do a pull first.
721 flushIfNeededLocked(eventTimeNs);
722 }
723
724 if (canSkipLogEventLocked(eventKey, condition, eventTimeNs, statePrimaryKeys)) {
725 return;
726 }
727
728 if (hitGuardRailLocked(eventKey)) {
729 return;
730 }
731
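    // Descriptive note (added): emplace() leaves mDimInfos untouched if whatKey is already
    // tracked; either way, returnVal.first points at the entry for whatKey.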
732 const auto& returnVal = mDimInfos.emplace(whatKey, DimensionsInWhatInfo(getUnknownStateKey()));
733 DimensionsInWhatInfo& dimensionsInWhatInfo = returnVal.first->second;
734 const HashableDimensionKey& oldStateKey = dimensionsInWhatInfo.currentState;
735 CurrentBucket& currentBucket = mCurrentSlicedBucket[MetricDimensionKey(whatKey, oldStateKey)];
736
737 // Ensure we turn on the condition timer in the case where dimensions
738 // were missing on a previous pull due to a state change.
739 const auto stateKey = eventKey.getStateValuesKey();
740 const bool stateChange = oldStateKey != stateKey || !dimensionsInWhatInfo.hasCurrentState;
741
742 // We need to get the intervals stored with the previous state key so we can
743 // close these value intervals.
744 vector<Interval>& intervals = currentBucket.intervals;
745 if (intervals.size() < mFieldMatchers.size()) {
746 VLOG("Resizing number of intervals to %d", (int)mFieldMatchers.size());
747 intervals.resize(mFieldMatchers.size());
748 }
749
750 dimensionsInWhatInfo.hasCurrentState = true;
751 dimensionsInWhatInfo.currentState = stateKey;
752
753 dimensionsInWhatInfo.seenNewData |= aggregateFields(eventTimeNs, eventKey, event, intervals,
754 dimensionsInWhatInfo.dimExtras);
755
756 // State change.
757 if (!mSlicedStateAtoms.empty() && stateChange) {
758 // Turn OFF the condition timer for the previous state key.
759 currentBucket.conditionTimer.onConditionChanged(false, eventTimeNs);
760
761 // Turn ON the condition timer for the new state key.
762 mCurrentSlicedBucket[MetricDimensionKey(whatKey, stateKey)]
763 .conditionTimer.onConditionChanged(true, eventTimeNs);
764 }
765 }
766
767 // For pulled metrics, we always need to make sure we do a pull before flushing the bucket
768 // if mCondition and mIsActive are true!
769 template <typename AggregatedValue, typename DimExtras>
770 void ValueMetricProducer<AggregatedValue, DimExtras>::flushIfNeededLocked(
771 const int64_t eventTimeNs) {
772 const int64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
773 if (eventTimeNs < currentBucketEndTimeNs) {
774 VLOG("eventTime is %lld, less than current bucket end time %lld", (long long)eventTimeNs,
775 (long long)(currentBucketEndTimeNs));
776 return;
777 }
778 int64_t numBucketsForward = calcBucketsForwardCount(eventTimeNs);
779 int64_t nextBucketStartTimeNs =
780 currentBucketEndTimeNs + (numBucketsForward - 1) * mBucketSizeNs;
781 flushCurrentBucketLocked(eventTimeNs, nextBucketStartTimeNs);
782 }
783
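// Descriptive note (added): returns 0 if eventTimeNs still falls inside the current bucket;
// otherwise returns the number of bucket boundaries crossed. For example, with a 5 minute
// bucket size, an event arriving 12 minutes after the current bucket end yields
// 1 + 12 / 5 = 3 buckets forward.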
784 template <typename AggregatedValue, typename DimExtras>
785 int64_t ValueMetricProducer<AggregatedValue, DimExtras>::calcBucketsForwardCount(
786 const int64_t eventTimeNs) const {
787 int64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
788 if (eventTimeNs < currentBucketEndTimeNs) {
789 return 0;
790 }
791 return 1 + (eventTimeNs - currentBucketEndTimeNs) / mBucketSizeNs;
792 }
793
794 template <typename AggregatedValue, typename DimExtras>
795 void ValueMetricProducer<AggregatedValue, DimExtras>::flushCurrentBucketLocked(
796 const int64_t eventTimeNs, const int64_t nextBucketStartTimeNs) {
797 if (mCondition == ConditionState::kUnknown) {
798 StatsdStats::getInstance().noteBucketUnknownCondition(mMetricId);
799 invalidateCurrentBucket(eventTimeNs, BucketDropReason::CONDITION_UNKNOWN);
800 }
801
802 VLOG("finalizing bucket for %ld, dumping %d slices", (long)mCurrentBucketStartTimeNs,
803 (int)mCurrentSlicedBucket.size());
804
805 closeCurrentBucket(eventTimeNs, nextBucketStartTimeNs);
806 initNextSlicedBucket(nextBucketStartTimeNs);
807
808 // Update the condition timer again, in case we skipped buckets.
809 mConditionTimer.newBucketStart(eventTimeNs, nextBucketStartTimeNs);
810
811 // NOTE: Update the condition timers in `mCurrentSlicedBucket` only when slicing
812 // by state. Otherwise, the "global" condition timer will be used.
813 if (!mSlicedStateAtoms.empty()) {
814 for (auto& [metricDimensionKey, currentBucket] : mCurrentSlicedBucket) {
815 currentBucket.conditionTimer.newBucketStart(eventTimeNs, nextBucketStartTimeNs);
816 }
817 }
818 mCurrentBucketNum += calcBucketsForwardCount(eventTimeNs);
819 }
820
821 template <typename AggregatedValue, typename DimExtras>
822 void ValueMetricProducer<AggregatedValue, DimExtras>::closeCurrentBucket(
823 const int64_t eventTimeNs, const int64_t nextBucketStartTimeNs) {
824 const int64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
825 int64_t bucketEndTimeNs = fullBucketEndTimeNs;
826 int64_t numBucketsForward = calcBucketsForwardCount(eventTimeNs);
827
828 if (multipleBucketsSkipped(numBucketsForward)) {
829 VLOG("Skipping forward %lld buckets", (long long)numBucketsForward);
830 StatsdStats::getInstance().noteSkippedForwardBuckets(mMetricId);
831 // Something went wrong. Maybe the device was sleeping for a long time. It is better
832 // to mark the current bucket as invalid. The last pull might have been successful though.
833 invalidateCurrentBucket(eventTimeNs, BucketDropReason::MULTIPLE_BUCKETS_SKIPPED);
834
835 // End the bucket at the next bucket start time so the entire interval is skipped.
836 bucketEndTimeNs = nextBucketStartTimeNs;
837 } else if (eventTimeNs < fullBucketEndTimeNs) {
838 bucketEndTimeNs = eventTimeNs;
839 }
840
841 // Close the current bucket
842 const auto [globalConditionDurationNs, globalConditionCorrectionNs] =
843 mConditionTimer.newBucketStart(eventTimeNs, bucketEndTimeNs);
844
845 bool isBucketLargeEnough = bucketEndTimeNs - mCurrentBucketStartTimeNs >= mMinBucketSizeNs;
846 if (!isBucketLargeEnough) {
847 skipCurrentBucket(eventTimeNs, BucketDropReason::BUCKET_TOO_SMALL);
848 }
849 if (!mCurrentBucketIsSkipped) {
850 bool bucketHasData = false;
851 // The current bucket is large enough to keep.
852 for (auto& [metricDimensionKey, currentBucket] : mCurrentSlicedBucket) {
853 PastBucket<AggregatedValue> bucket =
854 buildPartialBucket(bucketEndTimeNs, currentBucket.intervals);
855 if (bucket.aggIndex.empty()) {
856 continue;
857 }
858 bucketHasData = true;
859 if (!mSlicedStateAtoms.empty()) {
860 const auto [conditionDurationNs, conditionCorrectionNs] =
861 currentBucket.conditionTimer.newBucketStart(eventTimeNs, bucketEndTimeNs);
862 bucket.mConditionTrueNs = conditionDurationNs;
863 bucket.mConditionCorrectionNs = conditionCorrectionNs;
864 } else {
865 bucket.mConditionTrueNs = globalConditionDurationNs;
866 bucket.mConditionCorrectionNs = globalConditionCorrectionNs;
867 }
868
869 auto& bucketList = mPastBuckets[metricDimensionKey];
870 const bool isFirstBucket = bucketList.empty();
871 mTotalDataSize += computeValueBucketSizeLocked(
872 eventTimeNs >= fullBucketEndTimeNs, metricDimensionKey, isFirstBucket, bucket);
873 bucketList.push_back(std::move(bucket));
874 }
875 if (!bucketHasData) {
876 skipCurrentBucket(eventTimeNs, BucketDropReason::NO_DATA);
877 }
878 }
879
880 if (mCurrentBucketIsSkipped) {
881 mCurrentSkippedBucket.bucketStartTimeNs = mCurrentBucketStartTimeNs;
882 mCurrentSkippedBucket.bucketEndTimeNs = bucketEndTimeNs;
883 mSkippedBuckets.push_back(mCurrentSkippedBucket);
884 mTotalDataSize += computeSkippedBucketSizeLocked(mCurrentSkippedBucket);
885 }
886
887 // This means that the current bucket was not flushed before a forced bucket split.
888 // This can happen if an app update or a dump report with includeCurrentPartialBucket is
889 // requested before we get a chance to flush the bucket due to receiving new data, either from
890 // the statsd socket or the StatsPullerManager.
891 if (bucketEndTimeNs < nextBucketStartTimeNs) {
892 SkippedBucket bucketInGap;
893 bucketInGap.bucketStartTimeNs = bucketEndTimeNs;
894 bucketInGap.bucketEndTimeNs = nextBucketStartTimeNs;
895 bucketInGap.dropEvents.emplace_back(buildDropEvent(eventTimeNs, BucketDropReason::NO_DATA));
896 mSkippedBuckets.emplace_back(bucketInGap);
897 }
898 }
899
900 template <typename AggregatedValue, typename DimExtras>
901 void ValueMetricProducer<AggregatedValue, DimExtras>::initNextSlicedBucket(
902 int64_t nextBucketStartTimeNs) {
903 StatsdStats::getInstance().noteBucketCount(mMetricId);
904 if (mSlicedStateAtoms.empty()) {
905 mCurrentSlicedBucket.clear();
906 } else {
907 for (auto it = mCurrentSlicedBucket.begin(); it != mCurrentSlicedBucket.end();) {
908 bool obsolete = true;
909 for (auto& interval : it->second.intervals) {
910 interval.sampleSize = 0;
911 }
912
913 // When slicing by state, only delete the MetricDimensionKey when the
914 // state key in the MetricDimensionKey is not the current state key.
915 const HashableDimensionKey& dimensionInWhatKey = it->first.getDimensionKeyInWhat();
916 const auto& currentDimInfoItr = mDimInfos.find(dimensionInWhatKey);
917
918 if ((currentDimInfoItr != mDimInfos.end()) &&
919 (it->first.getStateValuesKey() == currentDimInfoItr->second.currentState)) {
920 obsolete = false;
921 }
922 if (obsolete) {
923 it = mCurrentSlicedBucket.erase(it);
924 } else {
925 it++;
926 }
927 }
928 }
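    // Descriptive note (added): drop dimensions that produced no data in the bucket that just
    // closed; reset the seenNewData flag on the dimensions that are kept.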
929 for (auto it = mDimInfos.begin(); it != mDimInfos.end();) {
930 if (!it->second.seenNewData) {
931 it = mDimInfos.erase(it);
932 } else {
933 it->second.seenNewData = false;
934 it++;
935 }
936 }
937
938 mCurrentBucketIsSkipped = false;
939 mCurrentSkippedBucket.reset();
940
941 mCurrentBucketStartTimeNs = nextBucketStartTimeNs;
942 // Reset mHasHitGuardrail boolean since bucket was reset
943 mHasHitGuardrail = false;
944 VLOG("metric %lld: new bucket start time: %lld", (long long)mMetricId,
945 (long long)mCurrentBucketStartTimeNs);
946 }
947
948 // Explicit template instantiations
949 template class ValueMetricProducer<NumericValue, vector<NumericValue>>;
950 template class ValueMetricProducer<unique_ptr<KllQuantile>, Empty>;
951
952 } // namespace statsd
953 } // namespace os
954 } // namespace android
955