// Copyright (C) 2017 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "src/metrics/CountMetricProducer.h"

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <math.h>
#include <stdio.h>

#include <vector>

#include "metrics_test_helper.h"
#include "src/stats_log_util.h"
#include "stats_event.h"
#include "tests/statsd_test_util.h"

using namespace testing;
using android::sp;
using std::set;
using std::unordered_map;
using std::vector;

#ifdef __ANDROID__

namespace android {
namespace os {
namespace statsd {

namespace {
const ConfigKey kConfigKey(0, 12345);
const uint64_t protoHash = 0x1234567890;

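// Builds a LogEvent with the given atom id and timestamp; the second overload additionally
// writes a string field, used as a uid-like dimension by the tests below.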
void makeLogEvent(LogEvent* logEvent, int64_t timestampNs, int atomId) {
    AStatsEvent* statsEvent = AStatsEvent_obtain();
    AStatsEvent_setAtomId(statsEvent, atomId);
    AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);

    parseStatsEventToLogEvent(statsEvent, logEvent);
}

void makeLogEvent(LogEvent* logEvent, int64_t timestampNs, int atomId, string uid) {
    AStatsEvent* statsEvent = AStatsEvent_obtain();
    AStatsEvent_setAtomId(statsEvent, atomId);
    AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
    AStatsEvent_writeString(statsEvent, uid.c_str());

    parseStatsEventToLogEvent(statsEvent, logEvent);
}

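// Dumps the producer's report (including the current partial bucket, erasing data) into a
// proto and returns the parsed StatsLogReport.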
StatsLogReport onDumpReport(CountMetricProducer& producer, int64_t dumpTimeNs) {
    ProtoOutputStream output;
    set<int32_t> usedUids;
    producer.onDumpReport(dumpTimeNs, true /*include current partial bucket*/, true /*erase data*/,
                          FAST, nullptr, usedUids, &output);
    return outputStreamToProto(&output);
}
}  // namespace

// Setup for parameterized tests.
class CountMetricProducerTest_PartialBucket : public TestWithParam<BucketSplitEvent> {};

INSTANTIATE_TEST_SUITE_P(CountMetricProducerTest_PartialBucket,
                         CountMetricProducerTest_PartialBucket,
                         testing::Values(APP_UPGRADE, BOOT_COMPLETE));

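// A producer created partway through a bucket should align to the time base: verify the
// current bucket's start time, bucket number, and end time.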
TEST(CountMetricProducerTest, TestFirstBucket) {
    CountMetric metric;
    metric.set_id(1);
    metric.set_bucket(ONE_MINUTE);
    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();

    sp<MockConfigMetadataProvider> provider = makeMockConfigMetadataProvider(/*enabled=*/false);
    CountMetricProducer countProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, {},
                                      wizard, protoHash, 5, 600 * NS_PER_SEC + NS_PER_SEC / 2,
                                      provider);
    EXPECT_EQ(600500000000, countProducer.mCurrentBucketStartTimeNs);
    EXPECT_EQ(10, countProducer.mCurrentBucketNum);
    EXPECT_EQ(660000000005, countProducer.getCurrentBucketEndTimeNs());
}

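// Counts events with no dimensions: two events land in bucket 1, one in bucket 2, and
// nothing in bucket 3, so only two past buckets should be recorded.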
TEST(CountMetricProducerTest, TestNonDimensionalEvents) {
    int64_t bucketStartTimeNs = 10000000000;
    int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
    int64_t bucket2StartTimeNs = bucketStartTimeNs + bucketSizeNs;
    int64_t bucket3StartTimeNs = bucketStartTimeNs + 2 * bucketSizeNs;
    int tagId = 1;

    CountMetric metric;
    metric.set_id(1);
    metric.set_bucket(ONE_MINUTE);

    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
    sp<MockConfigMetadataProvider> provider = makeMockConfigMetadataProvider(/*enabled=*/false);
    CountMetricProducer countProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, {},
                                      wizard, protoHash, bucketStartTimeNs, bucketStartTimeNs,
                                      provider);

    // 2 events in bucket 1.
    LogEvent event1(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event1, bucketStartTimeNs + 1, tagId);
    LogEvent event2(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event2, bucketStartTimeNs + 2, tagId);

    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);

    // Flush at the time of event #2: the bucket is not over yet, so nothing is recorded.
    countProducer.flushIfNeededLocked(bucketStartTimeNs + 2);
    ASSERT_EQ(0UL, countProducer.mPastBuckets.size());

    // Flush after the bucket boundary: bucket 1 is recorded with a count of 2.
    countProducer.flushIfNeededLocked(bucketStartTimeNs + bucketSizeNs + 1);
    ASSERT_EQ(1UL, countProducer.mPastBuckets.size());
    EXPECT_TRUE(countProducer.mPastBuckets.find(DEFAULT_METRIC_DIMENSION_KEY) !=
                countProducer.mPastBuckets.end());
    const auto& buckets = countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
    ASSERT_EQ(1UL, buckets.size());
    EXPECT_EQ(bucketStartTimeNs, buckets[0].mBucketStartNs);
    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[0].mBucketEndNs);
    EXPECT_EQ(2LL, buckets[0].mCount);

    // 1 matched event happens in bucket 2.
    LogEvent event3(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event3, bucketStartTimeNs + bucketSizeNs + 2, tagId);

    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event3);

    countProducer.flushIfNeededLocked(bucketStartTimeNs + 2 * bucketSizeNs + 1);
    ASSERT_EQ(1UL, countProducer.mPastBuckets.size());
    EXPECT_TRUE(countProducer.mPastBuckets.find(DEFAULT_METRIC_DIMENSION_KEY) !=
                countProducer.mPastBuckets.end());
    ASSERT_EQ(2UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
    const auto& bucketInfo2 = countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1];
    EXPECT_EQ(bucket2StartTimeNs, bucketInfo2.mBucketStartNs);
    EXPECT_EQ(bucket2StartTimeNs + bucketSizeNs, bucketInfo2.mBucketEndNs);
    EXPECT_EQ(1LL, bucketInfo2.mCount);

    // Nothing happens in bucket 3, so no bucket info should be recorded for it.
    countProducer.flushIfNeededLocked(bucketStartTimeNs + 3 * bucketSizeNs + 1);
    ASSERT_EQ(1UL, countProducer.mPastBuckets.size());
    EXPECT_TRUE(countProducer.mPastBuckets.find(DEFAULT_METRIC_DIMENSION_KEY) !=
                countProducer.mPastBuckets.end());
    const auto& buckets3 = countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
    ASSERT_EQ(2UL, buckets3.size());
}

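// Events are counted only while the non-sliced condition is true: event1 arrives with the
// condition true and is counted; event2 arrives after the condition turns false and is not.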
TEST(CountMetricProducerTest, TestEventsWithNonSlicedCondition) {
    int64_t bucketStartTimeNs = 10000000000;
    int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;

    CountMetric metric;
    metric.set_id(1);
    metric.set_bucket(ONE_MINUTE);
    metric.set_condition(StringToId("SCREEN_ON"));

    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();

    sp<MockConfigMetadataProvider> provider = makeMockConfigMetadataProvider(/*enabled=*/false);
    CountMetricProducer countProducer(kConfigKey, metric, 0, {ConditionState::kUnknown}, wizard,
                                      protoHash, bucketStartTimeNs, bucketStartTimeNs, provider);
    assertConditionTimer(countProducer.mConditionTimer, false, 0, 0);

    countProducer.onConditionChanged(true, bucketStartTimeNs);
    assertConditionTimer(countProducer.mConditionTimer, true, 0, bucketStartTimeNs);

    LogEvent event1(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event1, bucketStartTimeNs + 1, /*atomId=*/1);
    countProducer.onMatchedLogEvent(1 /*matcher index*/, event1);

    ASSERT_EQ(0UL, countProducer.mPastBuckets.size());

    countProducer.onConditionChanged(false /*new condition*/, bucketStartTimeNs + 2);
    assertConditionTimer(countProducer.mConditionTimer, false, 2, bucketStartTimeNs + 2);

    // Event2 arrives while the condition is false, so it is not counted.
    LogEvent event2(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event2, bucketStartTimeNs + 10, /*atomId=*/1);
    countProducer.onMatchedLogEvent(1 /*matcher index*/, event2);
    ASSERT_EQ(0UL, countProducer.mPastBuckets.size());

    countProducer.flushIfNeededLocked(bucketStartTimeNs + bucketSizeNs + 1);
    ASSERT_EQ(1UL, countProducer.mPastBuckets.size());
    EXPECT_TRUE(countProducer.mPastBuckets.find(DEFAULT_METRIC_DIMENSION_KEY) !=
                countProducer.mPastBuckets.end());

    const auto& buckets = countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
    ASSERT_EQ(1UL, buckets.size());
    const auto& bucketInfo = buckets[0];
    EXPECT_EQ(bucketStartTimeNs, bucketInfo.mBucketStartNs);
    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, bucketInfo.mBucketEndNs);
    EXPECT_EQ(1LL, bucketInfo.mCount);
}

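// With a sliced condition linked to the event's first field, only events whose linked
// condition dimension evaluates to true (uid "222" here) are counted.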
TEST(CountMetricProducerTest, TestEventsWithSlicedCondition) {
    int64_t bucketStartTimeNs = 10000000000;
    int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;

    int tagId = 1;
    int conditionTagId = 2;

    CountMetric metric;
    metric.set_id(1);
    metric.set_bucket(ONE_MINUTE);
    metric.set_condition(StringToId("APP_IN_BACKGROUND_PER_UID_AND_SCREEN_ON"));
    MetricConditionLink* link = metric.add_links();
    link->set_condition(StringToId("APP_IN_BACKGROUND_PER_UID"));
    buildSimpleAtomFieldMatcher(tagId, 1, link->mutable_fields_in_what());
    buildSimpleAtomFieldMatcher(conditionTagId, 2, link->mutable_fields_in_condition());

    LogEvent event1(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event1, bucketStartTimeNs + 1, tagId, /*uid=*/"111");

    LogEvent event2(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event2, bucketStartTimeNs + 10, tagId, /*uid=*/"222");

    ConditionKey key1;
    key1[StringToId("APP_IN_BACKGROUND_PER_UID")] = {
            getMockedDimensionKey(conditionTagId, 2, "111")};

    ConditionKey key2;
    key2[StringToId("APP_IN_BACKGROUND_PER_UID")] = {
            getMockedDimensionKey(conditionTagId, 2, "222")};

    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();

    EXPECT_CALL(*wizard, query(_, key1, _)).WillOnce(Return(ConditionState::kFalse));

    EXPECT_CALL(*wizard, query(_, key2, _)).WillOnce(Return(ConditionState::kTrue));

    sp<MockConfigMetadataProvider> provider = makeMockConfigMetadataProvider(/*enabled=*/false);
    CountMetricProducer countProducer(kConfigKey, metric, 0 /*condition tracker index*/,
                                      {ConditionState::kUnknown}, wizard, protoHash,
                                      bucketStartTimeNs, bucketStartTimeNs, provider);

    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
    countProducer.flushIfNeededLocked(bucketStartTimeNs + 1);
    ASSERT_EQ(0UL, countProducer.mPastBuckets.size());

    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);
    countProducer.flushIfNeededLocked(bucketStartTimeNs + bucketSizeNs + 1);
    ASSERT_EQ(1UL, countProducer.mPastBuckets.size());
    EXPECT_TRUE(countProducer.mPastBuckets.find(DEFAULT_METRIC_DIMENSION_KEY) !=
                countProducer.mPastBuckets.end());
    const auto& buckets = countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
    ASSERT_EQ(1UL, buckets.size());
    const auto& bucketInfo = buckets[0];
    EXPECT_EQ(bucketStartTimeNs, bucketInfo.mBucketStartNs);
    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, bucketInfo.mBucketEndNs);
    EXPECT_EQ(1LL, bucketInfo.mCount);
}

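// A split event (app upgrade or boot complete) in the middle of a bucket should close the
// current bucket as a partial bucket ending at the split time; the anomaly tracker only
// accumulates full buckets.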
TEST_P(CountMetricProducerTest_PartialBucket, TestSplitInCurrentBucket) {
    sp<AlarmMonitor> alarmMonitor;
    int64_t bucketStartTimeNs = 10000000000;
    int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
    int64_t eventTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;

    int tagId = 1;
    int conditionTagId = 2;

    CountMetric metric;
    metric.set_id(1);
    metric.set_bucket(ONE_MINUTE);
    metric.set_split_bucket_for_app_upgrade(true);
    Alert alert;
    alert.set_num_buckets(3);
    alert.set_trigger_if_sum_gt(2);

    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
    sp<MockConfigMetadataProvider> provider = makeMockConfigMetadataProvider(/*enabled=*/false);
    CountMetricProducer countProducer(kConfigKey, metric, -1 /* no condition */, {}, wizard,
                                      protoHash, bucketStartTimeNs, bucketStartTimeNs, provider);

    sp<AnomalyTracker> anomalyTracker =
            countProducer.addAnomalyTracker(alert, alarmMonitor, UPDATE_NEW, bucketStartTimeNs);
    EXPECT_TRUE(anomalyTracker != nullptr);

    // Bucket is not flushed yet.
    LogEvent event1(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event1, bucketStartTimeNs + 1, tagId, /*uid=*/"111");
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
    ASSERT_EQ(0UL, countProducer.mPastBuckets.size());
    EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));

    // App upgrade or boot complete forces a bucket flush.
    // Check that there's a past bucket whose end time is adjusted to the split time.
    switch (GetParam()) {
        case APP_UPGRADE:
            countProducer.notifyAppUpgrade(eventTimeNs);
            break;
        case BOOT_COMPLETE:
            countProducer.onStatsdInitCompleted(eventTimeNs);
            break;
    }
    ASSERT_EQ(1UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
    EXPECT_EQ(bucketStartTimeNs,
              countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
    EXPECT_EQ(eventTimeNs,
              countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
    EXPECT_EQ(0, countProducer.getCurrentBucketNum());
    EXPECT_EQ(eventTimeNs, countProducer.mCurrentBucketStartTimeNs);
    // Anomaly tracker only contains full buckets.
    EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));

    int64_t lastEndTimeNs = countProducer.getCurrentBucketEndTimeNs();
    // Next event occurs in the same (partial) bucket that the split created.
    LogEvent event2(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event2, bucketStartTimeNs + 59 * NS_PER_SEC + 10, tagId, /*uid=*/"222");
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);
    ASSERT_EQ(1UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
    EXPECT_EQ(eventTimeNs, countProducer.mCurrentBucketStartTimeNs);
    EXPECT_EQ(0, countProducer.getCurrentBucketNum());
    EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));

    // Third event lands in the following bucket.
    LogEvent event3(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event3, bucketStartTimeNs + 62 * NS_PER_SEC + 10, tagId, /*uid=*/"333");
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event3);
    ASSERT_EQ(2UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
    EXPECT_EQ(lastEndTimeNs, countProducer.mCurrentBucketStartTimeNs);
    EXPECT_EQ(1, countProducer.getCurrentBucketNum());
    EXPECT_EQ(2, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
}

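// If the split event arrives after the current bucket's end time, the full bucket is flushed
// with its original end time and the new partial bucket starts at the split time.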
TEST_P(CountMetricProducerTest_PartialBucket, TestSplitInNextBucket) {
    int64_t bucketStartTimeNs = 10000000000;
    int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
    int64_t eventTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;

    int tagId = 1;
    int conditionTagId = 2;

    CountMetric metric;
    metric.set_id(1);
    metric.set_bucket(ONE_MINUTE);
    metric.set_split_bucket_for_app_upgrade(true);

    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
    sp<MockConfigMetadataProvider> provider = makeMockConfigMetadataProvider(/*enabled=*/false);
    CountMetricProducer countProducer(kConfigKey, metric, -1 /* no condition */, {}, wizard,
                                      protoHash, bucketStartTimeNs, bucketStartTimeNs, provider);

    // Bucket is not flushed yet.
    LogEvent event1(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event1, bucketStartTimeNs + 1, tagId, /*uid=*/"111");
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
    ASSERT_EQ(0UL, countProducer.mPastBuckets.size());

    // App upgrade or boot complete forces a bucket flush.
    // Check that there's a past bucket and the bucket end is not adjusted, since the upgrade
    // occurred after the bucket end time.
    switch (GetParam()) {
        case APP_UPGRADE:
            countProducer.notifyAppUpgrade(eventTimeNs);
            break;
        case BOOT_COMPLETE:
            countProducer.onStatsdInitCompleted(eventTimeNs);
            break;
    }
    ASSERT_EQ(1UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
    EXPECT_EQ(bucketStartTimeNs,
              countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs,
              countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
    EXPECT_EQ(eventTimeNs, countProducer.mCurrentBucketStartTimeNs);

    // Next event occurs in the same (partial) bucket that the split created.
    LogEvent event2(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event2, bucketStartTimeNs + 70 * NS_PER_SEC + 10, tagId, /*uid=*/"222");
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);
    ASSERT_EQ(1UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());

    // Third event lands in the following bucket.
    LogEvent event3(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event3, bucketStartTimeNs + 121 * NS_PER_SEC + 10, tagId, /*uid=*/"333");
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event3);
    ASSERT_EQ(2UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
    EXPECT_EQ((int64_t)eventTimeNs,
              countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mBucketStartNs);
    EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs,
              countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mBucketEndNs);
}

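// With split_bucket_for_app_upgrade disabled, an app upgrade must not split the bucket:
// no past bucket is created and the current bucket keeps its original boundaries.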
TEST(CountMetricProducerTest, TestSplitOnAppUpgradeDisabled) {
    sp<AlarmMonitor> alarmMonitor;
    int64_t bucketStartTimeNs = 10000000000;
    int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
    int64_t eventTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;

    int tagId = 1;
    int conditionTagId = 2;
    CountMetric metric;
    metric.set_id(1);
    metric.set_bucket(ONE_MINUTE);
    metric.set_split_bucket_for_app_upgrade(false);
    Alert alert;
    alert.set_num_buckets(3);
    alert.set_trigger_if_sum_gt(2);

    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
    sp<MockConfigMetadataProvider> provider = makeMockConfigMetadataProvider(/*enabled=*/false);
    CountMetricProducer countProducer(kConfigKey, metric, -1 /* no condition */, {}, wizard,
                                      protoHash, bucketStartTimeNs, bucketStartTimeNs, provider);

    sp<AnomalyTracker> anomalyTracker =
            countProducer.addAnomalyTracker(alert, alarmMonitor, UPDATE_NEW, bucketStartTimeNs);
    EXPECT_TRUE(anomalyTracker != nullptr);

    LogEvent event1(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event1, bucketStartTimeNs + 1, tagId, /*uid=*/"111");
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
    ASSERT_EQ(0UL, countProducer.mPastBuckets.size());
    EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));

    // App upgrade event occurs. Make sure no bucket is split:
    // there is no past bucket and the current bucket boundaries are unchanged.
    countProducer.notifyAppUpgrade(eventTimeNs);

    ASSERT_EQ(0UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
    EXPECT_EQ(0, countProducer.getCurrentBucketNum());
    EXPECT_EQ(bucketStartTimeNs, countProducer.mCurrentBucketStartTimeNs);
    // Anomaly tracker only contains full buckets.
    EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));

    int64_t lastEndTimeNs = countProducer.getCurrentBucketEndTimeNs();
    // Next event still occurs in the first bucket.
    LogEvent event2(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event2, eventTimeNs + 10 * NS_PER_SEC, tagId, /*uid=*/"222");
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);
    ASSERT_EQ(0UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
    EXPECT_EQ(bucketStartTimeNs, countProducer.mCurrentBucketStartTimeNs);
    EXPECT_EQ(0, countProducer.getCurrentBucketNum());
    EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));

    // Third event lands in the following bucket.
    LogEvent event3(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event3, bucketStartTimeNs + 62 * NS_PER_SEC + 10, tagId, /*uid=*/"333");
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event3);
    ASSERT_EQ(1UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
    EXPECT_EQ(bucketStartTimeNs,
              countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
    EXPECT_EQ(bucketStartTimeNs + 60 * NS_PER_SEC,
              countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
    EXPECT_EQ(2, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mCount);
    EXPECT_EQ(bucketStartTimeNs + 60 * NS_PER_SEC, countProducer.mCurrentBucketStartTimeNs);
    EXPECT_EQ(1, countProducer.getCurrentBucketNum());
    EXPECT_EQ(2, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
}

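// Exercises unsliced anomaly detection: an alarm should fire once the count summed over the
// last two buckets exceeds the threshold of 2, and a repeat anomaly within the 1-second
// refractory period should not move the refractory end time.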
TEST(CountMetricProducerTest, TestAnomalyDetectionUnSliced) {
    sp<AlarmMonitor> alarmMonitor;
    Alert alert;
    alert.set_id(11);
    alert.set_metric_id(1);
    alert.set_trigger_if_sum_gt(2);
    alert.set_num_buckets(2);
    const int32_t refPeriodSec = 1;
    alert.set_refractory_period_secs(refPeriodSec);

    int64_t bucketStartTimeNs = 10000000000;
    int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
    int64_t bucket2StartTimeNs = bucketStartTimeNs + bucketSizeNs;
    int64_t bucket3StartTimeNs = bucketStartTimeNs + 2 * bucketSizeNs;

    CountMetric metric;
    metric.set_id(1);
    metric.set_bucket(ONE_MINUTE);

    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
    sp<MockConfigMetadataProvider> provider = makeMockConfigMetadataProvider(/*enabled=*/false);
    CountMetricProducer countProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, {},
                                      wizard, protoHash, bucketStartTimeNs, bucketStartTimeNs,
                                      provider);

    sp<AnomalyTracker> anomalyTracker =
            countProducer.addAnomalyTracker(alert, alarmMonitor, UPDATE_NEW, bucketStartTimeNs);

    int tagId = 1;
    LogEvent event1(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event1, bucketStartTimeNs + 1, tagId);
    LogEvent event2(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event2, bucketStartTimeNs + 2, tagId);
    LogEvent event3(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event3, bucketStartTimeNs + 2 * bucketSizeNs + 1, tagId);
    LogEvent event4(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event4, bucketStartTimeNs + 3 * bucketSizeNs + 1, tagId);
    LogEvent event5(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event5, bucketStartTimeNs + 3 * bucketSizeNs + 2, tagId);
    LogEvent event6(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event6, bucketStartTimeNs + 3 * bucketSizeNs + 3, tagId);
    LogEvent event7(/*uid=*/0, /*pid=*/0);
    makeLogEvent(&event7, bucketStartTimeNs + 3 * bucketSizeNs + 2 * NS_PER_SEC, tagId);

    // Two events in bucket #0.
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);

    ASSERT_EQ(1UL, countProducer.mCurrentSlicedCounter->size());
    EXPECT_EQ(2L, countProducer.mCurrentSlicedCounter->begin()->second);
    EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_METRIC_DIMENSION_KEY), 0U);

    // One event in bucket #2. No alarm, since bucket #0 has already dropped out of the
    // two-bucket anomaly window.
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event3);
    ASSERT_EQ(1UL, countProducer.mCurrentSlicedCounter->size());
    EXPECT_EQ(1L, countProducer.mCurrentSlicedCounter->begin()->second);
    EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_METRIC_DIMENSION_KEY), 0U);

    // Three events in bucket #3.
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event4);
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event5);
    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event6);
    ASSERT_EQ(1UL, countProducer.mCurrentSlicedCounter->size());
    EXPECT_EQ(3L, countProducer.mCurrentSlicedCounter->begin()->second);
    // The anomaly at event6 is within the refractory period, so the alarm timestamp stays at
    // event5's timestamp, not event6's.
    EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_METRIC_DIMENSION_KEY),
              std::ceil(1.0 * event5.GetElapsedTimestampNs() / NS_PER_SEC + refPeriodSec));

    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event7);
    ASSERT_EQ(1UL, countProducer.mCurrentSlicedCounter->size());
    EXPECT_EQ(4L, countProducer.mCurrentSlicedCounter->begin()->second);
    EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_METRIC_DIMENSION_KEY),
              std::ceil(1.0 * event7.GetElapsedTimestampNs() / NS_PER_SEC + refPeriodSec));
}

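// With ONE_WEEK buckets, a time base of one day, and a start time of five weeks, the
// producer should land in bucket #4, ending one day after the five-week mark.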
TEST(CountMetricProducerTest, TestOneWeekTimeUnit) {
    CountMetric metric;
    metric.set_id(1);
    metric.set_bucket(ONE_WEEK);

    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();

    int64_t oneDayNs = 24 * 60 * 60 * 1e9;
    int64_t fiveWeeksNs = 5 * 7 * oneDayNs;

    sp<MockConfigMetadataProvider> provider = makeMockConfigMetadataProvider(/*enabled=*/false);
    CountMetricProducer countProducer(kConfigKey, metric, -1 /* meaning no condition */, {}, wizard,
                                      protoHash, oneDayNs, fiveWeeksNs, provider);

    int64_t fiveWeeksOneDayNs = fiveWeeksNs + oneDayNs;

    EXPECT_EQ(fiveWeeksNs, countProducer.mCurrentBucketStartTimeNs);
    EXPECT_EQ(4, countProducer.mCurrentBucketNum);
    EXPECT_EQ(fiveWeeksOneDayNs, countProducer.getCurrentBucketEndTimeNs());
}

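// The following tests verify that data-corruption reasons (socket loss, event queue overflow)
// reported for the 'what' atom, the condition atom, and the state atom are surfaced in the
// dump report.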
TEST(CountMetricProducerTest, TestCorruptedDataReason_WhatLoss) {
    const int64_t bucketStartTimeNs = 10000000000;
    const int tagId = 1;
    const int conditionId = 10;

    CountMetric metric;
    metric.set_id(1);
    metric.set_bucket(ONE_MINUTE);

    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
    sp<MockConfigMetadataProvider> provider = makeMockConfigMetadataProvider(/*enabled=*/false);
    CountMetricProducer countProducer(kConfigKey, metric, 0 /*condition index*/,
                                      {ConditionState::kUnknown}, wizard, protoHash,
                                      bucketStartTimeNs, bucketStartTimeNs, provider);

    countProducer.onMatchedLogEventLost(tagId, DATA_CORRUPTED_SOCKET_LOSS,
                                        MetricProducer::LostAtomType::kWhat);
    {
        // Check dump report content.
        StatsLogReport report = onDumpReport(countProducer, bucketStartTimeNs + 50);
        EXPECT_THAT(report.data_corrupted_reason(), ElementsAre(DATA_CORRUPTED_SOCKET_LOSS));
    }

    countProducer.onMatchedLogEventLost(tagId, DATA_CORRUPTED_EVENT_QUEUE_OVERFLOW,
                                        MetricProducer::LostAtomType::kWhat);
    {
        // Check dump report content.
        StatsLogReport report = onDumpReport(countProducer, bucketStartTimeNs + 150);
        EXPECT_THAT(report.data_corrupted_reason(),
                    ElementsAre(DATA_CORRUPTED_EVENT_QUEUE_OVERFLOW));
    }

    countProducer.onMatchedLogEventLost(tagId, DATA_CORRUPTED_SOCKET_LOSS,
                                        MetricProducer::LostAtomType::kWhat);
    countProducer.onMatchedLogEventLost(tagId, DATA_CORRUPTED_EVENT_QUEUE_OVERFLOW,
                                        MetricProducer::LostAtomType::kWhat);
    {
        // Check dump report content.
        StatsLogReport report = onDumpReport(countProducer, bucketStartTimeNs + 250);
        EXPECT_THAT(report.data_corrupted_reason(),
                    ElementsAre(DATA_CORRUPTED_EVENT_QUEUE_OVERFLOW, DATA_CORRUPTED_SOCKET_LOSS));
    }
}

TEST(CountMetricProducerTest, TestCorruptedDataReason_ConditionLoss) {
    const int64_t bucketStartTimeNs = 10000000000;
    const int conditionId = 10;

    CountMetric metric;
    metric.set_id(1);
    metric.set_bucket(ONE_MINUTE);

    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
    sp<MockConfigMetadataProvider> provider = makeMockConfigMetadataProvider(/*enabled=*/false);
    CountMetricProducer countProducer(kConfigKey, metric, 0 /*condition index*/,
                                      {ConditionState::kUnknown}, wizard, protoHash,
                                      bucketStartTimeNs, bucketStartTimeNs, provider);

    countProducer.onMatchedLogEventLost(conditionId, DATA_CORRUPTED_SOCKET_LOSS,
                                        MetricProducer::LostAtomType::kCondition);
    {
        // Check dump report content.
        StatsLogReport report = onDumpReport(countProducer, bucketStartTimeNs + 50);
        EXPECT_THAT(report.data_corrupted_reason(), ElementsAre(DATA_CORRUPTED_SOCKET_LOSS));
    }

    countProducer.onMatchedLogEventLost(conditionId, DATA_CORRUPTED_EVENT_QUEUE_OVERFLOW,
                                        MetricProducer::LostAtomType::kCondition);
    {
        // Check dump report content.
        StatsLogReport report = onDumpReport(countProducer, bucketStartTimeNs + 150);
        EXPECT_THAT(report.data_corrupted_reason(),
                    ElementsAre(DATA_CORRUPTED_EVENT_QUEUE_OVERFLOW, DATA_CORRUPTED_SOCKET_LOSS));
    }
}

TEST(CountMetricProducerTest, TestCorruptedDataReason_StateLoss) {
    const int64_t bucketStartTimeNs = 10000000000;
    const int stateAtomId = 10;

    CountMetric metric;
    metric.set_id(1);
    metric.set_bucket(ONE_MINUTE);

    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
    sp<MockConfigMetadataProvider> provider = makeMockConfigMetadataProvider(/*enabled=*/false);
    CountMetricProducer countProducer(kConfigKey, metric, 0 /*condition index*/,
                                      {ConditionState::kUnknown}, wizard, protoHash,
                                      bucketStartTimeNs, bucketStartTimeNs, provider);

    countProducer.onStateEventLost(stateAtomId, DATA_CORRUPTED_SOCKET_LOSS);
    {
        // Check dump report content.
        StatsLogReport report = onDumpReport(countProducer, bucketStartTimeNs + 50);
        EXPECT_THAT(report.data_corrupted_reason(), ElementsAre(DATA_CORRUPTED_SOCKET_LOSS));
    }

    // Validate that the data corruption signal remains accurate after another dump.
    {
        // Check dump report content.
        StatsLogReport report = onDumpReport(countProducer, bucketStartTimeNs + 150);
        EXPECT_THAT(report.data_corrupted_reason(), ElementsAre(DATA_CORRUPTED_SOCKET_LOSS));
    }
}

}  // namespace statsd
}  // namespace os
}  // namespace android
#else
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif