xref: /aosp_15_r20/external/cronet/components/metrics/metrics_service_unittest.cc (revision 6777b5387eb2ff775bb5750e3f5d96f37fb7352b)
1 // Copyright 2014 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "components/metrics/metrics_service.h"
6 
7 #include <stdint.h>
8 
9 #include <algorithm>
10 #include <memory>
11 #include <string>
12 
13 #include "base/containers/contains.h"
14 #include "base/files/file_path.h"
15 #include "base/files/file_util.h"
16 #include "base/files/scoped_temp_dir.h"
17 #include "base/functional/bind.h"
18 #include "base/memory/raw_ptr.h"
19 #include "base/metrics/field_trial.h"
20 #include "base/metrics/histogram_functions.h"
21 #include "base/metrics/histogram_snapshot_manager.h"
22 #include "base/metrics/metrics_hashes.h"
23 #include "base/metrics/statistics_recorder.h"
24 #include "base/metrics/user_metrics.h"
25 #include "base/task/single_thread_task_runner.h"
26 #include "base/test/bind.h"
27 #include "base/test/metrics/histogram_tester.h"
28 #include "base/test/scoped_feature_list.h"
29 #include "base/test/task_environment.h"
30 #include "base/threading/platform_thread.h"
31 #include "build/build_config.h"
32 #include "components/metrics/clean_exit_beacon.h"
33 #include "components/metrics/client_info.h"
34 #include "components/metrics/cloned_install_detector.h"
35 #include "components/metrics/environment_recorder.h"
36 #include "components/metrics/log_decoder.h"
37 #include "components/metrics/metrics_features.h"
38 #include "components/metrics/metrics_log.h"
39 #include "components/metrics/metrics_pref_names.h"
40 #include "components/metrics/metrics_scheduler.h"
41 #include "components/metrics/metrics_state_manager.h"
42 #include "components/metrics/metrics_upload_scheduler.h"
43 #include "components/metrics/stability_metrics_helper.h"
44 #include "components/metrics/test/test_enabled_state_provider.h"
45 #include "components/metrics/test/test_metrics_provider.h"
46 #include "components/metrics/test/test_metrics_service_client.h"
47 #include "components/metrics/unsent_log_store_metrics_impl.h"
48 #include "components/prefs/testing_pref_service.h"
49 #include "components/variations/active_field_trials.h"
50 #include "testing/gtest/include/gtest/gtest.h"
51 #include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"
52 #include "third_party/metrics_proto/system_profile.pb.h"
53 #include "third_party/zlib/google/compression_utils.h"
54 
55 namespace metrics {
56 namespace {
57 
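// Name of the pref that backs TestUnsentLogStore's persisted logs in tests.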
58 const char kTestPrefName[] = "TestPref";
59 
60 class TestUnsentLogStore : public UnsentLogStore {
61  public:
62   explicit TestUnsentLogStore(PrefService* service)
63       : UnsentLogStore(std::make_unique<UnsentLogStoreMetricsImpl>(),
64                        service,
65                        kTestPrefName,
66                        nullptr,
67                        // Set to 3 so logs are not dropped in the test.
68                        UnsentLogStore::UnsentLogStoreLimits{
69                            .min_log_count = 3,
70                        },
71                        /*signing_key=*/std::string(),
72                        /*logs_event_manager=*/nullptr) {}
73   ~TestUnsentLogStore() override = default;
74 
75   TestUnsentLogStore(const TestUnsentLogStore&) = delete;
76   TestUnsentLogStore& operator=(const TestUnsentLogStore&) = delete;
77 
78   static void RegisterPrefs(PrefRegistrySimple* registry) {
79     registry->RegisterListPref(kTestPrefName);
80   }
81 };
82 
83 // Returns true if the |trial_name|/|group_name| trial is in |proto|'s FieldTrials.
84 bool IsFieldTrialPresent(const SystemProfileProto& proto,
85                          const std::string& trial_name,
86                          const std::string& group_name) {
87   const variations::ActiveGroupId id =
88       variations::MakeActiveGroupId(trial_name, group_name);
89 
90   for (const auto& trial : proto.field_trial()) {
91     if (trial.name_id() == id.name && trial.group_id() == id.group)
92       return true;
93   }
94   return false;
95 }
96 
97 class TestMetricsService : public MetricsService {
98  public:
99   TestMetricsService(MetricsStateManager* state_manager,
100                      MetricsServiceClient* client,
101                      PrefService* local_state)
102       : MetricsService(state_manager, client, local_state) {}
103 
104   TestMetricsService(const TestMetricsService&) = delete;
105   TestMetricsService& operator=(const TestMetricsService&) = delete;
106 
107   ~TestMetricsService() override = default;
108 
109   using MetricsService::INIT_TASK_DONE;
110   using MetricsService::INIT_TASK_SCHEDULED;
111   using MetricsService::RecordCurrentEnvironmentHelper;
112   using MetricsService::SENDING_LOGS;
113   using MetricsService::state;
114 
115   // MetricsService:
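  // Overridden to only record that a serialized system profile was provided
  // and whether it was complete, instead of forwarding it to the real
  // persistent system profile.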
116   void SetPersistentSystemProfile(const std::string& serialized_proto,
117                                   bool complete) override {
118     persistent_system_profile_provided_ = true;
119     persistent_system_profile_complete_ = complete;
120   }
121 
122   bool persistent_system_profile_provided() const {
123     return persistent_system_profile_provided_;
124   }
125   bool persistent_system_profile_complete() const {
126     return persistent_system_profile_complete_;
127   }
128 
129  private:
130   bool persistent_system_profile_provided_ = false;
131   bool persistent_system_profile_complete_ = false;
132 };
133 
134 class TestMetricsLog : public MetricsLog {
135  public:
136   TestMetricsLog(const std::string& client_id,
137                  int session_id,
138                  MetricsServiceClient* client)
139       : MetricsLog(client_id, session_id, MetricsLog::ONGOING_LOG, client) {}
140 
141   TestMetricsLog(const TestMetricsLog&) = delete;
142   TestMetricsLog& operator=(const TestMetricsLog&) = delete;
143 
144   ~TestMetricsLog() override = default;
145 };
146 
147 const char kOnDidCreateMetricsLogHistogramName[] = "Test.OnDidCreateMetricsLog";
148 
149 class TestMetricsProviderForOnDidCreateMetricsLog : public TestMetricsProvider {
150  public:
151   TestMetricsProviderForOnDidCreateMetricsLog() = default;
152   ~TestMetricsProviderForOnDidCreateMetricsLog() override = default;
153 
154   void OnDidCreateMetricsLog() override {
155     base::UmaHistogramBoolean(kOnDidCreateMetricsLogHistogramName, true);
156   }
157 };
158 
159 const char kProvideHistogramsHistogramName[] = "Test.ProvideHistograms";
160 
161 class TestMetricsProviderForProvideHistograms : public TestMetricsProvider {
162  public:
163   TestMetricsProviderForProvideHistograms() = default;
164   ~TestMetricsProviderForProvideHistograms() override = default;
165 
166   bool ProvideHistograms() override {
167     base::UmaHistogramBoolean(kProvideHistogramsHistogramName, true);
168     return true;
169   }
170 
171   void ProvideCurrentSessionData(
172       ChromeUserMetricsExtension* uma_proto) override {
173     MetricsProvider::ProvideCurrentSessionData(uma_proto);
174   }
175 };
176 
177 class TestMetricsProviderForProvideHistogramsEarlyReturn
178     : public TestMetricsProviderForProvideHistograms {
179  public:
180   TestMetricsProviderForProvideHistogramsEarlyReturn() = default;
181   ~TestMetricsProviderForProvideHistogramsEarlyReturn() override = default;
182 
183   void OnDidCreateMetricsLog() override {}
184 };
185 
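// A provider that reports exactly one independent log, whose proto is tagged
// with client_id 123 so tests can recognize it.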
186 class TestIndependentMetricsProvider : public MetricsProvider {
187  public:
188   TestIndependentMetricsProvider() = default;
189   ~TestIndependentMetricsProvider() override = default;
190 
191   // MetricsProvider:
192   bool HasIndependentMetrics() override {
193     // Only return true the first time this is called (i.e., we only have one
194     // independent log to provide).
195     if (!has_independent_metrics_called_) {
196       has_independent_metrics_called_ = true;
197       return true;
198     }
199     return false;
200   }
201   void ProvideIndependentMetrics(
202       base::OnceClosure serialize_log_callback,
203       base::OnceCallback<void(bool)> done_callback,
204       ChromeUserMetricsExtension* uma_proto,
205       base::HistogramSnapshotManager* snapshot_manager) override {
206     provide_independent_metrics_called_ = true;
207     uma_proto->set_client_id(123);
208     std::move(done_callback).Run(true);
209   }
210 
211   bool has_independent_metrics_called() const {
212     return has_independent_metrics_called_;
213   }
214 
215   bool provide_independent_metrics_called() const {
216     return provide_independent_metrics_called_;
217   }
218 
219  private:
220   bool has_independent_metrics_called_ = false;
221   bool provide_independent_metrics_called_ = false;
222 };
223 
224 class MetricsServiceTest : public testing::Test {
225  public:
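  // Reporting consent and enabled state both start as false; tests opt in via
  // EnableMetricsReporting() / SetMetricsReporting() below.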
226   MetricsServiceTest()
227       : enabled_state_provider_(new TestEnabledStateProvider(false, false)) {
228     base::SetRecordActionTaskRunner(
229         task_environment_.GetMainThreadTaskRunner());
230     MetricsService::RegisterPrefs(testing_local_state_.registry());
231   }
232 
233   MetricsServiceTest(const MetricsServiceTest&) = delete;
234   MetricsServiceTest& operator=(const MetricsServiceTest&) = delete;
235 
236   ~MetricsServiceTest() override = default;
237 
238   void SetUp() override { ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); }
239 
240   MetricsStateManager* GetMetricsStateManager(
241       const base::FilePath& user_data_dir = base::FilePath(),
242       StartupVisibility startup_visibility = StartupVisibility::kUnknown) {
243     // Lazy-initialize the metrics_state_manager so that it correctly reads the
244     // stability state from prefs after tests have a chance to initialize it.
245     if (!metrics_state_manager_) {
246       metrics_state_manager_ = MetricsStateManager::Create(
247           GetLocalState(), enabled_state_provider_.get(), std::wstring(),
248           user_data_dir, startup_visibility);
249       metrics_state_manager_->InstantiateFieldTrialList();
250     }
251     return metrics_state_manager_.get();
252   }
253 
254   std::unique_ptr<TestUnsentLogStore> InitializeTestLogStoreAndGet() {
255     TestUnsentLogStore::RegisterPrefs(testing_local_state_.registry());
256     return std::make_unique<TestUnsentLogStore>(GetLocalState());
257   }
258 
259   PrefService* GetLocalState() { return &testing_local_state_; }
260 
261   // Sets metrics reporting as enabled for testing.
262   void EnableMetricsReporting() { SetMetricsReporting(true); }
263 
264   // Sets metrics reporting for testing.
265   void SetMetricsReporting(bool enabled) {
266     enabled_state_provider_->set_consent(enabled);
267     enabled_state_provider_->set_enabled(enabled);
268   }
269 
270   // Finds a histogram with the specified |name_hash| in |histograms|.
271   const base::HistogramBase* FindHistogram(
272       const base::StatisticsRecorder::Histograms& histograms,
273       uint64_t name_hash) {
274     for (const base::HistogramBase* histogram : histograms) {
275       if (name_hash == base::HashMetricName(histogram->histogram_name()))
276         return histogram;
277     }
278     return nullptr;
279   }
280 
281   // Checks whether |uma_log| contains any histograms that are not flagged
282   // with kUmaStabilityHistogramFlag. Stability logs should only contain
283   // histograms that have that flag set.
284   void CheckForNonStabilityHistograms(
285       const ChromeUserMetricsExtension& uma_log) {
286     const int kStabilityFlags = base::HistogramBase::kUmaStabilityHistogramFlag;
287     const base::StatisticsRecorder::Histograms histograms =
288         base::StatisticsRecorder::GetHistograms();
289     for (int i = 0; i < uma_log.histogram_event_size(); ++i) {
290       const uint64_t hash = uma_log.histogram_event(i).name_hash();
291 
292       const base::HistogramBase* histogram = FindHistogram(histograms, hash);
293       EXPECT_TRUE(histogram) << hash;
294 
295       EXPECT_TRUE(histogram->HasFlags(kStabilityFlags)) << hash;
296     }
297   }
298 
299   // Returns the number of samples logged to the specified histogram or 0 if
300   // the histogram was not found.
301   int GetHistogramSampleCount(const ChromeUserMetricsExtension& uma_log,
302                               base::StringPiece histogram_name) {
303     const auto histogram_name_hash = base::HashMetricName(histogram_name);
304     int samples = 0;
305     for (int i = 0; i < uma_log.histogram_event_size(); ++i) {
306       const auto& histogram = uma_log.histogram_event(i);
307       if (histogram.name_hash() == histogram_name_hash) {
308         for (int j = 0; j < histogram.bucket_size(); ++j) {
309           const auto& bucket = histogram.bucket(j);
310           // Per proto comments, count field not being set means 1 sample.
311           samples += (!bucket.has_count() ? 1 : bucket.count());
312         }
313       }
314     }
315     return samples;
316   }
317 
318   // Returns the sampled count of the |kOnDidCreateMetricsLogHistogramName|
319   // histogram in the currently staged log in |test_log_store|.
320   int GetSampleCountOfOnDidCreateLogHistogram(MetricsLogStore* test_log_store) {
321     ChromeUserMetricsExtension log;
322     EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &log));
323     return GetHistogramSampleCount(log, kOnDidCreateMetricsLogHistogramName);
324   }
325 
326   int GetNumberOfUserActions(MetricsLogStore* test_log_store) {
327     ChromeUserMetricsExtension log;
328     EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &log));
329     return log.user_action_event_size();
330   }
331 
332   const base::FilePath user_data_dir_path() { return temp_dir_.GetPath(); }
333 
334  protected:
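  // MOCK_TIME makes delayed tasks run deterministically when tests call
  // task_environment_.FastForwardBy(), which is used below to drive the init,
  // rotation, and upload scheduling.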
335   base::test::TaskEnvironment task_environment_{
336       base::test::TaskEnvironment::TimeSource::MOCK_TIME};
337   base::test::ScopedFeatureList feature_list_;
338 
339  private:
340   std::unique_ptr<TestEnabledStateProvider> enabled_state_provider_;
341   TestingPrefServiceSimple testing_local_state_;
342   std::unique_ptr<MetricsStateManager> metrics_state_manager_;
343   base::ScopedTempDir temp_dir_;
344 };
345 
346 class MetricsServiceTestWithFeatures
347     : public MetricsServiceTest,
348       public ::testing::WithParamInterface<std::tuple<bool>> {
349  public:
350   MetricsServiceTestWithFeatures() = default;
351   ~MetricsServiceTestWithFeatures() override = default;
352 
353   bool ShouldSnapshotInBg() { return std::get<0>(GetParam()); }
354 
355   void SetUp() override {
356     MetricsServiceTest::SetUp();
357     std::vector<base::test::FeatureRefAndParams> enabled_features;
358     std::vector<base::test::FeatureRef> disabled_features;
359 
360     if (ShouldSnapshotInBg()) {
361       enabled_features.emplace_back(features::kMetricsServiceDeltaSnapshotInBg,
362                                     base::FieldTrialParams());
363     } else {
364       disabled_features.emplace_back(
365           features::kMetricsServiceDeltaSnapshotInBg);
366     }
367 
368     feature_list_.InitWithFeaturesAndParameters(enabled_features,
369                                                 disabled_features);
370   }
371 
372  private:
373   base::test::ScopedFeatureList feature_list_;
374 };
375 
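// Parameters for MetricsServiceTestWithStartupVisibility. |expected_beacon_value|
// is the "exited_cleanly" value the beacon file is expected to contain after
// InitializeMetricsRecordingState() on Android; see InitialStabilityLogAfterCrash.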
376 struct StartupVisibilityTestParams {
377   metrics::StartupVisibility startup_visibility;
378   bool expected_beacon_value;
379 };
380 
381 class MetricsServiceTestWithStartupVisibility
382     : public MetricsServiceTest,
383       public ::testing::WithParamInterface<
384           std::tuple<StartupVisibilityTestParams, bool>> {
385  public:
386   MetricsServiceTestWithStartupVisibility() = default;
387   ~MetricsServiceTestWithStartupVisibility() override = default;
388 
389   bool ShouldSnapshotInBg() { return std::get<1>(GetParam()); }
390 
391   void SetUp() override {
392     MetricsServiceTest::SetUp();
393     std::vector<base::test::FeatureRefAndParams> enabled_features;
394     std::vector<base::test::FeatureRef> disabled_features;
395 
396     if (ShouldSnapshotInBg()) {
397       enabled_features.emplace_back(features::kMetricsServiceDeltaSnapshotInBg,
398                                     base::FieldTrialParams());
399     } else {
400       disabled_features.emplace_back(
401           features::kMetricsServiceDeltaSnapshotInBg);
402     }
403 
404     feature_list_.InitWithFeaturesAndParameters(enabled_features,
405                                                 disabled_features);
406   }
407 
408  private:
409   base::test::ScopedFeatureList feature_list_;
410 };
411 
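// A provider that activates |profile_metrics_trial_| while providing system
// profile metrics and |session_data_trial_| while providing session data, so
// tests can verify that trials activated during log collection are reported.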
412 class ExperimentTestMetricsProvider : public TestMetricsProvider {
413  public:
414   explicit ExperimentTestMetricsProvider(
415       base::FieldTrial* profile_metrics_trial,
416       base::FieldTrial* session_data_trial)
417       : profile_metrics_trial_(profile_metrics_trial),
418         session_data_trial_(session_data_trial) {}
419 
420   ~ExperimentTestMetricsProvider() override = default;
421 
422   void ProvideSystemProfileMetrics(
423       SystemProfileProto* system_profile_proto) override {
424     TestMetricsProvider::ProvideSystemProfileMetrics(system_profile_proto);
425     profile_metrics_trial_->Activate();
426   }
427 
428   void ProvideCurrentSessionData(
429       ChromeUserMetricsExtension* uma_proto) override {
430     TestMetricsProvider::ProvideCurrentSessionData(uma_proto);
431     session_data_trial_->Activate();
432   }
433 
434  private:
435   raw_ptr<base::FieldTrial> profile_metrics_trial_;
436   raw_ptr<base::FieldTrial> session_data_trial_;
437 };
438 
439 bool HistogramExists(base::StringPiece name) {
440   return base::StatisticsRecorder::FindHistogram(name) != nullptr;
441 }
442 
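// Returns the number of samples recorded in |name| since the last delta
// snapshot. Note that SnapshotDelta() consumes the delta, so repeated calls
// only count newly added samples.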
443 base::HistogramBase::Count GetHistogramDeltaTotalCount(base::StringPiece name) {
444   return base::StatisticsRecorder::FindHistogram(name)
445       ->SnapshotDelta()
446       ->TotalCount();
447 }
448 
449 }  // namespace
450 
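// The single bool parameter controls whether
// features::kMetricsServiceDeltaSnapshotInBg is enabled (see
// MetricsServiceTestWithFeatures::SetUp()).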
451 INSTANTIATE_TEST_SUITE_P(All,
452                          MetricsServiceTestWithFeatures,
453                          ::testing::Combine(::testing::Bool()));
454 
455 TEST_P(MetricsServiceTestWithFeatures, RecordId) {
456   EnableMetricsReporting();
457   GetMetricsStateManager(user_data_dir_path())->ForceClientIdCreation();
458 
459   // Set an initial value for the record-ids, to make them predictable.
460   GetLocalState()->SetInteger(prefs::kMetricsLogRecordId, 1000);
461 
462   TestMetricsServiceClient client;
463   TestMetricsService service(GetMetricsStateManager(user_data_dir_path()),
464                              &client, GetLocalState());
465 
466   auto log1 = service.CreateLogForTesting(MetricsLog::ONGOING_LOG);
467   auto log2 = service.CreateLogForTesting(MetricsLog::INITIAL_STABILITY_LOG);
468   auto log3 = service.CreateLogForTesting(MetricsLog::INDEPENDENT_LOG);
469 
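  // Each created log, regardless of its type, should be assigned the next
  // record ID after the seeded value of 1000.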
470   EXPECT_EQ(1001, log1->uma_proto()->record_id());
471   EXPECT_EQ(1002, log2->uma_proto()->record_id());
472   EXPECT_EQ(1003, log3->uma_proto()->record_id());
473 }
474 
475 TEST_P(MetricsServiceTestWithFeatures, InitialStabilityLogAfterCleanShutDown) {
476   base::HistogramTester histogram_tester;
477   EnableMetricsReporting();
478   // Write a beacon file indicating that Chrome exited cleanly. Note that the
479   // crash streak value is arbitrary.
480   const base::FilePath beacon_file_path =
481       user_data_dir_path().Append(kCleanExitBeaconFilename);
482   ASSERT_TRUE(base::WriteFile(
483       beacon_file_path, CleanExitBeacon::CreateBeaconFileContentsForTesting(
484                             /*exited_cleanly=*/true, /*crash_streak=*/1)));
485 
486   TestMetricsServiceClient client;
487   TestMetricsService service(GetMetricsStateManager(user_data_dir_path()),
488                              &client, GetLocalState());
489 
490   TestMetricsProvider* test_provider = new TestMetricsProvider();
491   service.RegisterMetricsProvider(
492       std::unique_ptr<MetricsProvider>(test_provider));
493 
494   service.InitializeMetricsRecordingState();
495 
496   // No initial stability log should be generated.
497   EXPECT_FALSE(service.has_unsent_logs());
498 
499   // Ensure that HasPreviousSessionData() is always called on providers,
500   // for consistency, even if other conditions already indicate their presence.
501   EXPECT_TRUE(test_provider->has_initial_stability_metrics_called());
502 
503   // The test provider should not have been called upon to provide initial
504   // stability nor regular stability metrics.
505   EXPECT_FALSE(test_provider->provide_initial_stability_metrics_called());
506   EXPECT_FALSE(test_provider->provide_stability_metrics_called());
507 
508   // As there wasn't an unclean shutdown, no browser crash samples should have
509   // been emitted.
510   histogram_tester.ExpectBucketCount("Stability.Counts2",
511                                      StabilityEventType::kBrowserCrash, 0);
512 }
513 
514 TEST_P(MetricsServiceTestWithFeatures, InitialStabilityLogAtProviderRequest) {
515   base::HistogramTester histogram_tester;
516   EnableMetricsReporting();
517 
518   // Save an existing system profile to prefs, to correspond to what would be
519   // saved from a previous session.
520   TestMetricsServiceClient client;
521   TestMetricsLog log("0a94430b-18e5-43c8-a657-580f7e855ce1", 1, &client);
522   // Manually override the log's session hash to something else to verify that
523   // stability logs created later on using this environment will contain that
524   // session hash.
525   uint64_t modified_session_hash =
526       log.uma_proto()->system_profile().session_hash() + 1;
527   log.uma_proto()->mutable_system_profile()->set_session_hash(
528       modified_session_hash);
529   DelegatingProvider delegating_provider;
530   TestMetricsService::RecordCurrentEnvironmentHelper(&log, GetLocalState(),
531                                                      &delegating_provider);
532 
533   // Record stability build time and version from previous session, so that
534   // stability metrics (including exited cleanly flag) won't be cleared.
535   EnvironmentRecorder(GetLocalState())
536       .SetBuildtimeAndVersion(MetricsLog::GetBuildTime(),
537                               client.GetVersionString());
538 
539   // Write a beacon file indicating that Chrome exited cleanly. Note that the
540   // crash streak value is arbitrary.
541   const base::FilePath beacon_file_path =
542       user_data_dir_path().Append(kCleanExitBeaconFilename);
543   ASSERT_TRUE(base::WriteFile(
544       beacon_file_path, CleanExitBeacon::CreateBeaconFileContentsForTesting(
545                             /*exited_cleanly=*/true, /*crash_streak=*/1)));
546 
547   TestMetricsService service(GetMetricsStateManager(user_data_dir_path()),
548                              &client, GetLocalState());
549   // Add a metrics provider that requests a stability log.
550   TestMetricsProvider* test_provider = new TestMetricsProvider();
551   test_provider->set_has_initial_stability_metrics(true);
552   service.RegisterMetricsProvider(
553       std::unique_ptr<MetricsProvider>(test_provider));
554 
555   service.InitializeMetricsRecordingState();
556 
557   // The initial stability log should be generated and persisted in unsent logs.
558   MetricsLogStore* test_log_store = service.LogStoreForTest();
559   EXPECT_TRUE(test_log_store->has_unsent_logs());
560   EXPECT_FALSE(test_log_store->has_staged_log());
561 
562   // Ensure that HasPreviousSessionData() is always called on providers,
563   // for consistency, even if other conditions already indicate their presence.
564   EXPECT_TRUE(test_provider->has_initial_stability_metrics_called());
565 
566   // The test provider should have been called upon to provide initial
567   // stability and regular stability metrics.
568   EXPECT_TRUE(test_provider->provide_initial_stability_metrics_called());
569   EXPECT_TRUE(test_provider->provide_stability_metrics_called());
570 
571   // Stage the log and retrieve it.
572   test_log_store->StageNextLog();
573   EXPECT_TRUE(test_log_store->has_staged_log());
574 
575   ChromeUserMetricsExtension uma_log;
576   EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));
577 
578   EXPECT_TRUE(uma_log.has_client_id());
579   EXPECT_TRUE(uma_log.has_session_id());
580   EXPECT_TRUE(uma_log.has_system_profile());
581   EXPECT_TRUE(uma_log.system_profile().has_session_hash());
582   EXPECT_EQ(modified_session_hash, uma_log.system_profile().session_hash());
583   EXPECT_EQ(0, uma_log.user_action_event_size());
584   EXPECT_EQ(0, uma_log.omnibox_event_size());
585   CheckForNonStabilityHistograms(uma_log);
586   EXPECT_EQ(
587       1, GetHistogramSampleCount(uma_log, "UMA.InitialStabilityRecordBeacon"));
588 
589   // As there wasn't an unclean shutdown, no browser crash samples should have
590   // been emitted.
591   histogram_tester.ExpectBucketCount("Stability.Counts2",
592                                      StabilityEventType::kBrowserCrash, 0);
593 }
594 
595 TEST_P(MetricsServiceTestWithFeatures, IndependentLogAtProviderRequest) {
596   EnableMetricsReporting();
597   TestMetricsServiceClient client;
598   TestMetricsService service(GetMetricsStateManager(), &client,
599                              GetLocalState());
600 
601   // Create a provider that will have one independent log to provide.
602   auto* test_provider = new TestIndependentMetricsProvider();
603   service.RegisterMetricsProvider(
604       std::unique_ptr<MetricsProvider>(test_provider));
605 
606   service.InitializeMetricsRecordingState();
607   // Start() will create the first ongoing log.
608   service.Start();
609   ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());
610 
611   // Verify that the independent log provider has not yet been called, and emit
612   // a histogram. This histogram should not be put into the independent log.
613   EXPECT_FALSE(test_provider->has_independent_metrics_called());
614   EXPECT_FALSE(test_provider->provide_independent_metrics_called());
615   const std::string test_histogram = "Test.Histogram";
616   base::UmaHistogramBoolean(test_histogram, true);
617 
618   // Fast forward the time by |initialization_delay|, which is when the pending
619   // init tasks will run.
620   base::TimeDelta initialization_delay = service.GetInitializationDelay();
621   task_environment_.FastForwardBy(initialization_delay);
622   EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());
623 
624   // Fast forward the time by another |initialization_delay|, which is when
625   // metrics providers are called to provide independent logs.
626   task_environment_.FastForwardBy(initialization_delay);
627   EXPECT_TRUE(test_provider->has_independent_metrics_called());
628   EXPECT_TRUE(test_provider->provide_independent_metrics_called());
629 
630   // Fast forward the time until the MetricsRotationScheduler first runs, which
631   // should complete the first ongoing log.
632   // Note: The first log is only created after N = GetInitialIntervalSeconds()
633   // seconds since the start, and since we already fast forwarded by
634   // |initialization_delay| twice, we only need to fast forward by
635   // N - 2 * |initialization_delay|.
636   task_environment_.FastForwardBy(
637       base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
638       2 * initialization_delay);
639   EXPECT_EQ(TestMetricsService::SENDING_LOGS, service.state());
640 
641   MetricsLogStore* test_log_store = service.LogStoreForTest();
642 
643   // The currently staged log should be the independent log created by the
644   // independent log provider. The log should have a client id of 123. It should
645   // also not contain |test_histogram|.
646   ASSERT_TRUE(test_log_store->has_staged_log());
647   ChromeUserMetricsExtension uma_log;
648   EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));
649   EXPECT_EQ(uma_log.client_id(), 123UL);
650   EXPECT_EQ(GetHistogramSampleCount(uma_log, test_histogram), 0);
651 
652   // Discard the staged log and stage the next one. It should be the first
653   // ongoing log.
654   test_log_store->DiscardStagedLog();
655   ASSERT_TRUE(test_log_store->has_unsent_logs());
656   test_log_store->StageNextLog();
657   ASSERT_TRUE(test_log_store->has_staged_log());
658 
659   // Verify that the first ongoing log contains |test_histogram| (it should not
660   // have been put into the independent log).
661   EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));
662   EXPECT_EQ(GetHistogramSampleCount(uma_log, test_histogram), 1);
663 }
664 
665 TEST_P(MetricsServiceTestWithFeatures, OnDidCreateMetricsLogAtShutdown) {
666   base::HistogramTester histogram_tester;
667   EnableMetricsReporting();
668   TestMetricsServiceClient client;
669 
670   TestMetricsService service(GetMetricsStateManager(), &client,
671                              GetLocalState());
672 
673   // Create a provider that will log to |kOnDidCreateMetricsLogHistogramName|
674   // in OnDidCreateMetricsLog().
675   auto* test_provider = new TestMetricsProviderForOnDidCreateMetricsLog();
676   service.RegisterMetricsProvider(
677       std::unique_ptr<MetricsProvider>(test_provider));
678 
679   service.InitializeMetricsRecordingState();
680   // Start() will create the first ongoing log.
681   service.Start();
682 
683   // OnDidCreateMetricsLog() is called once when the first ongoing log is
684   // created.
685   histogram_tester.ExpectBucketCount(kOnDidCreateMetricsLogHistogramName, true,
686                                      1);
687   service.Stop();
688 
689   // OnDidCreateMetricsLog() will be called during shutdown to emit histograms.
690   histogram_tester.ExpectBucketCount(kOnDidCreateMetricsLogHistogramName, true,
691                                      2);
692 
693   // Clean up histograms.
694   base::StatisticsRecorder::ForgetHistogramForTesting(
695       kOnDidCreateMetricsLogHistogramName);
696 }
697 
698 TEST_P(MetricsServiceTestWithFeatures, ProvideHistograms) {
699   base::HistogramTester histogram_tester;
700   EnableMetricsReporting();
701   TestMetricsServiceClient client;
702 
703   TestMetricsService service(GetMetricsStateManager(), &client,
704                              GetLocalState());
705 
706   // Create a provider that will log to |kProvideHistogramsHistogramName|
707   // in ProvideHistograms().
708   auto* test_provider = new TestMetricsProviderForProvideHistograms();
709   service.RegisterMetricsProvider(
710       std::unique_ptr<MetricsProvider>(test_provider));
711 
712   service.InitializeMetricsRecordingState();
713   // Start() will create the first ongoing log.
714   service.Start();
715 
716   // ProvideHistograms() is called in OnDidCreateMetricsLog().
717   histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 1);
718 
719   service.StageCurrentLogForTest();
720 
721   histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 2);
722 
723   service.Stop();
724 
725   // Clean up histograms.
726   base::StatisticsRecorder::ForgetHistogramForTesting(
727       kProvideHistogramsHistogramName);
728 }
729 
730 TEST_P(MetricsServiceTestWithFeatures, ProvideHistogramsEarlyReturn) {
731   base::HistogramTester histogram_tester;
732   EnableMetricsReporting();
733   TestMetricsServiceClient client;
734 
735   TestMetricsService service(GetMetricsStateManager(), &client,
736                              GetLocalState());
737 
738   // Create a provider that overrides OnDidCreateMetricsLog() to do nothing, so
739   // ProvideHistograms() is only called later via ProvideCurrentSessionData().
740   auto* test_provider =
741       new TestMetricsProviderForProvideHistogramsEarlyReturn();
742   service.RegisterMetricsProvider(
743       std::unique_ptr<MetricsProvider>(test_provider));
744 
745   service.InitializeMetricsRecordingState();
746   // Start() will create the first ongoing log.
747   service.Start();
748 
749   // Make sure no histogram is emitted when having an early return.
750   histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 0);
751 
752   service.StageCurrentLogForTest();
753   // ProvideHistograms() should be called in ProvideCurrentSessionData() if
754   // histograms haven't been emitted.
755   histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 1);
756 
757   // Try another log to make sure emission status is reset between logs.
758   service.LogStoreForTest()->DiscardStagedLog();
759   service.StageCurrentLogForTest();
760   histogram_tester.ExpectBucketCount(kProvideHistogramsHistogramName, true, 2);
761 
762   service.Stop();
763 
764   // Clean up histograms.
765   base::StatisticsRecorder::ForgetHistogramForTesting(
766       kProvideHistogramsHistogramName);
767 }
768 
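// Parameters: the startup-visibility scenario (with its expected beacon value)
// crossed with the kMetricsServiceDeltaSnapshotInBg toggle.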
769 INSTANTIATE_TEST_SUITE_P(
770     All,
771     MetricsServiceTestWithStartupVisibility,
772     ::testing::Combine(
773         ::testing::Values(
774             StartupVisibilityTestParams{
775                 .startup_visibility = StartupVisibility::kUnknown,
776                 .expected_beacon_value = true},
777             StartupVisibilityTestParams{
778                 .startup_visibility = StartupVisibility::kBackground,
779                 .expected_beacon_value = true},
780             StartupVisibilityTestParams{
781                 .startup_visibility = StartupVisibility::kForeground,
782                 .expected_beacon_value = false}),
783         ::testing::Bool()));
784 
785 TEST_P(MetricsServiceTestWithStartupVisibility, InitialStabilityLogAfterCrash) {
786   base::HistogramTester histogram_tester;
787   PrefService* local_state = GetLocalState();
788   EnableMetricsReporting();
789 
790   // Write a beacon file indicating that Chrome exited uncleanly. Note that the
791   // crash streak value is arbitrary.
792   const base::FilePath beacon_file_path =
793       user_data_dir_path().Append(kCleanExitBeaconFilename);
794   ASSERT_TRUE(base::WriteFile(
795       beacon_file_path, CleanExitBeacon::CreateBeaconFileContentsForTesting(
796                             /*exited_cleanly=*/false, /*crash_streak=*/1)));
797 
798   // Set up prefs to simulate restarting after a crash.
799 
800   // Save an existing system profile to prefs, to correspond to what would be
801   // saved from a previous session.
802   TestMetricsServiceClient client;
803   const std::string kCrashedVersion = "4.0.321.0-64-devel";
804   client.set_version_string(kCrashedVersion);
805   TestMetricsLog log("0a94430b-18e5-43c8-a657-580f7e855ce1", 1, &client);
806   DelegatingProvider delegating_provider;
807   TestMetricsService::RecordCurrentEnvironmentHelper(&log, local_state,
808                                                      &delegating_provider);
809 
810   // Record stability build time and version from previous session, so that
811   // stability metrics (including exited cleanly flag) won't be cleared.
812   EnvironmentRecorder(local_state)
813       .SetBuildtimeAndVersion(MetricsLog::GetBuildTime(),
814                               client.GetVersionString());
815 
816   const std::string kCurrentVersion = "5.0.322.0-64-devel";
817   client.set_version_string(kCurrentVersion);
818 
819   StartupVisibilityTestParams params = std::get<0>(GetParam());
820   TestMetricsService service(
821       GetMetricsStateManager(user_data_dir_path(), params.startup_visibility),
822       &client, local_state);
823   // Add a provider.
824   TestMetricsProvider* test_provider = new TestMetricsProvider();
825   service.RegisterMetricsProvider(
826       std::unique_ptr<MetricsProvider>(test_provider));
827   service.InitializeMetricsRecordingState();
828 
829   // Verify that Chrome is (or is not) watching for crashes by checking the
830   // beacon value.
831   std::string beacon_file_contents;
832   ASSERT_TRUE(base::ReadFileToString(beacon_file_path, &beacon_file_contents));
833   std::string partial_expected_contents;
834 #if BUILDFLAG(IS_ANDROID)
835   // Whether Chrome is watching for crashes after
836   // InitializeMetricsRecordingState() depends on the type of Android Chrome
837   // session. See the comments in MetricsService::InitializeMetricsState() for
838   // more details.
839   const std::string beacon_value =
840       params.expected_beacon_value ? "true" : "false";
841   partial_expected_contents = "exited_cleanly\":" + beacon_value;
842 #else
843   partial_expected_contents = "exited_cleanly\":false";
844 #endif  // BUILDFLAG(IS_ANDROID)
845   EXPECT_TRUE(base::Contains(beacon_file_contents, partial_expected_contents));
846 
847   // The initial stability log should be generated and persisted in unsent logs.
848   MetricsLogStore* test_log_store = service.LogStoreForTest();
849   EXPECT_TRUE(test_log_store->has_unsent_logs());
850   EXPECT_FALSE(test_log_store->has_staged_log());
851 
852   // Ensure that HasPreviousSessionData() is always called on providers,
853   // for consistency, even if other conditions already indicate their presence.
854   EXPECT_TRUE(test_provider->has_initial_stability_metrics_called());
855 
856   // The test provider should have been called upon to provide initial
857   // stability and regular stability metrics.
858   EXPECT_TRUE(test_provider->provide_initial_stability_metrics_called());
859   EXPECT_TRUE(test_provider->provide_stability_metrics_called());
860 
861   // The test provider should have been called when the initial stability log
862   // was closed.
863   EXPECT_TRUE(test_provider->record_initial_histogram_snapshots_called());
864 
865   // Stage the log and retrieve it.
866   test_log_store->StageNextLog();
867   EXPECT_TRUE(test_log_store->has_staged_log());
868 
869   ChromeUserMetricsExtension uma_log;
870   EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));
871 
872   EXPECT_TRUE(uma_log.has_client_id());
873   EXPECT_TRUE(uma_log.has_session_id());
874   EXPECT_TRUE(uma_log.has_system_profile());
875   EXPECT_EQ(0, uma_log.user_action_event_size());
876   EXPECT_EQ(0, uma_log.omnibox_event_size());
877   CheckForNonStabilityHistograms(uma_log);
878   EXPECT_EQ(
879       1, GetHistogramSampleCount(uma_log, "UMA.InitialStabilityRecordBeacon"));
880 
881   // Verify that the histograms emitted by the test provider made it into the
882   // log.
883   EXPECT_EQ(GetHistogramSampleCount(uma_log, "TestMetricsProvider.Initial"), 1);
884   EXPECT_EQ(GetHistogramSampleCount(uma_log, "TestMetricsProvider.Regular"), 1);
885 
886   EXPECT_EQ(kCrashedVersion, uma_log.system_profile().app_version());
887   EXPECT_EQ(kCurrentVersion,
888             uma_log.system_profile().log_written_by_app_version());
889 
890   histogram_tester.ExpectBucketCount("Stability.Counts2",
891                                      StabilityEventType::kBrowserCrash, 1);
892 }
893 
894 TEST_P(MetricsServiceTestWithFeatures,
895        InitialLogsHaveOnDidCreateMetricsLogHistograms) {
896   EnableMetricsReporting();
897   TestMetricsServiceClient client;
898   TestMetricsService service(GetMetricsStateManager(), &client,
899                              GetLocalState());
900 
901   // Create a provider that will log to |kOnDidCreateMetricsLogHistogramName|
902   // in OnDidCreateMetricsLog().
903   auto* test_provider = new TestMetricsProviderForOnDidCreateMetricsLog();
904   service.RegisterMetricsProvider(
905       std::unique_ptr<MetricsProvider>(test_provider));
906 
907   service.InitializeMetricsRecordingState();
908   // Start() will create the first ongoing log.
909   service.Start();
910   ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());
911 
912   // Fast forward the time by |initialization_delay|, which is when the pending
913   // init tasks will run.
914   base::TimeDelta initialization_delay = service.GetInitializationDelay();
915   task_environment_.FastForwardBy(initialization_delay);
916   EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());
917 
918   // Fast forward the time until the MetricsRotationScheduler first runs, which
919   // should complete the first ongoing log. Also verify that the test provider
920   // was called when closing the log.
921   // Note: The first log is only created after N = GetInitialIntervalSeconds()
922   // seconds since the start, and since we already fast forwarded by
923   // |initialization_delay| once, we only need to fast forward by
924   // N - |initialization_delay|.
925   task_environment_.FastForwardBy(
926       base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
927       initialization_delay);
928   ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
929   EXPECT_TRUE(test_provider->record_histogram_snapshots_called());
930 
931   MetricsLogStore* test_log_store = service.LogStoreForTest();
932 
933   // Stage the next log, which should be the first ongoing log.
934   // Check that it has one sample in |kOnDidCreateMetricsLogHistogramName|.
935   test_log_store->StageNextLog();
936   EXPECT_EQ(1, GetSampleCountOfOnDidCreateLogHistogram(test_log_store));
937 
938   // Discard the staged log and close and stage the next log, which is the
939   // second "ongoing log".
940   // Check that it has one sample in |kOnDidCreateMetricsLogHistogramName|.
941   // Also verify that the test provider was called when closing the new log.
942   test_provider->set_record_histogram_snapshots_called(false);
943   test_log_store->DiscardStagedLog();
944   service.StageCurrentLogForTest();
945   EXPECT_EQ(1, GetSampleCountOfOnDidCreateLogHistogram(test_log_store));
946   EXPECT_TRUE(test_provider->record_histogram_snapshots_called());
947 
948   // Check one more log for good measure.
949   test_provider->set_record_histogram_snapshots_called(false);
950   test_log_store->DiscardStagedLog();
951   service.StageCurrentLogForTest();
952   EXPECT_EQ(1, GetSampleCountOfOnDidCreateLogHistogram(test_log_store));
953   EXPECT_TRUE(test_provider->record_histogram_snapshots_called());
954 
955   service.Stop();
956 
957   // Clean up histograms.
958   base::StatisticsRecorder::ForgetHistogramForTesting(
959       kOnDidCreateMetricsLogHistogramName);
960 }
961 
962 TEST_P(MetricsServiceTestWithFeatures, MarkCurrentHistogramsAsReported) {
963   EnableMetricsReporting();
964   TestMetricsServiceClient client;
965   TestMetricsService service(GetMetricsStateManager(), &client,
966                              GetLocalState());
967 
968   // Emit to histogram |Test.Before.Histogram|.
969   ASSERT_FALSE(HistogramExists("Test.Before.Histogram"));
970   base::UmaHistogramBoolean("Test.Before.Histogram", true);
971   ASSERT_TRUE(HistogramExists("Test.Before.Histogram"));
972 
973   // Mark histogram data that has been collected until now (in particular, the
974   // |Test.Before.Histogram| sample) as reported.
975   service.MarkCurrentHistogramsAsReported();
976 
977   // Emit to histogram |Test.After.Histogram|.
978   ASSERT_FALSE(HistogramExists("Test.After.Histogram"));
979   base::UmaHistogramBoolean("Test.After.Histogram", true);
980   ASSERT_TRUE(HistogramExists("Test.After.Histogram"));
981 
982   // Verify that the |Test.Before.Histogram| sample was marked as reported, and
983   // is not included in the next snapshot.
984   EXPECT_EQ(0, GetHistogramDeltaTotalCount("Test.Before.Histogram"));
985   // Verify that the |Test.After.Histogram| sample was not marked as reported,
986   // and is included in the next snapshot.
987   EXPECT_EQ(1, GetHistogramDeltaTotalCount("Test.After.Histogram"));
988 
989   // Clean up histograms.
990   base::StatisticsRecorder::ForgetHistogramForTesting("Test.Before.Histogram");
991   base::StatisticsRecorder::ForgetHistogramForTesting("Test.After.Histogram");
992 }
993 
994 TEST_P(MetricsServiceTestWithFeatures, LogHasUserActions) {
995   // This test verifies that user actions are properly captured in UMA logs.
996   // In particular, it checks that the first log has actions, a behavior that
997   // was buggy in the past, plus additional checks for subsequent logs with
998   // different numbers of actions.
999   EnableMetricsReporting();
1000   TestMetricsServiceClient client;
1001   TestMetricsService service(GetMetricsStateManager(), &client,
1002                              GetLocalState());
1003 
1004   service.InitializeMetricsRecordingState();
1005 
1006   // Start() will create an initial log.
1007   service.Start();
1008   ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());
1009 
1010   base::RecordAction(base::UserMetricsAction("TestAction"));
1011   base::RecordAction(base::UserMetricsAction("TestAction"));
1012   base::RecordAction(base::UserMetricsAction("DifferentAction"));
1013 
1014   // Fast forward the time by |initialization_delay|, which is when the pending
1015   // init tasks will run.
1016   base::TimeDelta initialization_delay = service.GetInitializationDelay();
1017   task_environment_.FastForwardBy(initialization_delay);
1018   EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());
1019 
1020   // Fast forward the time until the MetricsRotationScheduler first runs, which
1021   // should complete the first ongoing log.
1022   // Note: The first log is only created after N = GetInitialIntervalSeconds()
1023   // seconds since the start, and since we already fast forwarded by
1024   // |initialization_delay| once, we only need to fast forward by
1025   // N - |initialization_delay|.
1026   task_environment_.FastForwardBy(
1027       base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
1028       initialization_delay);
1029   ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
1030 
1031   MetricsLogStore* test_log_store = service.LogStoreForTest();
1032 
1033   // Stage the next log, which should be the initial metrics log.
1034   test_log_store->StageNextLog();
1035   EXPECT_EQ(3, GetNumberOfUserActions(test_log_store));
1036 
1037   // Log another action.
1038   base::RecordAction(base::UserMetricsAction("TestAction"));
1039   test_log_store->DiscardStagedLog();
1040   service.StageCurrentLogForTest();
1041   EXPECT_EQ(1, GetNumberOfUserActions(test_log_store));
1042 
1043   // Check a log with no actions.
1044   test_log_store->DiscardStagedLog();
1045   service.StageCurrentLogForTest();
1046   EXPECT_EQ(0, GetNumberOfUserActions(test_log_store));
1047 
1048   // And another one with a couple.
1049   base::RecordAction(base::UserMetricsAction("TestAction"));
1050   base::RecordAction(base::UserMetricsAction("TestAction"));
1051   test_log_store->DiscardStagedLog();
1052   service.StageCurrentLogForTest();
1053   EXPECT_EQ(2, GetNumberOfUserActions(test_log_store));
1054 }
1055 
1056 TEST_P(MetricsServiceTestWithFeatures, FirstLogCreatedBeforeUnsentLogsSent) {
1057   // This test checks that we will create and serialize the first ongoing log
1058   // before starting to send unsent logs from the past session. The latter is
1059   // simulated by injecting some fake ongoing logs into the MetricsLogStore.
1060   EnableMetricsReporting();
1061   TestMetricsServiceClient client;
1062   TestMetricsService service(GetMetricsStateManager(), &client,
1063                              GetLocalState());
1064 
1065   service.InitializeMetricsRecordingState();
1066   // Start() will create the first ongoing log.
1067   service.Start();
1068   ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());
1069 
1070   MetricsLogStore* test_log_store = service.LogStoreForTest();
1071 
1072   // Set up the log store with an existing fake log entry. The string content
1073   // is never deserialized to proto, so we're just passing some dummy content.
1074   ASSERT_EQ(0u, test_log_store->initial_log_count());
1075   ASSERT_EQ(0u, test_log_store->ongoing_log_count());
1076   test_log_store->StoreLog("blah_blah", MetricsLog::ONGOING_LOG, LogMetadata(),
1077                            MetricsLogsEventManager::CreateReason::kUnknown);
1078   // Note: |initial_log_count()| refers to initial stability logs, so the above
1079   // log is counted as an ongoing log (per its type).
1080   ASSERT_EQ(0u, test_log_store->initial_log_count());
1081   ASSERT_EQ(1u, test_log_store->ongoing_log_count());
1082 
1083   // Fast forward the time by |initialization_delay|, which is when the pending
1084   // init tasks will run.
1085   base::TimeDelta initialization_delay = service.GetInitializationDelay();
1086   task_environment_.FastForwardBy(initialization_delay);
1087   EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());
1088 
1089   // Fast forward the time until the MetricsRotationScheduler first runs, which
1090   // should complete the first ongoing log.
1091   // Note: The first log is only created after N = GetInitialIntervalSeconds()
1092   // seconds since the start, and since we already fast forwarded by
1093   // |initialization_delay| once, we only need to fast forward by
1094   // N - |initialization_delay|.
1095   task_environment_.FastForwardBy(
1096       base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
1097       initialization_delay);
1098   ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
1099   // When the init task is complete, the first ongoing log should be created
1100   // and added to the ongoing logs.
1101   EXPECT_EQ(0u, test_log_store->initial_log_count());
1102   EXPECT_EQ(2u, test_log_store->ongoing_log_count());
1103 }
1104 
1105 TEST_P(MetricsServiceTestWithFeatures,
1106        MetricsProviderOnRecordingDisabledCalledOnInitialStop) {
1107   TestMetricsServiceClient client;
1108   TestMetricsService service(GetMetricsStateManager(), &client,
1109                              GetLocalState());
1110 
1111   TestMetricsProvider* test_provider = new TestMetricsProvider();
1112   service.RegisterMetricsProvider(
1113       std::unique_ptr<MetricsProvider>(test_provider));
1114 
1115   service.InitializeMetricsRecordingState();
1116   service.Stop();
1117 
1118   EXPECT_TRUE(test_provider->on_recording_disabled_called());
1119 }
1120 
1121 TEST_P(MetricsServiceTestWithFeatures, MetricsProvidersInitialized) {
1122   TestMetricsServiceClient client;
1123   TestMetricsService service(GetMetricsStateManager(), &client,
1124                              GetLocalState());
1125 
1126   TestMetricsProvider* test_provider = new TestMetricsProvider();
1127   service.RegisterMetricsProvider(
1128       std::unique_ptr<MetricsProvider>(test_provider));
1129 
1130   service.InitializeMetricsRecordingState();
1131 
1132   EXPECT_TRUE(test_provider->init_called());
1133 }
1134 
1135 // Verify that FieldTrials activated by a MetricsProvider are reported by the
1136 // FieldTrialsProvider.
1137 TEST_P(MetricsServiceTestWithFeatures, ActiveFieldTrialsReported) {
1138   EnableMetricsReporting();
1139   TestMetricsServiceClient client;
1140   TestMetricsService service(GetMetricsStateManager(), &client,
1141                              GetLocalState());
1142 
1143   // Set up FieldTrials.
1144   const std::string trial_name1 = "CoffeeExperiment";
1145   const std::string group_name1 = "Free";
1146   base::FieldTrial* trial1 =
1147       base::FieldTrialList::CreateFieldTrial(trial_name1, group_name1);
1148 
1149   const std::string trial_name2 = "DonutExperiment";
1150   const std::string group_name2 = "MapleBacon";
1151   base::FieldTrial* trial2 =
1152       base::FieldTrialList::CreateFieldTrial(trial_name2, group_name2);
1153 
1154   service.RegisterMetricsProvider(
1155       std::make_unique<ExperimentTestMetricsProvider>(trial1, trial2));
1156 
1157   service.InitializeMetricsRecordingState();
1158   service.Start();
1159   service.StageCurrentLogForTest();
1160 
1161   MetricsLogStore* test_log_store = service.LogStoreForTest();
1162   ChromeUserMetricsExtension uma_log;
1163   EXPECT_TRUE(DecodeLogDataToProto(test_log_store->staged_log(), &uma_log));
1164 
1165   // Verify that the reported FieldTrial IDs are for the trials set up by this
1166   // test.
1167   EXPECT_TRUE(
1168       IsFieldTrialPresent(uma_log.system_profile(), trial_name1, group_name1));
1169   EXPECT_TRUE(
1170       IsFieldTrialPresent(uma_log.system_profile(), trial_name2, group_name2));
1171 }
1172 
1173 TEST_P(MetricsServiceTestWithFeatures,
1174        SystemProfileDataProvidedOnEnableRecording) {
1175   EnableMetricsReporting();
1176   TestMetricsServiceClient client;
1177   TestMetricsService service(GetMetricsStateManager(), &client,
1178                              GetLocalState());
1179 
1180   TestMetricsProvider* test_provider = new TestMetricsProvider();
1181   service.RegisterMetricsProvider(
1182       std::unique_ptr<MetricsProvider>(test_provider));
1183 
1184   service.InitializeMetricsRecordingState();
1185 
1186   // ProvideSystemProfileMetrics() shouldn't be called initially.
1187   EXPECT_FALSE(test_provider->provide_system_profile_metrics_called());
1188   EXPECT_FALSE(service.persistent_system_profile_provided());
1189 
1190   service.Start();
1191 
1192   // Start should call ProvideSystemProfileMetrics().
1193   EXPECT_TRUE(test_provider->provide_system_profile_metrics_called());
1194   EXPECT_TRUE(service.persistent_system_profile_provided());
1195   EXPECT_FALSE(service.persistent_system_profile_complete());
1196 }
1197 
1198 // Verify that the two separate MetricsSchedulers (MetricsRotationScheduler and
1199 // MetricsUploadScheduler) function together properly.
1200 TEST_P(MetricsServiceTestWithFeatures, SplitRotation) {
1201   EnableMetricsReporting();
1202   TestMetricsServiceClient client;
1203   TestMetricsService service(GetMetricsStateManager(), &client,
1204                              GetLocalState());
1205   service.InitializeMetricsRecordingState();
1206   service.Start();
1207   ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());
1208 
1209   // Fast forward the time by |initialization_delay|, which is when the pending
1210   // init tasks will run.
1211   base::TimeDelta initialization_delay = service.GetInitializationDelay();
1212   task_environment_.FastForwardBy(initialization_delay);
1213   EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());
1214 
1215   // Fast forward the time until the MetricsRotationScheduler first runs, which
1216   // should complete the first ongoing log. The job that queries independent logs
1217   // from metrics providers will be started and will always remain as a pending
1218   // task. This should also mark the rotation scheduler as idle, so that the next
1219   // time we attempt to create a log, we return early (and don't create a log).
1220   // Note: The first log is only created after N = GetInitialIntervalSeconds()
1221   // seconds since the start, and since we already fast forwarded by
1222   // |initialization_delay| once, we only need to fast forward by
1223   // N - |initialization_delay|.
1224   MetricsLogStore* log_store = service.LogStoreForTest();
1225   EXPECT_FALSE(log_store->has_unsent_logs());
1226   task_environment_.FastForwardBy(
1227       base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
1228       initialization_delay);
1229   ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
1230   EXPECT_TRUE(log_store->has_unsent_logs());
1231   EXPECT_EQ(1U, log_store->ongoing_log_count());
1232 
1233   // There should be three (delayed) tasks: one for querying independent logs
1234   // from metrics providers, one for uploading the unsent log, and one for
1235   // creating the next log.
1236   EXPECT_EQ(3U, task_environment_.GetPendingMainThreadTaskCount());
1237 
1238   // Fast forward the time so that the upload loop starts uploading logs.
1239   base::TimeDelta unsent_log_interval =
1240       MetricsUploadScheduler::GetUnsentLogsInterval();
1241   task_environment_.FastForwardBy(unsent_log_interval);
1242   EXPECT_TRUE(client.uploader()->is_uploading());
1243   // There should be two (delayed) tasks: one for querying independent logs from
1244   // metrics providers, and one for creating the next log. I.e., the task to
1245   // upload a log should be running, and should not be in the task queue
1246   // anymore. The uploading of this log will only be completed later on in order
1247   // to simulate an edge case here.
1248   EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());
1249 
1250   // Fast forward the time so that the task to create another log is run. This
1251   // time, however, it should return early due to being idle (i.e., not create a
1252   // log), and it should not post another task to create another log. I.e.,
1253   // there should only be one (delayed) task: one for querying independent logs
1254   // from metrics providers.
1255   // Note: The log is only created after |rotation_scheduler_interval| seconds,
1256   // and since we already fast forwarded by |unsent_log_interval| once, we only
1257   // need to fast forward by
1258   // |rotation_scheduler_interval| - |unsent_log_interval|.
1259   base::TimeDelta rotation_scheduler_interval = client.GetUploadInterval();
1260   task_environment_.FastForwardBy(rotation_scheduler_interval -
1261                                   unsent_log_interval);
1262   EXPECT_EQ(1U, log_store->ongoing_log_count());
1263   EXPECT_EQ(1U, task_environment_.GetPendingMainThreadTaskCount());
1264 
1265   // Simulate completing the upload. Since there is no other log to be uploaded,
1266   // no task should be re-posted. I.e., there should only be one (delayed)
1267   // task: one for querying independent logs from metrics providers.
1268   client.uploader()->CompleteUpload(200);
1269   EXPECT_FALSE(client.uploader()->is_uploading());
1270   EXPECT_FALSE(log_store->has_unsent_logs());
1271   EXPECT_EQ(1U, task_environment_.GetPendingMainThreadTaskCount());
1272 
1273   // Simulate interacting with the browser, which should 1) set the rotation
1274   // scheduler to not idle, 2) queue a task to upload the next log (if there is
1275   // one), and 3) queue a task to create the next log. I.e., there should be
1276   // three (delayed) tasks: one for querying independent logs from metrics
1277   // providers, one for uploading an unsent log, and one for creating the next
1278   // log.
1279   service.OnApplicationNotIdle();
1280   EXPECT_EQ(3U, task_environment_.GetPendingMainThreadTaskCount());
1281 
1282   // We now simulate a more common scenario.
1283 
1284   // Fast forward the time so that the task to upload a log runs. Since there
1285   // should be no logs, it should return early, and not re-post a task. I.e.,
1286   // there should be two tasks: one for querying independent logs from metrics
1287   // providers, and one for creating the next log.
1288   task_environment_.FastForwardBy(unsent_log_interval);
1289   EXPECT_FALSE(client.uploader()->is_uploading());
1290   EXPECT_FALSE(log_store->has_unsent_logs());
1291   EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());
1292 
1293   // Fast forward the time so that the next log is created. It should re-post
1294   // a task to create a new log, and should also re-start the upload scheduler.
1295   // I.e., there should be three (delayed) tasks: one for querying independent
1296   // logs from metrics providers, one for uploading an unsent log, and one for
1297   // creating the next log.
1298   // Note: The log is only created after |rotation_scheduler_interval| seconds,
1299   // and since we already fast forwarded by |unsent_log_interval| once, we only
1300   // need to fast forward by
1301   // |rotation_scheduler_interval| - |unsent_log_interval|.
1302   task_environment_.FastForwardBy(rotation_scheduler_interval -
1303                                   unsent_log_interval);
1304   EXPECT_TRUE(log_store->has_unsent_logs());
1305   EXPECT_EQ(3U, task_environment_.GetPendingMainThreadTaskCount());
1306 
1307   // Fast forward the time so that the task to upload a log runs.
1308   task_environment_.FastForwardBy(unsent_log_interval);
1309   EXPECT_TRUE(client.uploader()->is_uploading());
1310   // There should be two (delayed) tasks: one for querying independent logs from
1311   // metrics providers, and one for creating the next log. I.e., the task to
1312   // upload a log should be running, and should not be in the task queue
1313   // anymore.
1314   EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());
1315 
1316   // Simulate completing the upload. However, before doing so, add a dummy log
1317   // in order to test that when the upload task completes, if it detects another
1318   // log, it will re-post a task to upload the next log. I.e., after uploading
1319   // the log, there should be three (delayed) tasks: one for querying
1320   // independent logs from metrics providers, one for uploading an unsent log,
1321   // and one for creating the next log.
1322   log_store->StoreLog("dummy log", MetricsLog::LogType::ONGOING_LOG,
1323                       LogMetadata(),
1324                       MetricsLogsEventManager::CreateReason::kUnknown);
1325   EXPECT_EQ(2U, log_store->ongoing_log_count());
1326   client.uploader()->CompleteUpload(200);
1327   EXPECT_FALSE(client.uploader()->is_uploading());
1328   EXPECT_EQ(1U, log_store->ongoing_log_count());
1329   EXPECT_EQ(3U, task_environment_.GetPendingMainThreadTaskCount());
1330 
1331   // Fast forward the time so that the task to upload a log runs.
1332   task_environment_.FastForwardBy(unsent_log_interval);
1333   EXPECT_TRUE(client.uploader()->is_uploading());
1334   // There should be two (delayed) tasks: one for querying independent logs from
1335   // metrics providers, and one for creating the next log. I.e., the task to
1336   // upload a log should be running, and should not be in the task queue
1337   // anymore.
1338   EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());
1339 
1340   // Simulate completing the upload. Since there is no other log to be uploaded,
1341   // no task should be posted. I.e., there should only be two (delayed) tasks:
1342   // one for querying independent logs from metrics providers, and one for creating the next log.
1343   client.uploader()->CompleteUpload(200);
1344   EXPECT_FALSE(client.uploader()->is_uploading());
1345   EXPECT_FALSE(log_store->has_unsent_logs());
1346   EXPECT_EQ(2U, task_environment_.GetPendingMainThreadTaskCount());
1347 
1348   // Fast forward the time so that the task to create another log is run. It
1349   // should return early due to being idle (i.e., not create a log), and it
1350   // should not post another task to create another log. I.e., there should only
1351   // be one (delayed) task: one for querying independent logs from metrics
1352   // providers.
1353   // Note: The log is only created after |rotation_scheduler_interval| seconds,
1354   // and since we already fast forwarded by |unsent_log_interval| twice, we only
1355   // need to fast forward by
1356   // |rotation_scheduler_interval| - 2 * |unsent_log_interval|.
1357   task_environment_.FastForwardBy(rotation_scheduler_interval -
1358                                   2 * unsent_log_interval);
1359   EXPECT_FALSE(log_store->has_unsent_logs());
1360   EXPECT_EQ(1U, task_environment_.GetPendingMainThreadTaskCount());
1361 }
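
// Rough timeline of the first rotation exercised above (the intervals are the
// values queried at runtime by the test, not hard-coded constants):
//
//   t0                          Start(); state == INIT_TASK_SCHEDULED
//   t0 + initialization_delay   init tasks run; state == INIT_TASK_DONE
//   t0 + initial interval       first ongoing log closed; state == SENDING_LOGS
//     + unsent_log_interval     upload task runs; uploader is uploading
//     + rotation interval       rotation task runs; returns early while idle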
1362 
1363 TEST_P(MetricsServiceTestWithFeatures, LastLiveTimestamp) {
1364   EnableMetricsReporting();
1365   TestMetricsServiceClient client;
1366   TestMetricsService service(GetMetricsStateManager(), &client,
1367                              GetLocalState());
1368 
1369   base::Time initial_last_live_time =
1370       GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp);
1371 
1372   service.InitializeMetricsRecordingState();
1373   service.Start();
1374   ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());
1375 
1376   // Fast forward the time by |initialization_delay|, which is when the pending
1377   // init tasks will run.
1378   base::TimeDelta initialization_delay = service.GetInitializationDelay();
1379   task_environment_.FastForwardBy(initialization_delay);
1380   EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());
1381 
1382   // Fast forward the time until the MetricsRotationScheduler first runs, which
1383   // should complete the first ongoing log.
1385   // Note: The first log is only created after N = GetInitialIntervalSeconds()
1386   // seconds since the start, and since we already fast forwarded by
1387   // |initialization_delay| once, we only need to fast forward by
1388   // N - |initialization_delay|.
1389   task_environment_.FastForwardBy(
1390       base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
1391       initialization_delay);
1392   ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
1393   size_t num_pending_tasks = task_environment_.GetPendingMainThreadTaskCount();
1394 
1395   service.StartUpdatingLastLiveTimestamp();
1396 
1397   // Starting the update sequence should not write anything, but should
1398   // set up for a later write.
1399   EXPECT_EQ(
1400       initial_last_live_time,
1401       GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp));
1402   EXPECT_EQ(num_pending_tasks + 1,
1403             task_environment_.GetPendingMainThreadTaskCount());
1404 
1405   // Fast forward the time so that the task to update the "last alive timestamp"
1406   // runs.
1407   task_environment_.FastForwardBy(service.GetUpdateLastAliveTimestampDelay());
1408 
1409   // Verify that the time has updated in local state.
1410   base::Time updated_last_live_time =
1411       GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp);
1412   EXPECT_LT(initial_last_live_time, updated_last_live_time);
1413 
1414   // Double check that an update was scheduled again.
1415   task_environment_.FastForwardBy(service.GetUpdateLastAliveTimestampDelay());
1416   EXPECT_LT(
1417       updated_last_live_time,
1418       GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp));
1419 }
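
// What each firing of the recurring update presumably does (a sketch, not the
// actual MetricsService implementation):
//
//   local_state->SetTime(prefs::kStabilityBrowserLastLiveTimeStamp,
//                        base::Time::Now());
//   // ...and then re-posts itself after GetUpdateLastAliveTimestampDelay().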
1420 
1421 TEST_P(MetricsServiceTestWithFeatures, EnablementObserverNotification) {
1422   EnableMetricsReporting();
1423   TestMetricsServiceClient client;
1424   TestMetricsService service(GetMetricsStateManager(), &client,
1425                              GetLocalState());
1426   service.InitializeMetricsRecordingState();
1427 
1428   std::optional<bool> enabled;
1429   auto observer = [&enabled](bool notification) { enabled = notification; };
1430 
1431   auto subscription =
1432       service.AddEnablementObserver(base::BindLambdaForTesting(observer));
1433 
1434   service.Start();
1435   ASSERT_TRUE(enabled.has_value());
1436   EXPECT_TRUE(enabled.value());
1437 
1438   enabled.reset();
1439 
1440   service.Stop();
1441   ASSERT_TRUE(enabled.has_value());
1442   EXPECT_FALSE(enabled.value());
1443 }
1444 
1445 // Verifies that when a cloned install is detected, logs are purged.
1446 TEST_P(MetricsServiceTestWithFeatures, PurgeLogsOnClonedInstallDetected) {
1447   EnableMetricsReporting();
1448   TestMetricsServiceClient client;
1449   TestMetricsService service(GetMetricsStateManager(), &client,
1450                              GetLocalState());
1451   service.InitializeMetricsRecordingState();
1452 
1453   // Store various logs.
1454   MetricsLogStore* test_log_store = service.LogStoreForTest();
1455   test_log_store->StoreLog("dummy log data", MetricsLog::ONGOING_LOG,
1456                            LogMetadata(),
1457                            MetricsLogsEventManager::CreateReason::kUnknown);
1458   test_log_store->StageNextLog();
1459   test_log_store->StoreLog("more dummy log data", MetricsLog::ONGOING_LOG,
1460                            LogMetadata(),
1461                            MetricsLogsEventManager::CreateReason::kUnknown);
1462   test_log_store->StoreLog("dummy stability log",
1463                            MetricsLog::INITIAL_STABILITY_LOG, LogMetadata(),
1464                            MetricsLogsEventManager::CreateReason::kUnknown);
1465   test_log_store->SetAlternateOngoingLogStore(InitializeTestLogStoreAndGet());
1466   test_log_store->StoreLog("dummy log for alternate ongoing log store",
1467                            MetricsLog::ONGOING_LOG, LogMetadata(),
1468                            MetricsLogsEventManager::CreateReason::kUnknown);
1469   EXPECT_TRUE(test_log_store->has_staged_log());
1470   EXPECT_TRUE(test_log_store->has_unsent_logs());
1471 
1472   ClonedInstallDetector* cloned_install_detector =
1473       GetMetricsStateManager()->cloned_install_detector_for_testing();
1474 
1475   static constexpr char kTestRawId[] = "test";
1476   // Hashed machine id for |kTestRawId|.
1477   static constexpr int kTestHashedId = 2216819;
1478 
1479   // Save a machine id that will not cause a clone to be detected.
1480   GetLocalState()->SetInteger(prefs::kMetricsMachineId, kTestHashedId);
1481   cloned_install_detector->SaveMachineId(GetLocalState(), kTestRawId);
1482   // Verify that the logs are still present.
1483   EXPECT_TRUE(test_log_store->has_staged_log());
1484   EXPECT_TRUE(test_log_store->has_unsent_logs());
1485 
1486   // Save a machine id that will cause a clone to be detected.
1487   GetLocalState()->SetInteger(prefs::kMetricsMachineId, kTestHashedId + 1);
1488   cloned_install_detector->SaveMachineId(GetLocalState(), kTestRawId);
1489   // Verify that the logs were purged.
1490   EXPECT_FALSE(test_log_store->has_staged_log());
1491   EXPECT_FALSE(test_log_store->has_unsent_logs());
1492 }
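
// A rough sketch of the detection exercised above, assuming SaveMachineId()
// hashes the raw id and compares it to the previously stored pref value
// (HashMachineId() below is a hypothetical helper, not a real API):
//
//   int hashed = HashMachineId(raw_id);  // hypothetical
//   if (local_state->GetInteger(prefs::kMetricsMachineId) != hashed) {
//     // Clone detected; MetricsService reacts by purging all unsent logs.
//   }
//   local_state->SetInteger(prefs::kMetricsMachineId, hashed);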
1493 
1494 #if BUILDFLAG(IS_CHROMEOS_LACROS)
1495 // ResetClientId is only enabled on certain targets.
1496 TEST_P(MetricsServiceTestWithFeatures, SetClientIdToExternalId) {
1497   EnableMetricsReporting();
1498   TestMetricsServiceClient client;
1499   TestMetricsService service(GetMetricsStateManager(), &client,
1500                              GetLocalState());
1501 
1502   const std::string client_id = "d92ad666-a420-4c73-8718-94311ae2ff5f";
1503 
1504   EXPECT_NE(service.GetClientId(), client_id);
1505 
1506   service.SetExternalClientId(client_id);
1507   // Reset will cause the client id to be regenerated. If an external client id
1508   // is provided, the service should use that id instead of generating its own.
1509   service.ResetClientId();
1510 
1511   EXPECT_EQ(service.GetClientId(), client_id);
1512 }
1513 #endif  // BUILDFLAG(IS_CHROMEOS_LACROS)
1514 
1515 #if BUILDFLAG(IS_CHROMEOS_ASH)
1516 TEST_P(MetricsServiceTestWithFeatures,
1517        OngoingLogNotFlushedBeforeInitialLogWhenUserLogStoreSet) {
1518   EnableMetricsReporting();
1519   TestMetricsServiceClient client;
1520   TestMetricsService service(GetMetricsStateManager(), &client,
1521                              GetLocalState());
1522 
1523   service.InitializeMetricsRecordingState();
1524   // Start() will create the first ongoing log.
1525   service.Start();
1526   ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());
1527 
1528   MetricsLogStore* test_log_store = service.LogStoreForTest();
1529   std::unique_ptr<TestUnsentLogStore> alternate_ongoing_log_store =
1530       InitializeTestLogStoreAndGet();
1531   TestUnsentLogStore* alternate_ongoing_log_store_ptr =
1532       alternate_ongoing_log_store.get();
1533 
1534   ASSERT_EQ(0u, test_log_store->initial_log_count());
1535   ASSERT_EQ(0u, test_log_store->ongoing_log_count());
1536 
1537   service.SetUserLogStore(std::move(alternate_ongoing_log_store));
1538 
1539   // Initial logs should not have been collected yet, so the ongoing log being
1540   // recorded should not be flushed when a user log store is mounted.
1541   ASSERT_EQ(0u, test_log_store->initial_log_count());
1542   ASSERT_EQ(0u, test_log_store->ongoing_log_count());
1543 
1544   // Fast forward the time by |initialization_delay|, which is when the pending
1545   // init tasks will run.
1546   base::TimeDelta initialization_delay = service.GetInitializationDelay();
1547   task_environment_.FastForwardBy(initialization_delay);
1548   EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());
1549 
1550   // Fast forward the time until the MetricsRotationScheduler first runs, which
1551   // should complete the first ongoing log.
1552   // Note: The first log is only created after N = GetInitialIntervalSeconds()
1553   // seconds since the start, and since we already fast forwarded by
1554   // |initialization_delay| once, we only need to fast forward by
1555   // N - |initialization_delay|.
1556   task_environment_.FastForwardBy(
1557       base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
1558       initialization_delay);
1559   ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
1560   // When the init task is complete, the first ongoing log should be created
1561   // in the alternate ongoing log store.
1562   EXPECT_EQ(0u, test_log_store->initial_log_count());
1563   EXPECT_EQ(0u, test_log_store->ongoing_log_count());
1564   EXPECT_EQ(1u, alternate_ongoing_log_store_ptr->size());
1565 }
1566 
1567 TEST_P(MetricsServiceTestWithFeatures,
1568        OngoingLogFlushedAfterInitialLogWhenUserLogStoreSet) {
1569   EnableMetricsReporting();
1570   TestMetricsServiceClient client;
1571   TestMetricsService service(GetMetricsStateManager(), &client,
1572                              GetLocalState());
1573 
1574   service.InitializeMetricsRecordingState();
1575   // Start() will create the first ongoing log.
1576   service.Start();
1577   ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());
1578 
1579   MetricsLogStore* test_log_store = service.LogStoreForTest();
1580   std::unique_ptr<TestUnsentLogStore> alternate_ongoing_log_store =
1581       InitializeTestLogStoreAndGet();
1582 
1583   // Init state.
1584   ASSERT_EQ(0u, test_log_store->initial_log_count());
1585   ASSERT_EQ(0u, test_log_store->ongoing_log_count());
1586 
1587   // Fast forward the time by |initialization_delay|, which is when the pending
1588   // init tasks will run.
1589   base::TimeDelta initialization_delay = service.GetInitializationDelay();
1590   task_environment_.FastForwardBy(initialization_delay);
1591   EXPECT_EQ(TestMetricsService::INIT_TASK_DONE, service.state());
1592 
1593   // Fast forward the time until the MetricsRotationScheduler first runs, which
1594   // should complete the first ongoing log.
1595   // Note: The first log is only created after N = GetInitialIntervalSeconds()
1596   // seconds since the start, and since we already fast forwarded by
1597   // |initialization_delay| once, we only need to fast forward by
1598   // N - |initialization_delay|.
1599   task_environment_.FastForwardBy(
1600       base::Seconds(MetricsScheduler::GetInitialIntervalSeconds()) -
1601       initialization_delay);
1602   ASSERT_EQ(TestMetricsService::SENDING_LOGS, service.state());
1603   ASSERT_EQ(0u, test_log_store->initial_log_count());
1604   ASSERT_EQ(1u, test_log_store->ongoing_log_count());
1605 
1606   // User log store set post-init.
1607   service.SetUserLogStore(std::move(alternate_ongoing_log_store));
1608 
1609   // Setting the user log store should have flushed another ongoing log.
1610   ASSERT_EQ(0u, test_log_store->initial_log_count());
1611   ASSERT_EQ(2u, test_log_store->ongoing_log_count());
1612 }
1613 
1614 TEST_P(MetricsServiceTestWithFeatures,
1615        OngoingLogDiscardedAfterEarlyUnsetUserLogStore) {
1616   EnableMetricsReporting();
1617   TestMetricsServiceClient client;
1618   TestMetricsService service(GetMetricsStateManager(), &client,
1619                              GetLocalState());
1620 
1621   service.InitializeMetricsRecordingState();
1622   // Start() will create the first ongoing log.
1623   service.Start();
1624   ASSERT_EQ(TestMetricsService::INIT_TASK_SCHEDULED, service.state());
1625 
1626   MetricsLogStore* test_log_store = service.LogStoreForTest();
1627   std::unique_ptr<TestUnsentLogStore> alternate_ongoing_log_store =
1628       InitializeTestLogStoreAndGet();
1629 
1630   ASSERT_EQ(0u, test_log_store->initial_log_count());
1631   ASSERT_EQ(0u, test_log_store->ongoing_log_count());
1632 
1633   service.SetUserLogStore(std::move(alternate_ongoing_log_store));
1634 
1635   // Unset the user log store before we have started sending logs.
1636   base::UmaHistogramBoolean("Test.Before.Histogram", true);
1637   service.UnsetUserLogStore();
1638   base::UmaHistogramBoolean("Test.After.Histogram", true);
1639 
1640   // Verify that the current log was discarded.
1641   EXPECT_FALSE(service.GetCurrentLogForTest());
1642 
1643   // Verify that histograms from before unsetting the user log store were
1644   // flushed.
1645   EXPECT_EQ(0, GetHistogramDeltaTotalCount("Test.Before.Histogram"));
1646   EXPECT_EQ(1, GetHistogramDeltaTotalCount("Test.After.Histogram"));
1647 
1648   // Clean up histograms.
1649   base::StatisticsRecorder::ForgetHistogramForTesting("Test.Before.Histogram");
1650   base::StatisticsRecorder::ForgetHistogramForTesting("Test.After.Histogram");
1651 }
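
// The GetHistogramDeltaTotalCount() helper used above is defined earlier in
// this file; a sketch of one way such a delta could be computed (an
// assumption, not necessarily the helper's actual implementation):
//
//   base::HistogramBase* histogram =
//       base::StatisticsRecorder::FindHistogram("Test.After.Histogram");
//   int delta = histogram ? histogram->SnapshotDelta()->TotalCount() : 0;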
1652 #endif  // BUILDFLAG(IS_CHROMEOS_ASH)
1653 
1654 }  // namespace metrics
1655