1 // Copyright 2023 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/metrics/histogram.h"
6
7 #include <memory>
8 #include <set>
9 #include <string>
10 #include <vector>
11
12 #include "base/atomicops.h"
13 #include "base/containers/span.h"
14 #include "base/metrics/bucket_ranges.h"
15 #include "base/metrics/persistent_histogram_allocator.h"
16 #include "base/metrics/sparse_histogram.h"
17 #include "base/no_destructor.h"
18 #include "base/strings/stringprintf.h"
19 #include "base/test/scoped_feature_list.h"
20 #include "base/threading/simple_thread.h"
21 #include "testing/gtest/include/gtest/gtest.h"
22
23 namespace base {
24
25 namespace {
26
GetPermanentName(const std::string & name)27 char const* GetPermanentName(const std::string& name) {
28 // A set of histogram names that provides the "permanent" lifetime required
29 // by histogram objects for those strings that are not already code constants
30 // or held in persistent memory.
31 static base::NoDestructor<std::set<std::string>> permanent_names;
32
33 auto result = permanent_names->insert(name);
34 return result.first->c_str();
35 }
36
GetBucketIndex(HistogramBase::Sample value,const BucketRanges * ranges)37 size_t GetBucketIndex(HistogramBase::Sample value, const BucketRanges* ranges) {
38 size_t bucket_count = ranges->bucket_count();
39 EXPECT_GE(bucket_count, 1U);
40 for (size_t i = 0; i < bucket_count; ++i) {
41 if (ranges->range(i) > value) {
42 return i - 1;
43 }
44 }
45 return bucket_count - 1;
46 }
47
48 // Runs a task in a thread that will emit |num_emission_| times the passed
49 // |histograms| and snapshot them. The thread will also keep track of the
50 // actual samples emitted, as well as the ones found in the snapshots taken, so
51 // that they can be compared.
52 class SnapshotDeltaThread : public SimpleThread {
53 public:
SnapshotDeltaThread(const std::string & name,size_t num_emissions,span<HistogramBase * > histograms,HistogramBase::Sample histogram_max,subtle::Atomic32 * real_total_samples_count,span<subtle::Atomic32> real_bucket_counts,subtle::Atomic32 * snapshots_total_samples_count,span<subtle::Atomic32> snapshots_bucket_counts)54 SnapshotDeltaThread(const std::string& name,
55 size_t num_emissions,
56 span<HistogramBase*> histograms,
57 HistogramBase::Sample histogram_max,
58 subtle::Atomic32* real_total_samples_count,
59 span<subtle::Atomic32> real_bucket_counts,
60 subtle::Atomic32* snapshots_total_samples_count,
61 span<subtle::Atomic32> snapshots_bucket_counts)
62 : SimpleThread(name, Options()),
63 num_emissions_(num_emissions),
64 histograms_(histograms),
65 histogram_max_(histogram_max),
66 real_total_samples_count_(real_total_samples_count),
67 real_bucket_counts_(real_bucket_counts),
68 snapshots_total_samples_count_(snapshots_total_samples_count),
69 snapshots_bucket_counts_(snapshots_bucket_counts) {}
70
71 SnapshotDeltaThread(const SnapshotDeltaThread&) = delete;
72 SnapshotDeltaThread& operator=(const SnapshotDeltaThread&) = delete;
73
74 ~SnapshotDeltaThread() override = default;
75
Run()76 void Run() override {
77 for (size_t i = 0; i < num_emissions_; ++i) {
78 for (HistogramBase* histogram : histograms_) {
79 // Emit a random sample. rand() is used here to generate such a sample,
80 // but the randomness does not really matter as thread-safety is what is
81 // being tested here and there is already a lot of non-determinism
82 // surrounding scheduling.
83 Histogram::Sample sample = rand() % histogram_max_;
84 histogram->Add(sample);
85
86 // Take a snapshot of the histogram. Because of the multithreading
87 // nature of the test, this may or may not include the sample that was
88 // just emitted, and/or may include samples that came from other
89 // threads.
90 std::unique_ptr<HistogramSamples> snapshot = histogram->SnapshotDelta();
91
92 // Store the sample that was emitted as well as the snapshot so that
93 // the totals can be compared later on.
94 StoreActualSample(histogram, sample);
95 StoreSnapshot(std::move(snapshot));
96 }
97 }
98 }
99
100 private:
101 // Stores an actual |sample| that was emitted for |histogram|. This is done
102 // to compare what was found in histogram snapshots (see StoreSnapshot()).
StoreActualSample(HistogramBase * histogram,Histogram::Sample sample)103 void StoreActualSample(HistogramBase* histogram, Histogram::Sample sample) {
104 subtle::NoBarrier_AtomicIncrement(real_total_samples_count_, 1);
105 switch (histogram->GetHistogramType()) {
106 case HISTOGRAM: {
107 const BucketRanges* ranges =
108 static_cast<Histogram*>(histogram)->bucket_ranges();
109 size_t bucket_index = GetBucketIndex(sample, ranges);
110 size_t bucket_min = ranges->range(bucket_index);
111 subtle::NoBarrier_AtomicIncrement(&real_bucket_counts_[bucket_min], 1);
112 break;
113 }
114 case SPARSE_HISTOGRAM:
115 subtle::NoBarrier_AtomicIncrement(&real_bucket_counts_[sample], 1);
116 break;
117 case LINEAR_HISTOGRAM:
118 case BOOLEAN_HISTOGRAM:
119 case CUSTOM_HISTOGRAM:
120 case DUMMY_HISTOGRAM:
121 NOTREACHED();
122 }
123 }
124
125 // Store a |snapshot| that was taken of a histogram. This is done to compare
126 // what was actually emitted (see StoreActualSample()).
StoreSnapshot(std::unique_ptr<HistogramSamples> snapshot)127 void StoreSnapshot(std::unique_ptr<HistogramSamples> snapshot) {
128 HistogramBase::Count snapshot_samples_count = snapshot->TotalCount();
129 subtle::NoBarrier_AtomicIncrement(snapshots_total_samples_count_,
130 snapshot_samples_count);
131 for (auto it = snapshot->Iterator(); !it->Done(); it->Next()) {
132 HistogramBase::Sample min;
133 int64_t max;
134 HistogramBase::Count count;
135 it->Get(&min, &max, &count);
136 // Verify that the snapshot contains only positive bucket counts.
137 // This is to ensure SnapshotDelta() is fully thread-safe, not just
138 // "eventually consistent".
139 ASSERT_GE(count, 0);
140 subtle::NoBarrier_AtomicIncrement(&snapshots_bucket_counts_[min], count);
141 }
142 }
143
144 const size_t num_emissions_;
145 span<HistogramBase*> histograms_;
146 const HistogramBase::Sample histogram_max_;
147 raw_ptr<subtle::Atomic32> real_total_samples_count_;
148 span<subtle::Atomic32> real_bucket_counts_;
149 raw_ptr<subtle::Atomic32> snapshots_total_samples_count_;
150 span<subtle::Atomic32> snapshots_bucket_counts_;
151 };
152
153 } // namespace
154
// Test fixture that owns a GlobalHistogramAllocator backed by local memory
// plus a second allocator view of the same memory (to simulate a subprocess),
// and keeps alive every histogram object created during a test.
class HistogramThreadsafeTest : public testing::Test {
 public:
  HistogramThreadsafeTest() = default;

  HistogramThreadsafeTest(const HistogramThreadsafeTest&) = delete;
  HistogramThreadsafeTest& operator=(const HistogramThreadsafeTest&) = delete;

  ~HistogramThreadsafeTest() override = default;

  void SetUp() override {
    // 4 MiB of local memory backing the global allocator; id/name are unused
    // by the test.
    GlobalHistogramAllocator::CreateWithLocalMemory(4 << 20, /*id=*/0,
                                                    /*name=*/"");
    ASSERT_TRUE(GlobalHistogramAllocator::Get());

    // Create a second view of the persistent memory with a new persistent
    // histogram allocator in order to simulate a subprocess with its own view
    // of some shared memory.
    PersistentMemoryAllocator* allocator =
        GlobalHistogramAllocator::Get()->memory_allocator();
    std::unique_ptr<PersistentMemoryAllocator> memory_view =
        std::make_unique<PersistentMemoryAllocator>(
            /*base=*/const_cast<void*>(allocator->data()), allocator->size(),
            /*page_size=*/0, /*id=*/0,
            /*name=*/"GlobalHistogramAllocatorView",
            PersistentMemoryAllocator::kReadWrite);
    allocator_view_ =
        std::make_unique<PersistentHistogramAllocator>(std::move(memory_view));
  }

  void TearDown() override {
    // Destroy histogram objects before tearing down the allocators whose
    // memory backs some of them.
    histograms_.clear();
    allocator_view_.reset();
    GlobalHistogramAllocator::ReleaseForTesting();
    ASSERT_FALSE(GlobalHistogramAllocator::Get());
  }

  // Creates and returns various histograms (some that live on the persistent
  // memory, some that live on the local heap, and some that point to the same
  // underlying data as those that live on the persistent memory but are
  // different objects).
  //
  // |suffix| is appended to every histogram name so each test iteration gets
  // fresh histograms; |max| and |bucket_count| configure the numeric
  // histograms. The returned vector's index layout is relied upon by the
  // test: [0] numeric, [1] sparse, [2] local-heap numeric, [3] local-heap
  // sparse, [4]/[5] "subprocess" duplicates of [0]/[1], [6]/[7] same-allocator
  // duplicates of [0]/[1].
  std::vector<HistogramBase*> CreateHistograms(size_t suffix,
                                               HistogramBase::Sample max,
                                               size_t bucket_count) {
    // There are 4 ways histograms can store their underlying data:
    // PersistentSampleVector, PersistentSampleMap, SampleVector, and SampleMap.
    // The first two are intended for when the data may be either persisted to a
    // file or shared with another process. The last two are when the histograms
    // are to be used by the local process only.
    // Create 4 histograms that use those storage structures respectively.
    std::vector<HistogramBase*> histograms;

    // Create histograms on the persistent memory (created through the
    // GlobalHistogramAllocator, which is automatically done when using the
    // FactoryGet() API). There is no need to store them in |histograms_|
    // because these histograms are owned by the StatisticsRecorder.
    std::string numeric_histogram_name =
        StringPrintf("NumericHistogram%zu", suffix);
    Histogram* numeric_histogram = static_cast<Histogram*>(
        Histogram::FactoryGet(numeric_histogram_name, /*minimum=*/1, max,
                              bucket_count, /*flags=*/HistogramBase::kNoFlags));
    histograms.push_back(numeric_histogram);
    std::string sparse_histogram_name =
        StringPrintf("SparseHistogram%zu", suffix);
    HistogramBase* sparse_histogram =
        SparseHistogram::FactoryGet(sparse_histogram_name,
                                    /*flags=*/HistogramBase::kNoFlags);
    histograms.push_back(sparse_histogram);

    // Create histograms on the "local heap" (i.e., are not instantiated using
    // the GlobalHistogramAllocator, which is automatically done when using the
    // FactoryGet() API). Store them in |histograms_| so that they are not freed
    // during the test.
    std::string local_heap_histogram_name =
        StringPrintf("LocalHeapNumericHistogram%zu", suffix);
    auto& local_heap_histogram = histograms_.emplace_back(
        new Histogram(GetPermanentName(local_heap_histogram_name),
                      numeric_histogram->bucket_ranges()));
    histograms.push_back(local_heap_histogram.get());
    std::string local_heap_sparse_histogram_name =
        StringPrintf("LocalHeapSparseHistogram%zu", suffix);
    auto& local_heap_sparse_histogram =
        histograms_.emplace_back(new SparseHistogram(
            GetPermanentName(local_heap_sparse_histogram_name)));
    histograms.push_back(local_heap_sparse_histogram.get());

    // Furthermore, create two additional *different* histogram objects that
    // point to the same underlying data as the first two (|numeric_histogram|
    // and |sparse_histogram|). This is to simulate subprocess histograms (i.e.,
    // both the main browser process and the subprocess have their own histogram
    // instance with possibly their own lock, but they both point to the same
    // underlying storage, and they may both interact with it simultaneously).
    // There is no need to do this for the "local heap" histograms because "by
    // definition" they should only be interacted with within the same process.
    PersistentHistogramAllocator::Iterator hist_it(allocator_view_.get());
    std::unique_ptr<HistogramBase> subprocess_numeric_histogram;
    std::unique_ptr<HistogramBase> subprocess_sparse_histogram;
    while (true) {
      // GetNext() creates a new histogram instance that points to the same
      // underlying data as the histogram the iterator is pointing to.
      std::unique_ptr<HistogramBase> histogram = hist_it.GetNext();
      if (!histogram) {
        break;
      }

      // Make sure the "local heap" histograms are not in persistent memory.
      EXPECT_NE(local_heap_histogram_name, histogram->histogram_name());
      EXPECT_NE(local_heap_sparse_histogram_name, histogram->histogram_name());

      if (histogram->histogram_name() == numeric_histogram_name) {
        subprocess_numeric_histogram = std::move(histogram);
      } else if (histogram->histogram_name() == sparse_histogram_name) {
        subprocess_sparse_histogram = std::move(histogram);
      }
    }
    // Make sure we found the histograms, and ensure that they are not the same
    // histogram objects. Assertions to verify that they are actually pointing
    // to the same underlying data are not done now (to not mess up the sample
    // counts).
    EXPECT_TRUE(subprocess_numeric_histogram);
    EXPECT_TRUE(subprocess_sparse_histogram);
    histograms.push_back(subprocess_numeric_histogram.get());
    histograms.push_back(subprocess_sparse_histogram.get());
    EXPECT_NE(numeric_histogram, subprocess_numeric_histogram.get());
    EXPECT_NE(sparse_histogram, subprocess_sparse_histogram.get());

    // Store the histograms in |histograms_| so that they are not freed during
    // the test.
    histograms_.emplace_back(std::move(subprocess_numeric_histogram));
    histograms_.emplace_back(std::move(subprocess_sparse_histogram));

    // Lastly, again, create two additional *different* histogram objects that
    // point to the same underlying data as the first two (|numeric_histogram|
    // and |sparse_histogram|). Unlike above, this is not necessarily done to
    // simulate subprocess histograms, but rather to verify that different
    // histogram objects created through the *same* allocator work correctly
    // together. In particular, the sparse histogram found here will use the
    // same "data manager" (see base::PersistentSparseHistogramDataManager) as
    // the original |sparse_histogram|. This is in contrast to the "subprocess"
    // histograms above, which will use a different "data manager" since those
    // histogram objects were created through a different allocator
    // (allocator_view_). In production, this is what happens when we try to
    // merge the histograms of a child process multiple times concurrently
    // (e.g. while we are merging the histograms of a certain child process in
    // the background, the browser is backgrounded, triggering another merge but
    // on the main thread).
    PersistentHistogramAllocator::Iterator hist_it2(
        GlobalHistogramAllocator::Get());
    std::unique_ptr<HistogramBase> numeric_histogram2;
    std::unique_ptr<HistogramBase> sparse_histogram2;
    while (true) {
      // GetNext() creates a new histogram instance that points to the same
      // underlying data as the histogram the iterator is pointing to.
      std::unique_ptr<HistogramBase> histogram = hist_it2.GetNext();
      if (!histogram) {
        break;
      }

      // Make sure the "local heap" histograms are not in persistent memory.
      EXPECT_NE(local_heap_histogram_name, histogram->histogram_name());
      EXPECT_NE(local_heap_sparse_histogram_name, histogram->histogram_name());

      if (histogram->histogram_name() == numeric_histogram_name) {
        numeric_histogram2 = std::move(histogram);
      } else if (histogram->histogram_name() == sparse_histogram_name) {
        sparse_histogram2 = std::move(histogram);
      }
    }
    // Make sure we found the histograms, and ensure that they are not the same
    // histogram objects. Assertions to verify that they are actually pointing
    // to the same underlying data are not done now (to not mess up the sample
    // counts).
    EXPECT_TRUE(numeric_histogram2);
    EXPECT_TRUE(sparse_histogram2);
    histograms.push_back(numeric_histogram2.get());
    histograms.push_back(sparse_histogram2.get());
    EXPECT_NE(numeric_histogram, numeric_histogram2.get());
    EXPECT_NE(sparse_histogram, sparse_histogram2.get());

    // Store the histograms in |histograms_| so that they are not freed during
    // the test.
    histograms_.emplace_back(std::move(numeric_histogram2));
    histograms_.emplace_back(std::move(sparse_histogram2));

    return histograms;
  }

 private:
  // A view of the GlobalHistogramAllocator to simulate a subprocess having its
  // own view of some shared memory.
  std::unique_ptr<PersistentHistogramAllocator> allocator_view_;

  // Used to prevent histograms from being freed during the test.
  std::vector<std::unique_ptr<HistogramBase>> histograms_;
};
349
350 // Verifies that SnapshotDelta() is thread safe. That means 1) a sample emitted
351 // while a snapshot is taken is not lost, and 2) concurrent calls to
352 // SnapshotDelta() will not return the same samples. Note that the test makes
353 // use of ASSERT_* instead EXPECT_* because the test is repeated multiple times,
354 // and the use of EXPECT_* produces spammy outputs as it does not end the test
355 // immediately.
// Verifies that SnapshotDelta() is thread safe. That means 1) a sample emitted
// while a snapshot is taken is not lost, and 2) concurrent calls to
// SnapshotDelta() will not return the same samples. Note that the test makes
// use of ASSERT_* instead EXPECT_* because the test is repeated multiple times,
// and the use of EXPECT_* produces spammy outputs as it does not end the test
// immediately.
TEST_F(HistogramThreadsafeTest, SnapshotDeltaThreadsafe) {
  // We try this test |kNumIterations| times to have a coverage of different
  // scenarios. For example, for a numeric histogram, if it has only samples
  // within the same bucket, the samples will be stored in a different way than
  // if it had samples in multiple buckets for efficiency reasons (SingleSample
  // vs a vector). Hence, the goal of doing this test multiple time is to have
  // coverage of the SingleSample scenario, because once the histogram has moved
  // to using a vector, it will not use SingleSample again.
  // Note: |kNumIterations| was 100 on 4/2023, but was decreased because the
  // workload was causing flakiness (timing out).
  constexpr size_t kNumIterations = 50;
  for (size_t iteration = 0; iteration < kNumIterations; ++iteration) {
    // TL;DR of the test: multiple threads are created, which will each emit to
    // the same histograms and snapshot their delta multiple times. We keep
    // track of the actual number of samples found in the snapshots, and ensure
    // that it matches what we actually emitted.

    // Create histograms. Two histograms should live on persistent memory,
    // two should live on local heap, and two of them should be simulations of
    // subprocess histograms that point to the same underlying data as first two
    // histograms (but are different objects).
    // The max values of the histograms will alternate between 2 and 50 in order
    // to have coverage of histograms that are being emitted to with a small
    // range of values, and a large range of values.
    const HistogramBase::Sample kHistogramMax = (iteration % 2 == 0) ? 2 : 50;
    const size_t kBucketCount = (iteration % 2 == 0) ? 3 : 10;
    std::vector<HistogramBase*> histograms =
        CreateHistograms(/*suffix=*/iteration, kHistogramMax, kBucketCount);

    // Start |kNumThreads| that will each emit and snapshot the histograms (see
    // SnapshotDeltaThread). We keep track of the real samples as well as the
    // samples found in the snapshots so that we can compare that they match
    // later on.
    constexpr size_t kNumThreads = 2;
    constexpr size_t kNumEmissions = 1000;
    // Shared accumulators, one slot per possible bucket minimum (sample
    // values are in [0, kHistogramMax)); incremented atomically by threads.
    subtle::Atomic32 real_total_samples_count = 0;
    std::vector<subtle::Atomic32> real_bucket_counts(kHistogramMax, 0);
    subtle::Atomic32 snapshots_total_samples_count = 0;
    std::vector<subtle::Atomic32> snapshots_bucket_counts(kHistogramMax, 0);
    std::unique_ptr<SnapshotDeltaThread> threads[kNumThreads];
    for (size_t i = 0; i < kNumThreads; ++i) {
      threads[i] = std::make_unique<SnapshotDeltaThread>(
          StringPrintf("SnapshotDeltaThread.%zu.%zu", iteration, i),
          kNumEmissions, histograms, kHistogramMax, &real_total_samples_count,
          real_bucket_counts, &snapshots_total_samples_count,
          snapshots_bucket_counts);
      threads[i]->Start();
    }

    // Wait until all threads have finished.
    for (auto& thread : threads) {
      thread->Join();
    }

    // Verify that the samples found in the snapshots match what we emitted.
    ASSERT_EQ(static_cast<size_t>(real_total_samples_count),
              kNumThreads * kNumEmissions * histograms.size());
    ASSERT_EQ(snapshots_total_samples_count, real_total_samples_count);
    for (HistogramBase::Sample i = 0; i < kHistogramMax; ++i) {
      ASSERT_EQ(snapshots_bucket_counts[i], real_bucket_counts[i]);
    }

    // Also verify that no more unlogged samples remain, and that the internal
    // logged samples of the histograms match what we emitted.

    HistogramBase::Count logged_total_samples_count = 0;
    std::vector<HistogramBase::Count> logged_bucket_counts(
        /*value=*/kHistogramMax, 0);
    // We ignore the last four histograms since they are the same as the first
    // two (they are simulations of histogram instances from a subprocess that
    // point to the same underlying data, and different histogram instances that
    // are created from the same allocator). Otherwise, we will be counting the
    // samples from those histograms thrice.
    for (size_t i = 0; i < histograms.size() - 4; ++i) {
      HistogramBase* histogram = histograms[i];
      // SnapshotDelta() must now be empty: every sample was already consumed
      // by a snapshot taken on one of the threads.
      ASSERT_EQ(histogram->SnapshotDelta()->TotalCount(), 0);
      std::unique_ptr<HistogramSamples> logged_samples =
          histogram->SnapshotSamples();
      // Each individual histograms should have been emitted to a specific
      // amount of times. Non-"local heap" histograms were emitted to thrice as
      // much because they appeared thrice in the |histograms| array -- once as
      // a normal histogram, once as a simulation of a subprocess histogram, and
      // once as a duplicate histogram created from the same allocator.
      size_t expected_logged_samples_count = kNumThreads * kNumEmissions;
      if (!strstr(histogram->histogram_name(), "LocalHeap")) {
        expected_logged_samples_count *= 3;
      }
      ASSERT_EQ(static_cast<size_t>(logged_samples->TotalCount()),
                expected_logged_samples_count);

      for (auto it = logged_samples->Iterator(); !it->Done(); it->Next()) {
        HistogramBase::Sample min;
        int64_t max;
        HistogramBase::Count count;
        it->Get(&min, &max, &count);
        ASSERT_GE(count, 0);
        logged_total_samples_count += count;
        logged_bucket_counts[min] += count;
      }
    }
    ASSERT_EQ(logged_total_samples_count, real_total_samples_count);
    for (HistogramBase::Sample i = 0; i < kHistogramMax; ++i) {
      ASSERT_EQ(logged_bucket_counts[i], real_bucket_counts[i]);
    }

    // Verify that our "subprocess histograms" actually point to the same
    // underlying data as the "main browser" histograms, despite being different
    // instances (this was verified earlier). This is done at the end of the
    // test so as to not mess up the sample counts.
    // Index layout comes from CreateHistograms(): [0]/[1] are the originals,
    // [4]/[5] their "subprocess" duplicates.
    HistogramBase* numeric_histogram = histograms[0];
    HistogramBase* subprocess_numeric_histogram = histograms[4];
    HistogramBase* sparse_histogram = histograms[1];
    HistogramBase* subprocess_sparse_histogram = histograms[5];
    ASSERT_EQ(subprocess_numeric_histogram->SnapshotDelta()->TotalCount(), 0);
    ASSERT_EQ(subprocess_sparse_histogram->SnapshotDelta()->TotalCount(), 0);
    // A sample added through one instance must be visible (and consumable
    // exactly once) through the duplicate instance.
    numeric_histogram->Add(0);
    sparse_histogram->Add(0);
    ASSERT_EQ(subprocess_numeric_histogram->SnapshotDelta()->TotalCount(), 1);
    ASSERT_EQ(subprocess_sparse_histogram->SnapshotDelta()->TotalCount(), 1);
    ASSERT_EQ(numeric_histogram->SnapshotDelta()->TotalCount(), 0);
    ASSERT_EQ(sparse_histogram->SnapshotDelta()->TotalCount(), 0);

    // Verify that our "duplicate histograms" created from the same allocator
    // actually point to the same underlying data as the "main" histograms,
    // despite being different instances (this was verified earlier). This is
    // done at the end of the test so as to not mess up the sample counts.
    // [6]/[7] are the same-allocator duplicates of [0]/[1].
    HistogramBase* numeric_histogram2 = histograms[6];
    HistogramBase* sparse_histogram2 = histograms[7];
    ASSERT_EQ(numeric_histogram2->SnapshotDelta()->TotalCount(), 0);
    ASSERT_EQ(sparse_histogram2->SnapshotDelta()->TotalCount(), 0);
    numeric_histogram->Add(0);
    sparse_histogram->Add(0);
    ASSERT_EQ(numeric_histogram2->SnapshotDelta()->TotalCount(), 1);
    ASSERT_EQ(sparse_histogram2->SnapshotDelta()->TotalCount(), 1);
    ASSERT_EQ(numeric_histogram->SnapshotDelta()->TotalCount(), 0);
    ASSERT_EQ(sparse_histogram->SnapshotDelta()->TotalCount(), 0);
  }
}
494
495 } // namespace base
496