xref: /aosp_15_r20/external/cronet/base/metrics/persistent_histogram_allocator.cc (revision 6777b5387eb2ff775bb5750e3f5d96f37fb7352b)
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_histogram_allocator.h"

#include <atomic>
#include <limits>
#include <string_view>
#include <utility>

#include "base/debug/crash_logging.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
#include "base/files/memory_mapped_file.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_sample_map.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/notreached.h"
#include "base/pickle.h"
#include "base/process/process_handle.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"

namespace base {

namespace {

// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// are used as a unique integer. A "version number" is added to the base
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
enum : uint32_t {
  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
};
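
// For reference, a hypothetical new persistent type would follow the same
// pattern: hash the type name, take the first 4 bytes of the digest as the
// base value, and bump the added version number whenever the stored layout
// changes, e.g. (illustrative value only):
//   kTypeIdMyArray = 0x12345678 + 1,  // SHA1(MyArray) v1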

// The current globally-active persistent allocator for all new histograms.
// The object held here will obviously not be destructed at process exit
// but that's best since PersistentMemoryAllocator objects (that underlie
// GlobalHistogramAllocator objects) are explicitly forbidden from doing
// anything essential at exit anyway because they depend on data managed
// elsewhere that could be destructed first. An AtomicWord is used instead
// of std::atomic because the latter can create global ctors and dtors.
subtle::AtomicWord g_histogram_allocator = 0;

// Take an array of range boundaries and create a proper BucketRanges object
// which is returned to the caller. A return of nullptr indicates that the
// passed boundaries are invalid.
std::unique_ptr<BucketRanges> CreateRangesFromData(
    HistogramBase::Sample* ranges_data,
    uint32_t ranges_checksum,
    size_t count) {
  // To avoid racy destruction at shutdown, the following may be leaked.
  std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
  DCHECK_EQ(count, ranges->size());
  for (size_t i = 0; i < count; ++i) {
    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
      return nullptr;
    ranges->set_range(i, ranges_data[i]);
  }

  ranges->ResetChecksum();
  if (ranges->checksum() != ranges_checksum)
    return nullptr;

  return ranges;
}
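
// Illustrative use (hypothetical values): boundaries must be strictly
// increasing and must reproduce the stored checksum, otherwise nullptr is
// returned.
//   HistogramBase::Sample boundaries[] = {0, 1, 10, 100};
//   std::unique_ptr<BucketRanges> ranges =
//       CreateRangesFromData(boundaries, stored_checksum, 4);
//   // |ranges| is null if the data is unsorted or fails the checksum.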

// Calculate the number of bytes required to store all of a histogram's
// "counts". This will return zero (0) if |bucket_count| is not valid.
size_t CalculateRequiredCountsBytes(size_t bucket_count) {
  // 2 because each "sample count" also requires a backup "logged count"
  // used for calculating the delta during snapshot operations.
  const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);

  // If the |bucket_count| is such that it would overflow the return type,
  // perhaps as the result of a malicious actor, then return zero to
  // indicate the problem to the caller.
  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
    return 0;

  return bucket_count * kBytesPerBucket;
}
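
// Illustrative use: assuming a 4-byte AtomicCount, a 100-bucket histogram
// needs 100 * 2 * 4 = 800 bytes for its live and logged counts.
//   size_t counts_bytes = CalculateRequiredCountsBytes(100);  // 800
//   if (counts_bytes == 0) {
//     // |bucket_count| would have overflowed; reject the allocation.
//   }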

void MergeSamplesToExistingHistogram(
    HistogramBase* existing,
    const HistogramBase* histogram,
    std::unique_ptr<HistogramSamples> samples) {
#if !BUILDFLAG(IS_NACL)
  // If the passed |histogram| does not match with |existing| (i.e. the one
  // registered with the global StatisticsRecorder) due to not being the same
  // type of histogram or due to specifying different buckets, then unexpected
  // things may happen further down the line. This may be indicative that a
  // child process is emitting a histogram with different parameters than the
  // browser process, for example.
  // TODO(crbug/1432981): Remove this. Used to investigate failures when merging
  // histograms from an allocator to the global StatisticsRecorder.
  bool histograms_match = true;
  HistogramType existing_type = existing->GetHistogramType();
  if (histogram->GetHistogramType() != existing_type) {
    // Different histogram types.
    histograms_match = false;
  } else if (existing_type == HistogramType::HISTOGRAM ||
             existing_type == HistogramType::LINEAR_HISTOGRAM ||
             existing_type == HistogramType::BOOLEAN_HISTOGRAM ||
             existing_type == HistogramType::CUSTOM_HISTOGRAM) {
    // Only numeric histograms make use of BucketRanges.
    const BucketRanges* existing_buckets =
        static_cast<const Histogram*>(existing)->bucket_ranges();
    const BucketRanges* histogram_buckets =
        static_cast<const Histogram*>(histogram)->bucket_ranges();
    // DCHECK because HasValidChecksum() recomputes the checksum which can be
    // expensive to do in a loop.
    DCHECK(existing_buckets->HasValidChecksum() &&
           histogram_buckets->HasValidChecksum());

    if (existing_buckets->checksum() != histogram_buckets->checksum()) {
      // Different buckets.
      histograms_match = false;
    }
  }

  if (!histograms_match) {
    // If the histograms do not match, then the call to AddSamples() below might
    // trigger a NOTREACHED(). Include the histogram name here for debugging
    // purposes. This is not done in GetOrCreateStatisticsRecorderHistogram()
    // directly, since that could incorrectly create crash reports for enum
    // histograms that have newly appended entries (different bucket max and
    // count).
    SCOPED_CRASH_KEY_STRING256("PersistentHistogramAllocator", "histogram",
                               existing->histogram_name());
    existing->AddSamples(*samples);
    return;
  }
#endif  // !BUILDFLAG(IS_NACL)

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*samples);
}

}  // namespace

PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
    PersistentMemoryAllocator* allocator)
    : allocator_(allocator), record_iterator_(allocator) {}

PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() =
    default;

std::unique_ptr<PersistentSampleMapRecords>
PersistentSparseHistogramDataManager::CreateSampleMapRecords(uint64_t id) {
  base::AutoLock auto_lock(lock_);
  return std::make_unique<PersistentSampleMapRecords>(
      this, id, GetSampleMapRecordsWhileLocked(id));
}

std::vector<PersistentSparseHistogramDataManager::ReferenceAndSample>*
PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
    uint64_t id) {
  auto* samples = &sample_records_[id];
  if (!samples->get()) {
    *samples = std::make_unique<std::vector<ReferenceAndSample>>();
  }
  return samples->get();
}

std::vector<PersistentMemoryAllocator::Reference>
PersistentSparseHistogramDataManager::LoadRecords(
    PersistentSampleMapRecords* sample_map_records,
    std::optional<HistogramBase::Sample> until_value) {
  // DataManager must be locked in order to access the |sample_records_|
  // vectors.
  base::AutoLock auto_lock(lock_);

  // Acquiring a lock is a semi-expensive operation, so load some records with
  // each call. More than this number may be loaded if it takes longer to
  // find at least one matching record for the passed object.
  const size_t kMinimumNumberToLoad = 10;
  const uint64_t match_id = sample_map_records->sample_map_id_;

  // Loop while no entry is found OR we haven't yet loaded the minimum number.
  // This will continue reading even after a match is found. Note that it is
  // possible that entries for the passed object were already found in a
  // different call.
  auto& found_records = *sample_map_records->records_;
  bool found = (found_records.size() > sample_map_records->seen_);
  size_t new_records = 0;
  while (!found || new_records < kMinimumNumberToLoad) {
    // Get the next sample-record. The iterator will always resume from where
    // it left off even if it previously had nothing further to return.
    uint64_t found_id;
    HistogramBase::Sample value;
    PersistentMemoryAllocator::Reference ref =
        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
                                                     &found_id, &value);

    // Stop immediately if there are none.
    if (!ref) {
      break;
    }
    ++new_records;

    // The sample-record could be for any sparse histogram. Add the reference
    // to the appropriate collection for later use.
    if (found_id == match_id) {
      found_records.emplace_back(ref, value);
      found = true;
    } else {
      std::vector<ReferenceAndSample>* samples =
          GetSampleMapRecordsWhileLocked(found_id);
      CHECK(samples);
      samples->emplace_back(ref, value);
    }
  }

  // Return all references found that have not yet been seen by
  // |sample_map_records|, up until |until_value| (if applicable).
  std::vector<PersistentMemoryAllocator::Reference> new_references;
  CHECK_GE(found_records.size(), sample_map_records->seen_);
  auto new_found_records = base::make_span(found_records)
                               .subspan(/*offset=*/sample_map_records->seen_);
  new_references.reserve(new_found_records.size());
  for (const auto& new_record : new_found_records) {
    new_references.push_back(new_record.reference);
    // References past |until_value| may also have been found. Stop here
    // immediately in such a case, since the caller will not expect any more
    // samples after |until_value|.
    if (until_value.has_value() && new_record.value == until_value.value()) {
      break;
    }
  }
  return new_references;
}

PersistentSampleMapRecords::PersistentSampleMapRecords(
    PersistentSparseHistogramDataManager* data_manager,
    uint64_t sample_map_id,
    std::vector<PersistentSparseHistogramDataManager::ReferenceAndSample>*
        records)
    : data_manager_(data_manager),
      sample_map_id_(sample_map_id),
      records_(records) {}

PersistentSampleMapRecords::~PersistentSampleMapRecords() = default;

std::vector<PersistentMemoryAllocator::Reference>
PersistentSampleMapRecords::GetNextRecords(
    std::optional<HistogramBase::Sample> until_value) {
  auto references = data_manager_->LoadRecords(this, until_value);
  seen_ += references.size();
  return references;
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
    HistogramBase::Sample value) {
  return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
                                                     sample_map_id_, value);
}
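
// Illustrative flow (hypothetical caller, in the spirit of
// PersistentSampleMap): fetch batches of records for one sparse histogram
// until none remain.
//   std::unique_ptr<PersistentSampleMapRecords> records =
//       manager.CreateSampleMapRecords(sample_map_id);
//   std::vector<PersistentMemoryAllocator::Reference> batch;
//   while (!(batch = records->GetNextRecords(std::nullopt)).empty()) {
//     // Import the sample stored at each reference in |batch|.
//   }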

// This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData {
  // SHA1(Histogram): Increment this if structure changes!
  static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;

  // Expected size for 32/64-bit check.
  static constexpr size_t kExpectedInstanceSize =
      40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;

  int32_t histogram_type;
  int32_t flags;
  int32_t minimum;
  int32_t maximum;
  uint32_t bucket_count;
  PersistentMemoryAllocator::Reference ranges_ref;
  uint32_t ranges_checksum;
  std::atomic<PersistentMemoryAllocator::Reference> counts_ref;
  HistogramSamples::Metadata samples_metadata;
  HistogramSamples::Metadata logged_metadata;

  // Space for the histogram name will be added during the actual allocation
  // request. This must be the last field of the structure. A zero-size array
  // or a "flexible" array would be preferred but is not (yet) valid C++.
  char name[sizeof(uint64_t)];  // Force 64-bit alignment on 32-bit builds.
};
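
// Illustrative sizing (mirrors AllocateHistogram() below): the variable-
// length name is carved out by over-allocating past the fixed fields, e.g.:
//   allocator->New<PersistentHistogramData>(
//       offsetof(PersistentHistogramData, name) + name.size() + 1);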

PersistentHistogramAllocator::Iterator::Iterator(
    PersistentHistogramAllocator* allocator)
    : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}

std::unique_ptr<HistogramBase>
PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
  PersistentMemoryAllocator::Reference ref;
  while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
    if (ref != ignore)
      return allocator_->GetHistogram(ref);
  }
  return nullptr;
}

PersistentHistogramAllocator::PersistentHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : memory_allocator_(std::move(memory)),
      sparse_histogram_data_manager_(memory_allocator_.get()) {}

PersistentHistogramAllocator::~PersistentHistogramAllocator() = default;

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
    Reference ref) {
  // Unfortunately, the histogram "pickle" methods cannot be used as part of
  // the persistence because the deserialization methods always create local
  // count data (while these must reference the persistent counts) and always
  // add it to the local list of known histograms (while these may be simple
  // references to histograms in other processes).
  PersistentHistogramData* data =
      memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
  const size_t length = memory_allocator_->GetAllocSize(ref);

  // Check that metadata is reasonable: name is null-terminated and non-empty,
  // ID fields have been loaded with a hash of the name (0 is considered
  // unset/invalid).
  if (!data || data->name[0] == '\0' ||
      reinterpret_cast<char*>(data)[length - 1] != '\0' ||
      data->samples_metadata.id == 0 || data->logged_metadata.id == 0 ||
      // Note: Sparse histograms use |id + 1| in |logged_metadata|.
      (data->logged_metadata.id != data->samples_metadata.id &&
       data->logged_metadata.id != data->samples_metadata.id + 1) ||
      // Most non-matching values happen due to truncated names. Ideally, we
      // could just verify the name length based on the overall alloc length,
      // but that doesn't work because the allocated block may have been
      // aligned to the next boundary value.
      HashMetricName(data->name) != data->samples_metadata.id) {
    return nullptr;
  }
  return CreateHistogram(data);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
    HistogramType histogram_type,
    std::string_view name,
    int minimum,
    int maximum,
    const BucketRanges* bucket_ranges,
    int32_t flags,
    Reference* ref_ptr) {
  // If the allocator is corrupt, don't waste time trying anything else.
  // This also allows differentiating on the dashboard between allocations
  // failed due to a corrupt allocator and the number of process instances
  // with one, the latter being indicated by "newly corrupt", below.
  if (memory_allocator_->IsCorrupt())
    return nullptr;

  // Create the metadata necessary for a persistent sparse histogram. This
  // is done first because it is a small subset of what is required for
  // other histograms. The type is "under construction" so that a crash
  // during the datafill doesn't leave a bad record around that could cause
  // confusion by another process trying to read it. It will be corrected
  // once histogram construction is complete.
  PersistentHistogramData* histogram_data =
      memory_allocator_->New<PersistentHistogramData>(
          offsetof(PersistentHistogramData, name) + name.size() + 1);
  if (histogram_data) {
    memcpy(histogram_data->name, name.data(), name.size());
    histogram_data->name[name.size()] = '\0';
    histogram_data->histogram_type = histogram_type;
    histogram_data->flags = flags | HistogramBase::kIsPersistent;

    // |counts_ref| relies on being zero'd out initially. Even though this
    // should always be the case, manually zero it out again here in case there
    // was memory corruption (e.g. if the memory was mapped from a corrupted
    // spare file).
    // TODO(crbug.com/1432981): Remove this if this has no effect, and try to
    // understand better why there is sometimes garbage written in this field.
    histogram_data->counts_ref.store(0, std::memory_order_relaxed);
  }

  // Create the remaining metadata necessary for regular histograms.
  if (histogram_type != SPARSE_HISTOGRAM) {
    size_t bucket_count = bucket_ranges->bucket_count();
    size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
    if (counts_bytes == 0) {
      // |bucket_count| was out-of-range.
      return nullptr;
    }

    // Since the StatisticsRecorder keeps a global collection of BucketRanges
    // objects for re-use, it would be dangerous for one to hold a reference
    // from a persistent allocator that is not the global one (which is
    // permanent once set). If this stops being the case, this check can
    // become an "if" condition beside "!ranges_ref" below and before
    // set_persistent_reference() farther down.
    DCHECK_EQ(this, GlobalHistogramAllocator::Get());

    // Re-use an existing BucketRanges persistent allocation if one is known;
    // otherwise, create one.
    PersistentMemoryAllocator::Reference ranges_ref =
        bucket_ranges->persistent_reference();
    if (!ranges_ref) {
      size_t ranges_count = bucket_count + 1;
      size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
      ranges_ref =
          memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
      if (ranges_ref) {
        HistogramBase::Sample* ranges_data =
            memory_allocator_->GetAsArray<HistogramBase::Sample>(
                ranges_ref, kTypeIdRangesArray, ranges_count);
        if (ranges_data) {
          for (size_t i = 0; i < bucket_ranges->size(); ++i)
            ranges_data[i] = bucket_ranges->range(i);
          bucket_ranges->set_persistent_reference(ranges_ref);
        } else {
          // This should never happen but be tolerant if it does.
          ranges_ref = PersistentMemoryAllocator::kReferenceNull;
        }
      }
    } else {
      DCHECK_EQ(kTypeIdRangesArray, memory_allocator_->GetType(ranges_ref));
    }

    // Only continue here if all allocations were successful. If they weren't,
    // there is no way to free the space but that's not really a problem since
    // the allocations only fail because the space is full or corrupt and so
    // any future attempts will also fail.
    if (ranges_ref && histogram_data) {
      histogram_data->minimum = minimum;
      histogram_data->maximum = maximum;
      // |bucket_count| must fit within 32-bits or the allocation of the counts
      // array would have failed for being too large; the allocator supports
      // less than 4GB total size.
      histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
      histogram_data->ranges_ref = ranges_ref;
      histogram_data->ranges_checksum = bucket_ranges->checksum();
    } else {
      histogram_data = nullptr;  // Clear this for proper handling below.
    }
  }

  if (histogram_data) {
    // Create the histogram using resources in persistent memory. This ends up
    // resolving the "ref" values stored in histogram_data instead of just
    // using what is already known above but avoids duplicating the switch
    // statement here and serves as a double-check that everything is
    // correct before committing the new histogram to persistent space.
    std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
    DCHECK(histogram);
    DCHECK_NE(0U, histogram_data->samples_metadata.id);
    DCHECK_NE(0U, histogram_data->logged_metadata.id);

    PersistentMemoryAllocator::Reference histogram_ref =
        memory_allocator_->GetAsReference(histogram_data);
    if (ref_ptr != nullptr)
      *ref_ptr = histogram_ref;

    // By storing the reference within the allocator to this histogram, the
    // next import (which will happen before the next histogram creation)
    // will know to skip it.
    // See also the comment in ImportHistogramsToStatisticsRecorder().
    last_created_.store(histogram_ref, std::memory_order_relaxed);
    return histogram;
  }

  return nullptr;
}

void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
                                                     bool registered) {
  if (registered) {
    // If the created persistent histogram was registered then it needs to
    // be marked as "iterable" in order to be found by other processes. This
    // happens only after the histogram is fully formed so it's impossible for
    // code iterating through the allocator to read a partially created record.
    memory_allocator_->MakeIterable(ref);
  } else {
    // If it wasn't registered then a race condition must have caused two to
    // be created. The allocator does not support releasing the acquired memory
    // so just change the type to be empty.
    memory_allocator_->ChangeType(ref, 0,
                                  PersistentHistogramData::kPersistentTypeId,
                                  /*clear=*/false);
  }
}
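
// Illustrative creation flow (a sketch of what the histogram factories do;
// names are hypothetical):
//   PersistentHistogramAllocator::Reference ref = 0;
//   std::unique_ptr<HistogramBase> histogram = allocator->AllocateHistogram(
//       HISTOGRAM, "MyMetric", 1, 1000, ranges, HistogramBase::kNoFlags,
//       &ref);
//   bool registered = /* histogram won the registration race */ true;
//   if (histogram && ref)
//     allocator->FinalizeHistogram(ref, registered);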

void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
    HistogramBase* histogram) {
  DCHECK(histogram);

  // Return immediately if the histogram has no samples since the last delta
  // snapshot. This is to prevent looking up or registering the histogram with
  // the StatisticsRecorder, which requires acquiring a lock.
  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
  if (samples->IsDefinitelyEmpty()) {
    return;
  }

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // Some metric data will be lost but that is better than crashing.
    return;
  }

  MergeSamplesToExistingHistogram(existing, histogram, std::move(samples));
}

void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
    const HistogramBase* histogram) {
  DCHECK(histogram);

  // Return immediately if the histogram has no samples. This is to prevent
  // looking up or registering the histogram with the StatisticsRecorder, which
  // requires acquiring a lock.
  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotFinalDelta();
  if (samples->IsDefinitelyEmpty()) {
    return;
  }

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // Some metric data will be lost but that is better than crashing.
    return;
  }

  MergeSamplesToExistingHistogram(existing, histogram, std::move(samples));
}

std::unique_ptr<PersistentSampleMapRecords>
PersistentHistogramAllocator::CreateSampleMapRecords(uint64_t id) {
  return sparse_histogram_data_manager_.CreateSampleMapRecords(id);
}

void PersistentHistogramAllocator::CreateTrackingHistograms(
    std::string_view name) {
  memory_allocator_->CreateTrackingHistograms(name);
}

void PersistentHistogramAllocator::UpdateTrackingHistograms() {
  memory_allocator_->UpdateTrackingHistograms();
}

void PersistentHistogramAllocator::SetRangesManager(
    RangesManager* ranges_manager) {
  ranges_manager_.reset(ranges_manager);
}

void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
  last_created_.store(0, std::memory_order_relaxed);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
    PersistentHistogramData* histogram_data_ptr) {
  if (!histogram_data_ptr)
    return nullptr;

  // Sparse histograms are quite different so handle them as a special case.
  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
    std::unique_ptr<HistogramBase> histogram =
        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
                                          &histogram_data_ptr->samples_metadata,
                                          &histogram_data_ptr->logged_metadata);
    DCHECK(histogram);
    histogram->SetFlags(histogram_data_ptr->flags);
    return histogram;
  }

  // Copy the configuration fields from histogram_data_ptr to local storage
  // because anything in persistent memory cannot be trusted as it could be
  // changed at any moment by a malicious actor that shares access. The local
  // values are validated below and then used to create the histogram, knowing
  // they haven't changed between validation and use.
  int32_t histogram_type = histogram_data_ptr->histogram_type;
  int32_t histogram_flags = histogram_data_ptr->flags;
  int32_t histogram_minimum = histogram_data_ptr->minimum;
  int32_t histogram_maximum = histogram_data_ptr->maximum;
  uint32_t histogram_bucket_count = histogram_data_ptr->bucket_count;
  uint32_t histogram_ranges_ref = histogram_data_ptr->ranges_ref;
  uint32_t histogram_ranges_checksum = histogram_data_ptr->ranges_checksum;

  HistogramBase::Sample* ranges_data =
      memory_allocator_->GetAsArray<HistogramBase::Sample>(
          histogram_ranges_ref, kTypeIdRangesArray,
          PersistentMemoryAllocator::kSizeAny);

  const uint32_t max_buckets =
      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
  size_t required_bytes =
      (histogram_bucket_count + 1) * sizeof(HistogramBase::Sample);
  size_t allocated_bytes =
      memory_allocator_->GetAllocSize(histogram_ranges_ref);
  if (!ranges_data || histogram_bucket_count < 2 ||
      histogram_bucket_count >= max_buckets ||
      allocated_bytes < required_bytes) {
    return nullptr;
  }

  std::unique_ptr<const BucketRanges> created_ranges = CreateRangesFromData(
      ranges_data, histogram_ranges_checksum, histogram_bucket_count + 1);
  if (!created_ranges || created_ranges->size() != histogram_bucket_count + 1 ||
      created_ranges->range(1) != histogram_minimum ||
      created_ranges->range(histogram_bucket_count - 1) != histogram_maximum) {
    return nullptr;
  }
  const BucketRanges* ranges;
  if (ranges_manager_) {
    ranges =
        ranges_manager_->GetOrRegisterCanonicalRanges(created_ranges.get());
    if (ranges == created_ranges.get()) {
      // `ranges_manager_` took ownership of `created_ranges`.
      created_ranges.release();
    }
  } else {
    ranges = StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
        created_ranges.release());
  }

  size_t counts_bytes = CalculateRequiredCountsBytes(histogram_bucket_count);
  PersistentMemoryAllocator::Reference counts_ref =
      histogram_data_ptr->counts_ref.load(std::memory_order_acquire);
  if (counts_bytes == 0 ||
      (counts_ref != 0 &&
       memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
    return nullptr;
  }

  // The "counts" data (including both samples and logged samples) is a delayed
  // persistent allocation, meaning that though its size and storage for a
  // reference is defined, no space is reserved until actually needed. When
  // it is needed, memory will be allocated from the persistent segment and
  // a reference to it stored at the passed address. Other threads can then
  // notice the valid reference and access the same data.
  DelayedPersistentAllocation counts_data(memory_allocator_.get(),
                                          &histogram_data_ptr->counts_ref,
                                          kTypeIdCountsArray, counts_bytes);

  // A second delayed allocation is defined using the same reference storage
  // location as the first so the allocation of one will automatically be found
  // by the other. Within the block, the first half of the space is for "counts"
  // and the second half is for "logged counts".
  DelayedPersistentAllocation logged_data(
      memory_allocator_.get(), &histogram_data_ptr->counts_ref,
      kTypeIdCountsArray, counts_bytes, counts_bytes / 2);
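
  // Layout sketch of the shared block behind |counts_ref| (halves are equal
  // because of the factor of 2 in CalculateRequiredCountsBytes()):
  //   [0, counts_bytes/2)            -> live sample counts (counts_data)
  //   [counts_bytes/2, counts_bytes) -> logged snapshot counts (logged_data)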

  // Create the right type of histogram.
  const char* name = histogram_data_ptr->name;
  std::unique_ptr<HistogramBase> histogram;
  switch (histogram_type) {
    case HISTOGRAM:
      histogram =
          Histogram::PersistentCreate(name, ranges, counts_data, logged_data,
                                      &histogram_data_ptr->samples_metadata,
                                      &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case LINEAR_HISTOGRAM:
      histogram = LinearHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case BOOLEAN_HISTOGRAM:
      histogram = BooleanHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case CUSTOM_HISTOGRAM:
      histogram = CustomHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    default:
      return nullptr;
  }

  if (histogram) {
    DCHECK_EQ(histogram_type, histogram->GetHistogramType());
    histogram->SetFlags(histogram_flags);
  }

  return histogram;
}

HistogramBase*
PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
    const HistogramBase* histogram) {
  // This should never be called on the global histogram allocator as objects
  // created there are already within the global statistics recorder.
  DCHECK_NE(GlobalHistogramAllocator::Get(), this);
  DCHECK(histogram);

  HistogramBase* existing =
      StatisticsRecorder::FindHistogram(histogram->histogram_name());
  if (existing) {
    return existing;
  }

  // Adding the passed histogram to the SR would cause a problem if the
  // allocator that holds it eventually goes away. Instead, create a new
  // one from a serialized version. Deserialization calls the appropriate
  // FactoryGet() which will create the histogram in the global persistent-
  // histogram allocator if such is set.
  base::Pickle pickle;
  histogram->SerializeInfo(&pickle);
  PickleIterator iter(pickle);
  existing = DeserializeHistogramInfo(&iter);
  if (!existing)
    return nullptr;

  // Make sure there is no "serialization" flag set.
  DCHECK(!existing->HasFlags(HistogramBase::kIPCSerializationSourceFlag));
  // Record the newly created histogram in the SR.
  return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
}

GlobalHistogramAllocator::~GlobalHistogramAllocator() {
  // GlobalHistogramAllocator should never be destroyed because Histogram
  // objects may keep pointers to its memory.
  NOTREACHED();
}

// static
void GlobalHistogramAllocator::CreateWithPersistentMemory(
    void* base,
    size_t size,
    size_t page_size,
    uint64_t id,
    std::string_view name) {
  Set(new GlobalHistogramAllocator(std::make_unique<PersistentMemoryAllocator>(
      base, size, page_size, id, name, PersistentMemoryAllocator::kReadWrite)));
}

// static
void GlobalHistogramAllocator::CreateWithLocalMemory(size_t size,
                                                     uint64_t id,
                                                     std::string_view name) {
  Set(new GlobalHistogramAllocator(
      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name)));
}

#if !BUILDFLAG(IS_NACL)
// static
bool GlobalHistogramAllocator::CreateWithFile(const FilePath& file_path,
                                              size_t size,
                                              uint64_t id,
                                              std::string_view name,
                                              bool exclusive_write) {
  uint32_t flags = File::FLAG_OPEN_ALWAYS | File::FLAG_WIN_SHARE_DELETE |
                   File::FLAG_READ | File::FLAG_WRITE;
  if (exclusive_write)
    flags |= File::FLAG_WIN_EXCLUSIVE_WRITE;
  File file(file_path, flags);
  if (!file.IsValid())
    return false;

  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
  bool success = false;
  const bool file_created = file.created();
  if (file_created) {
    success = mmfile->Initialize(std::move(file), {0, size},
                                 MemoryMappedFile::READ_WRITE_EXTEND);
  } else {
    success = mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
  }
  if (!success ||
      !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
    if (file_created) {
      // If we created the file, but it couldn't be used, delete it.
      // This could happen if we were able to create a file of all-zeroes, but
      // couldn't write to it due to lack of disk space.
      base::DeleteFile(file_path);
    }
    return false;
  }

  Set(new GlobalHistogramAllocator(
      std::make_unique<FilePersistentMemoryAllocator>(
          std::move(mmfile), 0, id, name,
          PersistentMemoryAllocator::kReadWrite)));
  Get()->SetPersistentLocation(file_path);
  return true;
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFile(const FilePath& base_path,
                                                    const FilePath& active_path,
                                                    const FilePath& spare_path,
                                                    size_t size,
                                                    uint64_t id,
                                                    std::string_view name) {
  // Old "active" becomes "base".
  if (!base::ReplaceFile(active_path, base_path, nullptr))
    base::DeleteFile(base_path);
  if (base::PathExists(active_path))
    return false;

  // Move any "spare" into "active". Okay to continue if file doesn't exist.
  if (!spare_path.empty())
    base::ReplaceFile(spare_path, active_path, nullptr);

  return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
                                                        name);
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFileInDir(
    const FilePath& dir,
    size_t size,
    uint64_t id,
    std::string_view name) {
  FilePath base_path = ConstructFilePath(dir, name);
  FilePath active_path = ConstructFilePathForActiveFile(dir, name);
  FilePath spare_path = ConstructFilePath(dir, std::string(name) + "-spare");
  return CreateWithActiveFile(base_path, active_path, spare_path, size, id,
                              name);
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePath(const FilePath& dir,
                                                     std::string_view name) {
  return dir.AppendASCII(name).AddExtension(
      PersistentMemoryAllocator::kFileExtension);
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePathForActiveFile(
    const FilePath& dir,
    std::string_view name) {
  return ConstructFilePath(dir, std::string(name) + "-active");
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
    const FilePath& dir,
    std::string_view name,
    base::Time stamp,
    ProcessId pid) {
  return ConstructFilePath(
      dir,
      StringPrintf("%.*s-%lX-%lX", static_cast<int>(name.length()), name.data(),
                   static_cast<long>(stamp.ToTimeT()), static_cast<long>(pid)));
}
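
// For example (hypothetical values), a name of "BrowserMetrics", a stamp of
// 0x5E0F11AA seconds and a pid of 0x1A2B would yield a path ending in
// "BrowserMetrics-5E0F11AA-1A2B" plus the allocator's file extension; both
// numeric parts are uppercase hex, which ParseFilePath() below expects.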

// static
FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
    const FilePath& dir,
    std::string_view name) {
  return ConstructFilePathForUploadDir(dir, name, Time::Now(),
                                       GetCurrentProcId());
}

// static
bool GlobalHistogramAllocator::ParseFilePath(const FilePath& path,
                                             std::string* out_name,
                                             Time* out_stamp,
                                             ProcessId* out_pid) {
  std::string filename = path.BaseName().AsUTF8Unsafe();
  std::vector<std::string_view> parts = base::SplitStringPiece(
      filename, "-.", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
  if (parts.size() != 4)
    return false;

  if (out_name)
    *out_name = std::string(parts[0]);

  if (out_stamp) {
    int64_t stamp;
    if (!HexStringToInt64(parts[1], &stamp))
      return false;
    *out_stamp = Time::FromTimeT(static_cast<time_t>(stamp));
  }

  if (out_pid) {
    int64_t pid;
    if (!HexStringToInt64(parts[2], &pid))
      return false;
    *out_pid = static_cast<ProcessId>(pid);
  }

  return true;
}
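
// Illustrative round-trip with the constructors above (values hypothetical);
// note it only works when |name| itself contains no '-' or '.' characters,
// since the filename is split into exactly four parts:
//   FilePath path = ConstructFilePathForUploadDir(dir, "BrowserMetrics");
//   std::string name;
//   Time stamp;
//   ProcessId pid;
//   bool ok = ParseFilePath(path, &name, &stamp, &pid);
//   // ok == true, name == "BrowserMetrics", stamp is second-granularity.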

bool GlobalHistogramAllocator::CreateSpareFile(const FilePath& spare_path,
                                               size_t size) {
  // If the spare file already exists, it was created in a previous session and
  // is still unused, so do nothing.
  if (base::PathExists(spare_path)) {
    return false;
  }
  FilePath temp_spare_path = spare_path.AddExtension(FILE_PATH_LITERAL(".tmp"));
  bool success;
  {
    File spare_file(temp_spare_path, File::FLAG_CREATE_ALWAYS |
                                         File::FLAG_READ | File::FLAG_WRITE);
    success = spare_file.IsValid();

    if (success) {
      MemoryMappedFile mmfile;
      success = mmfile.Initialize(std::move(spare_file), {0, size},
                                  MemoryMappedFile::READ_WRITE_EXTEND);
    }
  }

  if (success)
    success = ReplaceFile(temp_spare_path, spare_path, nullptr);

  if (!success)
    DeleteFile(temp_spare_path);

  return success;
}
#endif  // !BUILDFLAG(IS_NACL)

// static
void GlobalHistogramAllocator::CreateWithSharedMemoryRegion(
    const UnsafeSharedMemoryRegion& region) {
  CHECK_EQ(Get(), nullptr) << "Histogram allocator has already been created";

  base::WritableSharedMemoryMapping mapping = region.Map();
  if (!mapping.IsValid() ||
      !WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
          mapping)) {
    DVLOG(1) << "Shared memory region is invalid or unacceptable.";
    return;
  }

  DVLOG(1) << "Global histogram allocator initialized.";
  Set(new GlobalHistogramAllocator(
      std::make_unique<WritableSharedPersistentMemoryAllocator>(
          std::move(mapping), 0, std::string_view())));
}

// static
void GlobalHistogramAllocator::Set(GlobalHistogramAllocator* allocator) {
  // Releasing or changing an allocator is extremely dangerous because it
  // likely has histograms stored within it. If the backing memory is also
  // released, future accesses to those histograms will seg-fault.
  CHECK(!subtle::NoBarrier_Load(&g_histogram_allocator));
  subtle::Release_Store(&g_histogram_allocator,
                        reinterpret_cast<intptr_t>(allocator));

  // Record the number of histograms that were sampled before the global
  // histogram allocator was initialized.
  //
  // TODO(crbug/1504919): CHECK(histogram_count == 0) and remove emit of early
  // histogram count once |histogram_count| is reliably zero (0) for all process
  // types.
  size_t histogram_count = StatisticsRecorder::GetHistogramCount();
  if (histogram_count != 0) {
    DVLOG(1) << histogram_count
             << " histogram(s) created before persistence was enabled.";

    if (allocator && allocator->Name() && allocator->Name()[0]) {
      UmaHistogramCounts100(StrCat({"UMA.PersistentAllocator.EarlyHistograms.",
                                    allocator->Name()}),
                            static_cast<int>(histogram_count));
    }
  }
}

// static
GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
  return reinterpret_cast<GlobalHistogramAllocator*>(
      subtle::Acquire_Load(&g_histogram_allocator));
}
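
// Typical caller pattern (sketch): test for a global allocator before doing
// persistence-specific work, since Get() returns null when none was set.
//   if (GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get()) {
//     allocator->CreateTrackingHistograms(allocator->Name());
//   }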

// static
GlobalHistogramAllocator* GlobalHistogramAllocator::ReleaseForTesting() {
  GlobalHistogramAllocator* histogram_allocator = Get();
  if (!histogram_allocator)
    return nullptr;
  PersistentMemoryAllocator* memory_allocator =
      histogram_allocator->memory_allocator();

  // Before releasing the memory, it's necessary to have the Statistics-
  // Recorder forget about the histograms contained therein; otherwise,
  // some operations will try to access them and the released memory.
  PersistentMemoryAllocator::Iterator iter(memory_allocator);
  const PersistentHistogramData* data;
  while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
    StatisticsRecorder::ForgetHistogramForTesting(data->name);
  }

  subtle::Release_Store(&g_histogram_allocator, 0);
  ANNOTATE_LEAKING_OBJECT_PTR(histogram_allocator);
  return histogram_allocator;
}

void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
  persistent_location_ = location;
}

const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
  return persistent_location_;
}

bool GlobalHistogramAllocator::HasPersistentLocation() const {
  return !persistent_location_.empty();
}

bool GlobalHistogramAllocator::MovePersistentFile(const FilePath& dir) {
  DCHECK(HasPersistentLocation());

  FilePath new_file_path = dir.Append(persistent_location_.BaseName());

  // Change the location of the persistent file. This is fine to do even though
  // the file is currently "opened" by this process.
  if (!base::ReplaceFile(persistent_location_, new_file_path, nullptr)) {
    return false;
  }

  SetPersistentLocation(new_file_path);
  return true;
}

bool GlobalHistogramAllocator::WriteToPersistentLocation() {
#if BUILDFLAG(IS_NACL)
  // NACL doesn't support file operations, including ImportantFileWriter.
  NOTREACHED();
  return false;
#else
  // Stop if no destination is set.
  if (!HasPersistentLocation()) {
    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
                 << " to file because no location was set.";
    return false;
  }

  std::string_view contents(static_cast<const char*>(data()), used());
  if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
                                                contents)) {
    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
               << " to file: " << persistent_location_.value();
    return false;
  }

  return true;
#endif
}

void GlobalHistogramAllocator::DeletePersistentLocation() {
  memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);

#if BUILDFLAG(IS_NACL)
  NOTREACHED();
#else
  if (!HasPersistentLocation()) {
    return;
  }

  // Open (with delete) and then immediately close the file by going out of
  // scope. This is the only cross-platform safe way to delete a file that may
  // be open elsewhere. Open handles will continue to operate normally but
  // new opens will not be possible.
  File file(persistent_location_,
            File::FLAG_OPEN | File::FLAG_READ | File::FLAG_DELETE_ON_CLOSE);
#endif
}

GlobalHistogramAllocator::GlobalHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : PersistentHistogramAllocator(std::move(memory)),
      import_iterator_(this) {
}

void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
  // Skip the import if it's the histogram that was last created. Should a
  // race condition cause the "last created" to be overwritten before it
  // is recognized here then the histogram will be created and be ignored
  // when it is detected as a duplicate by the statistics-recorder. This
  // simple check reduces the time of creating persistent histograms by
  // about 40%.
  Reference record_to_ignore = last_created();

  // There is no lock on this because the iterator is lock-free while still
  // guaranteed to return each entry only once. The StatisticsRecorder
  // has its own lock so the Register operation is safe.
  while (true) {
    std::unique_ptr<HistogramBase> histogram =
        import_iterator_.GetNextWithIgnore(record_to_ignore);
    if (!histogram)
      break;
    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
  }
}

}  // namespace base