xref: /aosp_15_r20/external/cronet/base/metrics/persistent_sample_map.cc (revision 6777b5387eb2ff775bb5750e3f5d96f37fb7352b)
1 // Copyright 2016 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/metrics/persistent_sample_map.h"
6 
7 #include "base/atomicops.h"
8 #include "base/check_op.h"
9 #include "base/containers/contains.h"
10 #include "base/debug/crash_logging.h"
11 #include "base/metrics/histogram_macros.h"
12 #include "base/metrics/persistent_histogram_allocator.h"
13 #include "base/notreached.h"
14 #include "base/numerics/safe_conversions.h"
15 
16 namespace base {
17 
18 typedef HistogramBase::Count Count;
19 typedef HistogramBase::Sample Sample;
20 
namespace {

// An iterator for going through a PersistentSampleMap. The logic here is
// identical to that of the iterator for SampleMap but with different data
// structures. Changes here likely need to be duplicated there.
//
// T is the (possibly const) map type and I its iterator type; the two
// instantiations below provide a read-only and an extracting variant.
template <typename T, typename I>
class IteratorTemplate : public SampleCountIterator {
 public:
  explicit IteratorTemplate(T& sample_counts)
      : iter_(sample_counts.begin()), end_(sample_counts.end()) {
    SkipEmptyBuckets();
  }

  ~IteratorTemplate() override;

  // SampleCountIterator:
  bool Done() const override { return iter_ == end_; }
  void Next() override {
    DCHECK(!Done());
    ++iter_;
    SkipEmptyBuckets();
  }
  void Get(HistogramBase::Sample* min,
           int64_t* max,
           HistogramBase::Count* count) override;

 private:
  // Advances past entries whose stored count is zero so the iterator only
  // ever exposes non-empty buckets. The count is read atomically because it
  // may live in shared memory and be modified by another process.
  void SkipEmptyBuckets() {
    while (!Done() && subtle::NoBarrier_Load(iter_->second) == 0) {
      ++iter_;
    }
  }

  I iter_;
  const I end_;
};
57 
58 typedef std::map<HistogramBase::Sample, HistogramBase::Count*> SampleToCountMap;
59 typedef IteratorTemplate<const SampleToCountMap,
60                          SampleToCountMap::const_iterator>
61     PersistentSampleMapIterator;
62 
63 template <>
64 PersistentSampleMapIterator::~IteratorTemplate() = default;
65 
// Get() for an iterator of a PersistentSampleMap.
template <>
void PersistentSampleMapIterator::Get(Sample* min, int64_t* max, Count* count) {
  DCHECK(!Done());
  // Sparse-histogram buckets hold exactly one value: [value, value + 1).
  *min = iter_->first;
  *max = strict_cast<int64_t>(iter_->first) + 1;
  // We have to do the following atomically, because even if the caller is using
  // a lock, a separate process (that is not aware of this lock) may
  // concurrently modify the value (note that iter_->second is a pointer to a
  // sample count, which may live in shared memory).
  *count = subtle::NoBarrier_Load(iter_->second);
}
78 
79 typedef IteratorTemplate<SampleToCountMap, SampleToCountMap::iterator>
80     ExtractingPersistentSampleMapIterator;
81 
82 template <>
~IteratorTemplate()83 ExtractingPersistentSampleMapIterator::~IteratorTemplate() {
84   // Ensure that the user has consumed all the samples in order to ensure no
85   // samples are lost.
86   DCHECK(Done());
87 }
88 
// Get() for an extracting iterator of a PersistentSampleMap.
template <>
void ExtractingPersistentSampleMapIterator::Get(Sample* min,
                                                int64_t* max,
                                                Count* count) {
  DCHECK(!Done());
  // Sparse-histogram buckets hold exactly one value: [value, value + 1).
  *min = iter_->first;
  *max = strict_cast<int64_t>(iter_->first) + 1;
  // We have to do the following atomically, because even if the caller is using
  // a lock, a separate process (that is not aware of this lock) may
  // concurrently modify the value (note that iter_->second is a pointer to a
  // sample count, which may live in shared memory). The exchange also resets
  // the stored count to zero, which is what makes this iterator "extracting".
  *count = subtle::NoBarrier_AtomicExchange(iter_->second, 0);
}
103 
// This structure holds an entry for a PersistentSampleMap within a persistent
// memory allocator. The "id" must be unique across all maps held by an
// allocator or they will get attached to the wrong sample map.
struct SampleRecord {
  // SHA1(SampleRecord): Increment this if structure changes!
  static constexpr uint32_t kPersistentTypeId = 0x8FE6A69F + 1;

  // Expected size for 32/64-bit check. 8 (id) + 4 (value) + 4 (count) = 16,
  // with no padding on either architecture.
  static constexpr size_t kExpectedInstanceSize = 16;

  uint64_t id;   // Unique identifier of owner.
  Sample value;  // The value for which this record holds a count.
  Count count;   // The count associated with the above value.
};

}  // namespace
120 
// Constructs a sample map identified by |id|. Sample records are stored in
// |allocator|; |meta| is the shared metadata block passed to the base class.
PersistentSampleMap::PersistentSampleMap(
    uint64_t id,
    PersistentHistogramAllocator* allocator,
    Metadata* meta)
    : HistogramSamples(id, meta), allocator_(allocator) {}

PersistentSampleMap::~PersistentSampleMap() = default;
128 
Accumulate(Sample value,Count count)129 void PersistentSampleMap::Accumulate(Sample value, Count count) {
130   // We have to do the following atomically, because even if the caller is using
131   // a lock, a separate process (that is not aware of this lock) may
132   // concurrently modify the value.
133   subtle::NoBarrier_AtomicIncrement(GetOrCreateSampleCountStorage(value),
134                                     count);
135   IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
136 }
137 
GetCount(Sample value) const138 Count PersistentSampleMap::GetCount(Sample value) const {
139   // Have to override "const" to make sure all samples have been loaded before
140   // being able to know what value to return.
141   Count* count_pointer =
142       const_cast<PersistentSampleMap*>(this)->GetSampleCountStorage(value);
143   return count_pointer ? subtle::NoBarrier_Load(count_pointer) : 0;
144 }
145 
TotalCount() const146 Count PersistentSampleMap::TotalCount() const {
147   // Have to override "const" in order to make sure all samples have been
148   // loaded before trying to iterate over the map.
149   const_cast<PersistentSampleMap*>(this)->ImportSamples(
150       /*until_value=*/std::nullopt);
151 
152   Count count = 0;
153   for (const auto& entry : sample_counts_) {
154     count += subtle::NoBarrier_Load(entry.second);
155   }
156   return count;
157 }
158 
Iterator() const159 std::unique_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
160   // Have to override "const" in order to make sure all samples have been
161   // loaded before trying to iterate over the map.
162   const_cast<PersistentSampleMap*>(this)->ImportSamples(
163       /*until_value=*/std::nullopt);
164   return std::make_unique<PersistentSampleMapIterator>(sample_counts_);
165 }
166 
// Returns an iterator that zeroes each count as it is consumed, removing the
// samples from the map.
std::unique_ptr<SampleCountIterator> PersistentSampleMap::ExtractingIterator() {
  // Make sure all samples have been loaded before trying to iterate over the
  // map; any not yet imported would otherwise escape extraction.
  ImportSamples(/*until_value=*/std::nullopt);
  return std::make_unique<ExtractingPersistentSampleMapIterator>(
      sample_counts_);
}
174 
bool PersistentSampleMap::IsDefinitelyEmpty() const {
  // Not implemented.
  NOTREACHED();

  // Always return false. If we are wrong, this will just make the caller
  // perform some extra work thinking that |this| is non-empty.
  return false;
}
183 
184 // static
185 PersistentMemoryAllocator::Reference
GetNextPersistentRecord(PersistentMemoryAllocator::Iterator & iterator,uint64_t * sample_map_id,Sample * value)186 PersistentSampleMap::GetNextPersistentRecord(
187     PersistentMemoryAllocator::Iterator& iterator,
188     uint64_t* sample_map_id,
189     Sample* value) {
190   const SampleRecord* record = iterator.GetNextOfObject<SampleRecord>();
191   if (!record) {
192     return 0;
193   }
194 
195   *sample_map_id = record->id;
196   *value = record->value;
197   return iterator.GetAsReference(record);
198 }
199 
// static
// Allocates a new record for (|sample_map_id|, |value|) inside |allocator|
// and publishes it to other users of that allocator. Returns 0 on failure.
PersistentMemoryAllocator::Reference
PersistentSampleMap::CreatePersistentRecord(
    PersistentMemoryAllocator* allocator,
    uint64_t sample_map_id,
    Sample value) {
  SampleRecord* record = allocator->New<SampleRecord>();
  if (!record) {
    // A full allocator is an expected failure mode; anything else suggests
    // corruption and is reported for investigation.
    if (!allocator->IsFull()) {
#if !BUILDFLAG(IS_NACL)
      // TODO(crbug/1432981): Remove these. They are used to investigate
      // unexpected failures.
      SCOPED_CRASH_KEY_BOOL("PersistentSampleMap", "corrupted",
                            allocator->IsCorrupt());
#endif  // !BUILDFLAG(IS_NACL)
      DUMP_WILL_BE_NOTREACHED_NORETURN()
          << "corrupt=" << allocator->IsCorrupt();
    }
    return 0;
  }

  record->id = sample_map_id;
  record->value = value;
  record->count = 0;

  PersistentMemoryAllocator::Reference ref = allocator->GetAsReference(record);
  // Making the record iterable is what publishes it to other processes/maps.
  allocator->MakeIterable(ref);
  return ref;
}
229 
AddSubtractImpl(SampleCountIterator * iter,Operator op)230 bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
231                                           Operator op) {
232   Sample min;
233   int64_t max;
234   Count count;
235   for (; !iter->Done(); iter->Next()) {
236     iter->Get(&min, &max, &count);
237     if (count == 0)
238       continue;
239     if (strict_cast<int64_t>(min) + 1 != max)
240       return false;  // SparseHistogram only supports bucket with size 1.
241 
242     // We have to do the following atomically, because even if the caller is
243     // using a lock, a separate process (that is not aware of this lock) may
244     // concurrently modify the value.
245     subtle::Barrier_AtomicIncrement(
246         GetOrCreateSampleCountStorage(min),
247         (op == HistogramSamples::ADD) ? count : -count);
248   }
249   return true;
250 }
251 
GetSampleCountStorage(Sample value)252 Count* PersistentSampleMap::GetSampleCountStorage(Sample value) {
253   // If |value| is already in the map, just return that.
254   auto it = sample_counts_.find(value);
255   if (it != sample_counts_.end())
256     return it->second;
257 
258   // Import any new samples from persistent memory looking for the value.
259   return ImportSamples(/*until_value=*/value);
260 }
261 
GetOrCreateSampleCountStorage(Sample value)262 Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
263   // Get any existing count storage.
264   Count* count_pointer = GetSampleCountStorage(value);
265   if (count_pointer)
266     return count_pointer;
267 
268   // Create a new record in persistent memory for the value. |records_| will
269   // have been initialized by the GetSampleCountStorage() call above.
270   CHECK(records_);
271   PersistentMemoryAllocator::Reference ref = records_->CreateNew(value);
272   if (!ref) {
273     // If a new record could not be created then the underlying allocator is
274     // full or corrupt. Instead, allocate the counter from the heap. This
275     // sample will not be persistent, will not be shared, and will leak...
276     // but it's better than crashing.
277     count_pointer = new Count(0);
278     sample_counts_[value] = count_pointer;
279     return count_pointer;
280   }
281 
282   // A race condition between two independent processes (i.e. two independent
283   // histogram objects sharing the same sample data) could cause two of the
284   // above records to be created. The allocator, however, forces a strict
285   // ordering on iterable objects so use the import method to actually add the
286   // just-created record. This ensures that all PersistentSampleMap objects
287   // will always use the same record, whichever was first made iterable.
288   // Thread-safety within a process where multiple threads use the same
289   // histogram object is delegated to the controlling histogram object which,
290   // for sparse histograms, is a lock object.
291   count_pointer = ImportSamples(/*until_value=*/value);
292   DCHECK(count_pointer);
293   return count_pointer;
294 }
295 
// Returns the per-map records object, creating it on first use.
PersistentSampleMapRecords* PersistentSampleMap::GetRecords() {
  // The |records_| pointer is lazily fetched from the |allocator_| only on
  // first use. Sometimes duplicate histograms are created by race conditions
  // and if both were to grab the records object, there would be a conflict.
  // Use of a histogram, and thus a call to this method, won't occur until
  // after the histogram has been de-dup'd.
  if (!records_) {
    records_ = allocator_->CreateSampleMapRecords(id());
  }
  return records_.get();
}
307 
ImportSamples(std::optional<Sample> until_value)308 Count* PersistentSampleMap::ImportSamples(std::optional<Sample> until_value) {
309   std::vector<PersistentMemoryAllocator::Reference> refs;
310   PersistentSampleMapRecords* records = GetRecords();
311   while (!(refs = records->GetNextRecords(until_value)).empty()) {
312     // GetNextRecords() returns a list of new unseen records belonging to this
313     // map. Iterate through them all and store them internally. Note that if
314     // |until_value| was found, it will be the last element in |refs|.
315     for (auto ref : refs) {
316       SampleRecord* record = records->GetAsObject<SampleRecord>(ref);
317       if (!record) {
318         continue;
319       }
320 
321       DCHECK_EQ(id(), record->id);
322 
323       // Check if the record's value is already known.
324       if (!Contains(sample_counts_, record->value)) {
325         // No: Add it to map of known values.
326         sample_counts_[record->value] = &record->count;
327       } else {
328         // Yes: Ignore it; it's a duplicate caused by a race condition -- see
329         // code & comment in GetOrCreateSampleCountStorage() for details.
330         // Check that nothing ever operated on the duplicate record.
331         DCHECK_EQ(0, record->count);
332       }
333 
334       // Check if it's the value being searched for and, if so, stop here.
335       // Because race conditions can cause multiple records for a single value,
336       // be sure to return the first one found.
337       if (until_value.has_value() && record->value == until_value.value()) {
338         // Ensure that this was the last value in |refs|.
339         CHECK_EQ(refs.back(), ref);
340 
341         return &record->count;
342       }
343     }
344   }
345 
346   return nullptr;
347 }
348 
349 }  // namespace base
350