// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/profiler/metadata_recorder.h"

#include <optional>

#include "base/metrics/histogram_macros.h"

namespace base {

const size_t MetadataRecorder::MAX_METADATA_COUNT;

MetadataRecorder::Item::Item(uint64_t name_hash,
                             std::optional<int64_t> key,
                             std::optional<PlatformThreadId> thread_id,
                             int64_t value)
    : name_hash(name_hash), key(key), thread_id(thread_id), value(value) {}

MetadataRecorder::Item::Item() : name_hash(0), value(0) {}

MetadataRecorder::Item::Item(const Item& other) = default;

MetadataRecorder::Item& MetadataRecorder::Item::operator=(
    const Item& other) = default;

MetadataRecorder::ItemInternal::ItemInternal() = default;

MetadataRecorder::ItemInternal::~ItemInternal() = default;

MetadataRecorder::MetadataRecorder() {
  // Ensure that we have the necessary atomic support.
  DCHECK(items_[0].is_active.is_lock_free());
  DCHECK(items_[0].value.is_lock_free());
}

MetadataRecorder::~MetadataRecorder() = default;

void MetadataRecorder::Set(uint64_t name_hash,
                           std::optional<int64_t> key,
                           std::optional<PlatformThreadId> thread_id,
                           int64_t value) {
  AutoLock lock(write_lock_);

  // Acquiring the |write_lock_| ensures that:
  //
  //   - We don't try to write into the same new slot at the same time as
  //     another thread
  //   - We see all writes by other threads (acquiring a mutex implies acquire
  //     semantics)
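  // First look for an existing slot with this (name_hash, key, thread_id)
  // identity and, if one exists, update its value in place.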
  size_t item_slots_used = item_slots_used_.load(std::memory_order_relaxed);
  for (size_t i = 0; i < item_slots_used; ++i) {
    auto& item = items_[i];
    if (item.name_hash == name_hash && item.key == key &&
        item.thread_id == thread_id) {
      item.value.store(value, std::memory_order_relaxed);

      const bool was_active =
          item.is_active.exchange(true, std::memory_order_release);
      if (!was_active)
        inactive_item_count_--;

      return;
    }
  }

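  // No existing item matched, so try to make room for a new one by compacting
  // inactive slots out of the in-use range.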
  item_slots_used = TryReclaimInactiveSlots(item_slots_used);

  if (item_slots_used == items_.size()) {
    // The metadata recorder is full, forcing us to drop this metadata. UMA
    // data on occupied metadata slots should help us set a max size that
    // avoids this condition during normal Chrome use.
    return;
  }

  // Wait until the item is fully created before setting |is_active| to true and
  // incrementing |item_slots_used_|, which will signal to readers that the item
  // is ready.
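  //
  // The release stores below pair with the acquire loads in GetItems(): a
  // reader that observes |is_active| == true (or the incremented
  // |item_slots_used_|) also sees the fully written item fields.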
  auto& item = items_[item_slots_used];
  item.name_hash = name_hash;
  item.key = key;
  item.thread_id = thread_id;
  item.value.store(value, std::memory_order_relaxed);
  item.is_active.store(true, std::memory_order_release);
  item_slots_used_.fetch_add(1, std::memory_order_release);
}

void MetadataRecorder::Remove(uint64_t name_hash,
                              std::optional<int64_t> key,
                              std::optional<PlatformThreadId> thread_id) {
  AutoLock lock(write_lock_);

  size_t item_slots_used = item_slots_used_.load(std::memory_order_relaxed);
  for (size_t i = 0; i < item_slots_used; ++i) {
    auto& item = items_[i];
    if (item.name_hash == name_hash && item.key == key &&
        item.thread_id == thread_id) {
      // A removed item will occupy its slot until that slot is reclaimed.
      const bool was_active =
          item.is_active.exchange(false, std::memory_order_relaxed);
      if (was_active)
        inactive_item_count_++;

      return;
    }
  }
}
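// Illustrative write-side sketch (the metric name, |recorder|, and values are
// hypothetical): callers pass a 64-bit hash of a metric name, for example one
// produced by base::HashMetricName(), plus an optional key and thread id that
// together identify the item.
//
//   const uint64_t name_hash = base::HashMetricName("MyMetadata");
//   recorder.Set(name_hash, std::nullopt, std::nullopt, /*value=*/1);
//   ...
//   recorder.Remove(name_hash, std::nullopt, std::nullopt);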

MetadataRecorder::MetadataProvider::MetadataProvider(
    MetadataRecorder* metadata_recorder,
    PlatformThreadId thread_id)
    : metadata_recorder_(metadata_recorder),
      thread_id_(thread_id),
      auto_lock_(metadata_recorder->read_lock_) {}

MetadataRecorder::MetadataProvider::~MetadataProvider() = default;

size_t MetadataRecorder::MetadataProvider::GetItems(
    ItemArray* const items) const {
  return metadata_recorder_->GetItems(items, thread_id_);
}
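// Illustrative read-side sketch (|recorder| and |thread_id| are hypothetical):
// the provider is scoped to the window during which the sampled thread is
// suspended, so slot reclamation stays blocked for the duration of the
// snapshot.
//
//   MetadataRecorder::ItemArray items;
//   size_t count = 0;
//   {
//     MetadataRecorder::MetadataProvider provider(&recorder, thread_id);
//     count = provider.GetItems(&items);
//   }  // |read_lock_| is released here, allowing reclamation to resume.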

size_t MetadataRecorder::GetItems(ItemArray* const items,
                                  PlatformThreadId thread_id) const {
  // If a writer adds a new item after this load, it will be ignored.  We do
  // this instead of calling item_slots_used_.load() explicitly in the for loop
  // bounds checking, which would be expensive.
  //
  // Also note that items are snapshotted sequentially and that items can be
  // modified mid-snapshot by non-suspended threads. This means that there's a
  // small chance that some items, especially those that occur later in the
  // array, may have values slightly "in the future" from when the sample was
  // actually collected. It also means that the array as returned may have never
  // existed in its entirety, although each name/value pair represents a
  // consistent item that existed very shortly after the thread was suspended.
  size_t item_slots_used = item_slots_used_.load(std::memory_order_acquire);
  size_t write_index = 0;
  for (size_t read_index = 0; read_index < item_slots_used; ++read_index) {
    const auto& item = items_[read_index];
    // Because we wait until |is_active| is set to consider an item active and
    // that field is always set last, we ignore half-created items.
    if (item.is_active.load(std::memory_order_acquire) &&
        (!item.thread_id.has_value() || item.thread_id == thread_id)) {
      (*items)[write_index++] =
          Item{item.name_hash, item.key, item.thread_id,
               item.value.load(std::memory_order_relaxed)};
    }
  }

  return write_index;
}

size_t MetadataRecorder::TryReclaimInactiveSlots(size_t item_slots_used) {
  const size_t remaining_slots = MAX_METADATA_COUNT - item_slots_used;

  if (inactive_item_count_ == 0 || inactive_item_count_ < remaining_slots) {
    // This reclaiming threshold has a few nice properties:
    //
    //   - It avoids reclaiming when no items have been removed
    //   - It makes doing so more likely as free slots become more scarce
    //   - It makes doing so less likely when the benefits are lower
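    //
    // For illustration, if MAX_METADATA_COUNT were 50 and 40 slots were in
    // use, reclamation would be attempted only once at least 10 of the 40
    // items had been marked inactive.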
    return item_slots_used;
  }

  if (read_lock_.Try()) {
    // The lock isn't already held by a reader or another thread reclaiming
    // slots.
    item_slots_used = ReclaimInactiveSlots(item_slots_used);
    read_lock_.Release();
  }

  return item_slots_used;
}

size_t MetadataRecorder::ReclaimInactiveSlots(size_t item_slots_used) {
  // From here until the end of the reclamation, we can safely use
  // memory_order_relaxed for all reads and writes. We don't need
  // memory_order_acquire because acquiring the write mutex gives acquire
  // semantics and no other threads can write after we hold that mutex. We don't
  // need memory_order_release because no readers can read until we release the
  // read mutex, which itself has release semantics.
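  //
  // The loop below compacts the array in place: |first_inactive_item_idx|
  // scans forward for inactive slots while |last_active_item_idx| scans
  // backward for active items that can be moved into them.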
  size_t first_inactive_item_idx = 0;
  size_t last_active_item_idx = item_slots_used - 1;
  while (first_inactive_item_idx < last_active_item_idx) {
    ItemInternal& inactive_item = items_[first_inactive_item_idx];
    ItemInternal& active_item = items_[last_active_item_idx];

    if (inactive_item.is_active.load(std::memory_order_relaxed)) {
      // Keep seeking forward to an inactive item.
      ++first_inactive_item_idx;
      continue;
    }

    if (!active_item.is_active.load(std::memory_order_relaxed)) {
      // Keep seeking backward to an active item. Skipping over this item
      // indicates that we're freeing the slot at this index.
      --last_active_item_idx;
      item_slots_used--;
      continue;
    }

    // Move the active item into the inactive slot, carrying its full identity
    // (name_hash, key, thread_id) along with its value.
    inactive_item.name_hash = active_item.name_hash;
    inactive_item.key = active_item.key;
    inactive_item.thread_id = active_item.thread_id;
    inactive_item.value.store(active_item.value.load(std::memory_order_relaxed),
                              std::memory_order_relaxed);
    inactive_item.is_active.store(true, std::memory_order_relaxed);

    ++first_inactive_item_idx;
    --last_active_item_idx;
    item_slots_used--;
  }

  item_slots_used_.store(item_slots_used, std::memory_order_relaxed);
  return item_slots_used;
}

}  // namespace base