// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_buffer.h"

#include <memory>
#include <utility>
#include <vector>

#include "base/functional/bind.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/trace_event_impl.h"

namespace base {
namespace trace_event {

namespace {

class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }
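  // Note: the recyclable queue starts out full. The constructor enqueues
  // indices 0..max_chunks-1 and leaves queue_tail_ == max_chunks, so every
  // chunk slot is immediately available for GetChunk() to hand out.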

  TraceBufferRingBuffer(const TraceBufferRingBuffer&) = delete;
  TraceBufferRingBuffer& operator=(const TraceBufferRingBuffer&) = delete;

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;

    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index].release();
    chunks_[*index] = nullptr;  // Put nullptr in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return std::unique_ptr<TraceBufferChunk>(chunk);
  }

  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = std::move(chunk);
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }
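  // A minimal round-trip sketch (illustrative only, not part of this file;
  // assumes |buffer| points at a TraceBuffer instance). A writer borrows a
  // chunk, fills events into it, and hands it back:
  //
  //   size_t chunk_index;
  //   std::unique_ptr<TraceBufferChunk> chunk = buffer->GetChunk(&chunk_index);
  //   size_t event_index;
  //   TraceEvent* event = chunk->AddTraceEvent(&event_index);
  //   // ... populate |event| ...
  //   buffer->ReturnChunk(chunk_index, std::move(chunk));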

  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return nullptr;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return nullptr;
    return chunk->GetEventAt(handle.event_index);
  }
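  // Note: the seq() comparison rejects stale handles. Once a chunk is
  // recycled via Reset() it gets a new sequence number, so handles that
  // referred to its previous contents resolve to nullptr rather than to an
  // unrelated event.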

  const TraceBufferChunk* NextChunk() override {
    if (chunks_.empty())
      return nullptr;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index].get();
    }
    return nullptr;
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer, sizeof(*this));
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_
               ? queue_tail_ - queue_head_
               : queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }

  size_t queue_capacity() const {
    // One extra slot to distinguish the full state from the empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }
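  // Worked example (illustrative): with max_chunks_ == 3, queue_capacity()
  // is 4; the constructor leaves queue_head_ == 0 and queue_tail_ == 3, so
  // QueueSize() == 3 and QueueIsFull() is already true. The queue is empty
  // only when head == tail, and NextQueueIndex() wraps 3 back to 0, so at
  // most max_chunks_ entries ever occupy the 4 slots.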

  size_t max_chunks_;
  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;

  std::unique_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32_t current_chunk_seq_;
};

class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  TraceBufferVector(const TraceBufferVector&) = delete;
  TraceBufferVector& operator=(const TraceBufferVector&) = delete;

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;

    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    // Put nullptr in the slot of an in-flight chunk.
    chunks_.push_back(nullptr);
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return std::make_unique<TraceBufferChunk>(static_cast<uint32_t>(*index) +
                                              1);
  }

  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = std::move(chunk);
  }

  bool IsFull() const override { return chunks_.size() >= max_chunks_; }
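  // Note: unlike TraceBufferRingBuffer, which never reports being full and
  // keeps recycling its oldest chunks, this buffer reports IsFull() once
  // max_chunks_ chunks have been handed out. GetChunk() does not enforce the
  // limit itself (see the comment there); callers are expected to check
  // IsFull() before adding normal events.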

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return nullptr;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return nullptr;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++].get();
      if (chunk)
        return chunk;
    }
    return nullptr;
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    const size_t chunks_ptr_vector_allocated_size =
        sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
    const size_t chunks_ptr_vector_resident_size =
        sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer,
                  chunks_ptr_vector_allocated_size,
                  chunks_ptr_vector_resident_size);
    for (size_t i = 0; i < chunks_.size(); ++i) {
      TraceBufferChunk* chunk = chunks_[i].get();
      // Skip the in-flight (nullptr) chunks. They will be accounted for by the
      // per-thread-local dumpers; see ThreadLocalEventBuffer::OnMemoryDump.
      if (chunk)
        chunk->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
};

}  // namespace

TraceBufferChunk::TraceBufferChunk(uint32_t seq) : next_free_(0), seq_(seq) {}

TraceBufferChunk::~TraceBufferChunk() = default;

void TraceBufferChunk::Reset(uint32_t new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
  cached_overhead_estimate_.reset();
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}
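// Note: AddTraceEvent() only hands out the next unused slot and DCHECKs that
// space remains; callers are expected to check IsFull() first and request a
// fresh chunk from the TraceBuffer when it returns true.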

void TraceBufferChunk::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  if (!cached_overhead_estimate_) {
    cached_overhead_estimate_ = std::make_unique<TraceEventMemoryOverhead>();

    // When estimating the size of TraceBufferChunk, exclude the array of trace
    // events, as they are computed individually below.
    cached_overhead_estimate_->Add(TraceEventMemoryOverhead::kTraceBufferChunk,
                                   sizeof(*this) - sizeof(chunk_));
  }

  const size_t num_cached_estimated_events =
      cached_overhead_estimate_->GetCount(
          TraceEventMemoryOverhead::kTraceEvent);
  DCHECK_LE(num_cached_estimated_events, size());

  if (IsFull() && num_cached_estimated_events == size()) {
    overhead->Update(*cached_overhead_estimate_);
    return;
  }

  for (size_t i = num_cached_estimated_events; i < size(); ++i)
    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());

  if (IsFull()) {
    cached_overhead_estimate_->AddSelf();
  } else {
    // The unused TraceEvents in |chunk_| are not cached. They will keep
    // changing as new TraceEvents are added to this chunk, so they are
    // computed on the fly.
    const size_t num_unused_trace_events = capacity() - size();
    overhead->Add(TraceEventMemoryOverhead::kUnusedTraceEvent,
                  num_unused_trace_events * sizeof(TraceEvent));
  }

  overhead->Update(*cached_overhead_estimate_);
}
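// A note on the caching above (an observation from the code, not a design
// statement by the authors): per-event estimates accumulate in
// |cached_overhead_estimate_|, so each stored TraceEvent is measured at most
// once, while the unused tail of |chunk_| is recomputed on every call. Once
// the chunk is full and every event has been measured, later calls take the
// early-return path and simply replay the cached totals.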

TraceResultBuffer::OutputCallback
TraceResultBuffer::SimpleOutput::GetCallback() {
  return BindRepeating(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}

TraceResultBuffer::~TraceResultBuffer() = default;

void TraceResultBuffer::SetOutputCallback(OutputCallback json_chunk_callback) {
  output_callback_ = std::move(json_chunk_callback);
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}
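// A minimal usage sketch (illustrative only; the fragment strings are made-up
// JSON): Start() opens the array, AddFragment() inserts commas between
// fragments, Finish() closes it, and SimpleOutput accumulates the result:
//
//   TraceResultBuffer::SimpleOutput output;
//   TraceResultBuffer result_buffer;
//   result_buffer.SetOutputCallback(output.GetCallback());
//   result_buffer.Start();
//   result_buffer.AddFragment("{\"ph\":\"B\"}");
//   result_buffer.AddFragment("{\"ph\":\"E\"}");
//   result_buffer.Finish();
//   // output.json_output is now "[{\"ph\":\"B\"},{\"ph\":\"E\"}]".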

TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
  return new TraceBufferRingBuffer(max_chunks);
}

TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}
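// A minimal creation sketch (illustrative only; the chunk count of 1024 is an
// arbitrary example value). The ring buffer suits continuous recording, since
// it never reports being full and recycles its oldest chunks; the vector
// buffer suits record-until-full use, since it reports IsFull() once
// max_chunks chunks are in use:
//
//   std::unique_ptr<TraceBuffer> ring(
//       TraceBuffer::CreateTraceBufferRingBuffer(1024));
//   std::unique_ptr<TraceBuffer> vector_buffer(
//       TraceBuffer::CreateTraceBufferVectorOfSize(1024));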

}  // namespace trace_event
}  // namespace base