/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/profiling/memory/unwinding.h"

#include <sys/types.h>
#include <unistd.h>

#include <condition_variable>
#include <mutex>

#include <unwindstack/MachineArm.h>
#include <unwindstack/MachineArm64.h>
#include <unwindstack/MachineRiscv64.h>
#include <unwindstack/MachineX86.h>
#include <unwindstack/MachineX86_64.h>
#include <unwindstack/Maps.h>
#include <unwindstack/Memory.h>
#include <unwindstack/Regs.h>
#include <unwindstack/RegsArm.h>
#include <unwindstack/RegsArm64.h>
#include <unwindstack/RegsRiscv64.h>
#include <unwindstack/RegsX86.h>
#include <unwindstack/RegsX86_64.h>
#include <unwindstack/Unwinder.h>
#include <unwindstack/UserArm.h>
#include <unwindstack/UserArm64.h>
#include <unwindstack/UserRiscv64.h>
#include <unwindstack/UserX86.h>
#include <unwindstack/UserX86_64.h>

#include <procinfo/process_map.h>

#include "perfetto/base/logging.h"
#include "perfetto/base/task_runner.h"
#include "perfetto/ext/base/file_utils.h"
#include "perfetto/ext/base/scoped_file.h"
#include "perfetto/ext/base/string_utils.h"
#include "perfetto/ext/base/thread_task_runner.h"

#include "src/profiling/memory/unwound_messages.h"
#include "src/profiling/memory/wire_protocol.h"

namespace perfetto {
namespace profiling {
namespace {

constexpr base::TimeMillis kMapsReparseInterval{500};
constexpr uint32_t kRetryDelayMs = 100;

constexpr size_t kMaxFrames = 500;

// We assume ~300us per unwind on average. If we handle up to 1000 unwinds per
// batch, this makes sure other tasks get to run at least every 300ms if
// unwinding saturates this thread.
constexpr size_t kUnwindBatchSize = 1000;
constexpr size_t kRecordBatchSize = 1024;
constexpr size_t kMaxAllocRecordArenaSize = 2 * kRecordBatchSize;

#pragma GCC diagnostic push
// We do not care about deterministic destructor order.
#pragma GCC diagnostic ignored "-Wglobal-constructors"
#pragma GCC diagnostic ignored "-Wexit-time-destructors"
static std::vector<std::string> kSkipMaps{"heapprofd_client.so",
                                          "heapprofd_client_api.so"};
#pragma GCC diagnostic pop

size_t GetRegsSize(unwindstack::Regs* regs) {
  if (regs->Is32Bit())
    return sizeof(uint32_t) * regs->total_regs();
  return sizeof(uint64_t) * regs->total_regs();
}

void ReadFromRawData(unwindstack::Regs* regs, void* raw_data) {
  memcpy(regs->RawData(), raw_data, GetRegsSize(regs));
}

}  // namespace

std::unique_ptr<unwindstack::Regs> CreateRegsFromRawData(
    unwindstack::ArchEnum arch,
    void* raw_data) {
  std::unique_ptr<unwindstack::Regs> ret;
  switch (arch) {
    case unwindstack::ARCH_X86:
      ret.reset(new unwindstack::RegsX86());
      break;
    case unwindstack::ARCH_X86_64:
      ret.reset(new unwindstack::RegsX86_64());
      break;
    case unwindstack::ARCH_ARM:
      ret.reset(new unwindstack::RegsArm());
      break;
    case unwindstack::ARCH_ARM64:
      ret.reset(new unwindstack::RegsArm64());
      break;
    case unwindstack::ARCH_RISCV64:
      ret.reset(new unwindstack::RegsRiscv64());
      break;
    case unwindstack::ARCH_UNKNOWN:
      break;
  }
  if (ret)
    ReadFromRawData(ret.get(), raw_data);
  return ret;
}

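// Unwinds the callstack for a single allocation sample. The register state
// and the copied stack bytes come from the wire message; reads that fall
// outside the copied stack range are served from the target's /proc/pid/mem
// fd via StackOverlayMemory. Returns false only if the register state could
// not be decoded; unwinding errors are recorded in |out| but still return
// true.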
bool DoUnwind(WireMessage* msg, UnwindingMetadata* metadata, AllocRecord* out) {
  AllocMetadata* alloc_metadata = msg->alloc_header;
  std::unique_ptr<unwindstack::Regs> regs(CreateRegsFromRawData(
      alloc_metadata->arch, alloc_metadata->register_data));
  if (regs == nullptr) {
    PERFETTO_DLOG("Unable to construct unwindstack::Regs");
    unwindstack::FrameData frame_data{};
    frame_data.function_name = "ERROR READING REGISTERS";

    out->frames.clear();
    out->build_ids.clear();
    out->frames.emplace_back(std::move(frame_data));
    out->build_ids.emplace_back("");
    out->error = true;
    return false;
  }
  uint8_t* stack = reinterpret_cast<uint8_t*>(msg->payload);
  std::shared_ptr<unwindstack::Memory> mems =
      std::make_shared<StackOverlayMemory>(metadata->fd_mem,
                                           alloc_metadata->stack_pointer, stack,
                                           msg->payload_size);

  unwindstack::Unwinder unwinder(kMaxFrames, &metadata->fd_maps, regs.get(),
                                 mems);
#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
  unwinder.SetJitDebug(metadata->GetJitDebug(regs->Arch()));
  unwinder.SetDexFiles(metadata->GetDexFiles(regs->Arch()));
#endif
  // Suppress an incorrect "variable may be uninitialized" warning for the if
  // condition after this loop: error_code = unwinder.LastErrorCode() runs at
  // least once.
  unwindstack::ErrorCode error_code = unwindstack::ERROR_NONE;
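  // Unwind up to twice: if the first pass fails with ERROR_INVALID_MAP, or
  // hits a dex pc that is not in any known map, reparse /proc/pid/maps
  // (rate-limited to one reparse per kMapsReparseInterval) and retry.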
  for (int attempt = 0; attempt < 2; ++attempt) {
    if (attempt > 0) {
      if (metadata->last_maps_reparse_time + kMapsReparseInterval >
          base::GetWallTimeMs()) {
        PERFETTO_DLOG("Skipping reparse due to rate limit.");
        break;
      }
      PERFETTO_DLOG("Reparsing maps");
      metadata->ReparseMaps();
      metadata->last_maps_reparse_time = base::GetWallTimeMs();
      // Regs got invalidated by libunwindstack's speculative jump.
      // Reset.
      ReadFromRawData(regs.get(), alloc_metadata->register_data);
      out->reparsed_map = true;
#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
      unwinder.SetJitDebug(metadata->GetJitDebug(regs->Arch()));
      unwinder.SetDexFiles(metadata->GetDexFiles(regs->Arch()));
#endif
    }
    out->frames.swap(unwinder.frames());  // Provide the unwinder buffer to use.
    unwinder.Unwind(&kSkipMaps, /*map_suffixes_to_ignore=*/nullptr);
    out->frames.swap(unwinder.frames());  // Take the buffer back.
    error_code = unwinder.LastErrorCode();
    if (error_code != unwindstack::ERROR_INVALID_MAP &&
        (unwinder.warnings() & unwindstack::WARNING_DEX_PC_NOT_IN_MAP) == 0) {
      break;
    }
  }
  out->build_ids.resize(out->frames.size());
  for (size_t i = 0; i < out->frames.size(); ++i) {
    out->build_ids[i] = metadata->GetBuildId(out->frames[i]);
  }

  if (error_code != unwindstack::ERROR_NONE) {
    PERFETTO_DLOG("Unwinding error %" PRIu8, error_code);
    unwindstack::FrameData frame_data{};
    frame_data.function_name =
        "ERROR " + StringifyLibUnwindstackError(error_code);

    out->frames.emplace_back(std::move(frame_data));
    out->build_ids.emplace_back("");
    out->error = true;
  }
  return true;
}

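// Synchronously tears down all client connections: posts a task to the
// unwinding thread that shuts down every client socket and clears
// client_data_, then blocks on a condition variable until that task has run.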
UnwindingWorker::~UnwindingWorker() {
  if (thread_task_runner_.get() == nullptr) {
    return;
  }
  std::mutex mutex;
  std::condition_variable cv;

  std::unique_lock<std::mutex> lock(mutex);
  bool done = false;
  thread_task_runner_.PostTask([&mutex, &cv, &done, this] {
    for (auto& it : client_data_) {
      auto& client_data = it.second;
      client_data.sock->Shutdown(false);
    }
    client_data_.clear();

    std::lock_guard<std::mutex> inner_lock(mutex);
    done = true;
    cv.notify_one();
  });
  cv.wait(lock, [&done] { return done; });
}

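// Called when a client socket disconnects. If the shared memory buffer still
// holds unread data, a DrainJob reads out the remainder before the disconnect
// is finished; otherwise the disconnect is finished immediately.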
void UnwindingWorker::OnDisconnect(base::UnixSocket* self) {
  pid_t peer_pid = self->peer_pid_linux();
  auto it = client_data_.find(peer_pid);
  if (it == client_data_.end()) {
    PERFETTO_DFATAL_OR_ELOG("Disconnected unexpected socket.");
    return;
  }

  ClientData& client_data = it->second;
  SharedRingBuffer& shmem = client_data.shmem;
  client_data.drain_bytes = shmem.read_avail();

  if (client_data.drain_bytes != 0) {
    DrainJob(peer_pid);
  } else {
    FinishDisconnect(it);
  }
}

void UnwindingWorker::RemoveClientData(
    std::map<pid_t, ClientData>::iterator client_data_iterator) {
  client_data_.erase(client_data_iterator);
  if (client_data_.empty()) {
    // We got rid of the last client. Flush and destruct AllocRecords in the
    // arena. Disable the arena (it will not accept returning borrowed
    // records) in case there are pending AllocRecords on the main thread.
    alloc_record_arena_.Disable();
  }
}

void UnwindingWorker::FinishDisconnect(
    std::map<pid_t, ClientData>::iterator client_data_iterator) {
  pid_t peer_pid = client_data_iterator->first;
  ClientData& client_data = client_data_iterator->second;
  SharedRingBuffer& shmem = client_data.shmem;

  if (!client_data.free_records.empty()) {
    delegate_->PostFreeRecord(this, std::move(client_data.free_records));
  }

  SharedRingBuffer::Stats stats = {};
  {
    auto lock = shmem.AcquireLock(ScopedSpinlock::Mode::Try);
    if (lock.locked())
      stats = shmem.GetStats(lock);
    else
      PERFETTO_ELOG("Failed to lock shmem to get stats.");
  }
  DataSourceInstanceID ds_id = client_data.data_source_instance_id;

  RemoveClientData(client_data_iterator);
  delegate_->PostSocketDisconnected(this, ds_id, peer_pid, stats);
}

void UnwindingWorker::OnDataAvailable(base::UnixSocket* self) {
  // Drain the socket buffer to clear the notification.
  char recv_buf[kUnwindBatchSize];
  self->Receive(recv_buf, sizeof(recv_buf));
  BatchUnwindJob(self->peer_pid_linux());
}

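// Reads and unwinds up to kUnwindBatchSize records from the client's shared
// memory buffer. Returns kHasMore if a full batch was processed (or a maps
// reparse interrupted the batch), kReadSome if some data was read before the
// buffer ran dry, and kReadNone if the buffer was empty.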
UnwindingWorker::ReadAndUnwindBatchResult UnwindingWorker::ReadAndUnwindBatch(
    ClientData* client_data) {
  SharedRingBuffer& shmem = client_data->shmem;
  SharedRingBuffer::Buffer buf;
  ReadAndUnwindBatchResult res;

  size_t i;
  for (i = 0; i < kUnwindBatchSize; ++i) {
    uint64_t reparses_before = client_data->metadata.reparses;
    buf = shmem.BeginRead();
    if (!buf)
      break;
    HandleBuffer(this, &alloc_record_arena_, buf, client_data,
                 client_data->sock->peer_pid_linux(), delegate_);
    res.bytes_read += shmem.EndRead(std::move(buf));
    // Reparsing takes time, so process the rest in a new batch to avoid timing
    // out.
    if (reparses_before < client_data->metadata.reparses) {
      res.status = ReadAndUnwindBatchResult::Status::kHasMore;
      return res;
    }
  }

  if (i == kUnwindBatchSize) {
    res.status = ReadAndUnwindBatchResult::Status::kHasMore;
  } else if (i > 0) {
    res.status = ReadAndUnwindBatchResult::Status::kReadSome;
  } else {
    res.status = ReadAndUnwindBatchResult::Status::kReadNone;
  }
  return res;
}

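// Recurring task that consumes a connected client's shared memory buffer. It
// reposts itself while data remains (with a kRetryDelayMs delay after a
// partial read) and pauses the reader once the buffer is empty, so the client
// notifies us over the socket when new data arrives.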
void UnwindingWorker::BatchUnwindJob(pid_t peer_pid) {
  auto it = client_data_.find(peer_pid);
  if (it == client_data_.end()) {
    // This can happen if the client disconnected before the buffer was fully
    // handled.
    PERFETTO_DLOG("Unexpected data.");
    return;
  }
  ClientData& client_data = it->second;
  if (client_data.drain_bytes != 0) {
    // This process disconnected and we're reading out the remainder of its
    // buffered data in a dedicated recurring task (DrainJob), so this task has
    // nothing to do.
    return;
  }

  bool job_reposted = false;
  bool reader_paused = false;
  switch (ReadAndUnwindBatch(&client_data).status) {
    case ReadAndUnwindBatchResult::Status::kHasMore:
      thread_task_runner_.get()->PostTask(
          [this, peer_pid] { BatchUnwindJob(peer_pid); });
      job_reposted = true;
      break;
    case ReadAndUnwindBatchResult::Status::kReadSome:
      thread_task_runner_.get()->PostDelayedTask(
          [this, peer_pid] { BatchUnwindJob(peer_pid); }, kRetryDelayMs);
      job_reposted = true;
      break;
    case ReadAndUnwindBatchResult::Status::kReadNone:
      client_data.shmem.SetReaderPaused();
      reader_paused = true;
      break;
  }

  // We need to either repost the job, or set the reader paused bit. By
  // setting that bit, we inform the client that we want to be notified when
  // new data is written to the shared memory buffer.
  // If we do neither of these things, we will not read from the shared memory
  // buffer again.
  PERFETTO_CHECK(job_reposted || reader_paused);
}

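// Recurring task that reads out the remainder of a disconnected client's
// shared memory buffer. It reschedules itself until the drain_bytes snapshot
// taken at disconnect time has been consumed, then completes the disconnect.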
void UnwindingWorker::DrainJob(pid_t peer_pid) {
  auto it = client_data_.find(peer_pid);
  if (it == client_data_.end()) {
    return;
  }
  ClientData& client_data = it->second;
  auto res = ReadAndUnwindBatch(&client_data);
  switch (res.status) {
    case ReadAndUnwindBatchResult::Status::kHasMore:
      if (res.bytes_read < client_data.drain_bytes) {
        client_data.drain_bytes -= res.bytes_read;
        thread_task_runner_.get()->PostTask(
            [this, peer_pid] { DrainJob(peer_pid); });
        return;
      }
      // ReadAndUnwindBatch read more than client_data.drain_bytes.
      break;
    case ReadAndUnwindBatchResult::Status::kReadSome:
      // ReadAndUnwindBatch read all the available data (for now) in the shared
      // memory buffer.
    case ReadAndUnwindBatchResult::Status::kReadNone:
      // There was no data in the shared memory buffer.
      break;
  }
  // No further drain task has been scheduled. Drain is finished. Finish the
  // disconnect operation as well.

  FinishDisconnect(it);
}

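// Decodes one wire message out of the shared memory buffer and routes it:
// Malloc records are unwound (unless allocations are streamed without
// unwinding) and posted individually; Free records are batched up to
// kRecordBatchSize before being posted; HeapName records are posted
// immediately.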
// static
void UnwindingWorker::HandleBuffer(UnwindingWorker* self,
                                   AllocRecordArena* alloc_record_arena,
                                   const SharedRingBuffer::Buffer& buf,
                                   ClientData* client_data,
                                   pid_t peer_pid,
                                   Delegate* delegate) {
  UnwindingMetadata* unwinding_metadata = &client_data->metadata;
  DataSourceInstanceID data_source_instance_id =
      client_data->data_source_instance_id;
  WireMessage msg;
  // TODO(fmayer): standardise on char* or uint8_t*.
  // char* has stronger guarantees regarding aliasing.
  // see https://timsong-cpp.github.io/cppwp/n3337/basic.lval#10.8
  if (!ReceiveWireMessage(reinterpret_cast<char*>(buf.data), buf.size, &msg)) {
    PERFETTO_DFATAL_OR_ELOG("Failed to receive wire message.");
    return;
  }

  if (msg.record_type == RecordType::Malloc) {
    std::unique_ptr<AllocRecord> rec = alloc_record_arena->BorrowAllocRecord();
    rec->alloc_metadata = *msg.alloc_header;
    rec->pid = peer_pid;
    rec->data_source_instance_id = data_source_instance_id;
    auto start_time_us = base::GetWallTimeNs() / 1000;
    if (!client_data->stream_allocations)
      DoUnwind(&msg, unwinding_metadata, rec.get());
    rec->unwinding_time_us = static_cast<uint64_t>(
        ((base::GetWallTimeNs() / 1000) - start_time_us).count());
    delegate->PostAllocRecord(self, std::move(rec));
  } else if (msg.record_type == RecordType::Free) {
    FreeRecord rec;
    rec.pid = peer_pid;
    rec.data_source_instance_id = data_source_instance_id;
    // We need to copy this, so we can return the memory to the shmem buffer.
    memcpy(&rec.entry, msg.free_header, sizeof(*msg.free_header));
    client_data->free_records.emplace_back(std::move(rec));
    if (client_data->free_records.size() == kRecordBatchSize) {
      delegate->PostFreeRecord(self, std::move(client_data->free_records));
      client_data->free_records.clear();
      client_data->free_records.reserve(kRecordBatchSize);
    }
  } else if (msg.record_type == RecordType::HeapName) {
    HeapNameRecord rec;
    rec.pid = peer_pid;
    rec.data_source_instance_id = data_source_instance_id;
    memcpy(&rec.entry, msg.heap_name_header, sizeof(*msg.heap_name_header));
    rec.entry.heap_name[sizeof(rec.entry.heap_name) - 1] = '\0';
    delegate->PostHeapNameRecord(self, std::move(rec));
  } else {
    PERFETTO_DFATAL_OR_ELOG("Invalid record type.");
  }
}

void UnwindingWorker::PostHandoffSocket(HandoffData handoff_data) {
  // Even with C++14, this cannot be moved, as std::function has to be
  // copyable, which HandoffData is not.
  HandoffData* raw_data = new HandoffData(std::move(handoff_data));
  // We do not need to use a WeakPtr here because the task runner will not
  // outlive its UnwindingWorker.
  thread_task_runner_.get()->PostTask([this, raw_data] {
    HandoffData data = std::move(*raw_data);
    delete raw_data;
    HandleHandoffSocket(std::move(data));
  });
}

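// Runs on the unwinding thread: adopts the socket handed off by the main
// thread, registers the client's metadata and shared memory buffer, and
// pauses the reader until the client signals that data is available.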
void UnwindingWorker::HandleHandoffSocket(HandoffData handoff_data) {
  auto sock = base::UnixSocket::AdoptConnected(
      handoff_data.sock.ReleaseFd(), this, this->thread_task_runner_.get(),
      base::SockFamily::kUnix, base::SockType::kStream);
  pid_t peer_pid = sock->peer_pid_linux();

  UnwindingMetadata metadata(std::move(handoff_data.maps_fd),
                             std::move(handoff_data.mem_fd));
  ClientData client_data{
      handoff_data.data_source_instance_id,
      std::move(sock),
      std::move(metadata),
      std::move(handoff_data.shmem),
      std::move(handoff_data.client_config),
      handoff_data.stream_allocations,
      /*drain_bytes=*/0,
      /*free_records=*/{},
  };
  client_data.free_records.reserve(kRecordBatchSize);
  client_data.shmem.SetReaderPaused();
  client_data_.emplace(peer_pid, std::move(client_data));
  alloc_record_arena_.Enable();
}

void UnwindingWorker::HandleDrainFree(DataSourceInstanceID ds_id, pid_t pid) {
  auto it = client_data_.find(pid);
  if (it != client_data_.end()) {
    ClientData& client_data = it->second;

    if (!client_data.free_records.empty()) {
      delegate_->PostFreeRecord(this, std::move(client_data.free_records));
      client_data.free_records.clear();
      client_data.free_records.reserve(kRecordBatchSize);
    }
  }
  delegate_->PostDrainDone(this, ds_id);
}

void UnwindingWorker::PostDisconnectSocket(pid_t pid) {
  // We do not need to use a WeakPtr here because the task runner will not
  // outlive its UnwindingWorker.
  thread_task_runner_.get()->PostTask(
      [this, pid] { HandleDisconnectSocket(pid); });
}

void UnwindingWorker::PostPurgeProcess(pid_t pid) {
  // We do not need to use a WeakPtr here because the task runner will not
  // outlive its UnwindingWorker.
  thread_task_runner_.get()->PostTask([this, pid] {
    auto it = client_data_.find(pid);
    if (it == client_data_.end()) {
      return;
    }
    RemoveClientData(it);
  });
}

void UnwindingWorker::PostDrainFree(DataSourceInstanceID ds_id, pid_t pid) {
  // We do not need to use a WeakPtr here because the task runner will not
  // outlive its UnwindingWorker.
  thread_task_runner_.get()->PostTask(
      [this, ds_id, pid] { HandleDrainFree(ds_id, pid); });
}

void UnwindingWorker::HandleDisconnectSocket(pid_t pid) {
  auto it = client_data_.find(pid);
  if (it == client_data_.end()) {
    // This is expected if the client voluntarily disconnects before the
    // profiling session ended. In that case, there is a race between the main
    // thread learning about the disconnect and it calling back here.
    return;
  }
  ClientData& client_data = it->second;
  // Shutdown and call OnDisconnect handler.
  client_data.shmem.SetShuttingDown();
  client_data.sock->Shutdown(/* notify= */ true);
}

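// AllocRecordArena recycles AllocRecord objects between the unwinding thread
// and the main thread: Borrow hands out a pooled record (or allocates a fresh
// one), and Return puts it back unless the arena is disabled or already holds
// kMaxAllocRecordArenaSize records.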
std::unique_ptr<AllocRecord> AllocRecordArena::BorrowAllocRecord() {
  std::lock_guard<std::mutex> l(*alloc_records_mutex_);
  if (!alloc_records_.empty()) {
    std::unique_ptr<AllocRecord> result = std::move(alloc_records_.back());
    alloc_records_.pop_back();
    return result;
  }
  return std::unique_ptr<AllocRecord>(new AllocRecord());
}

void AllocRecordArena::ReturnAllocRecord(std::unique_ptr<AllocRecord> record) {
  std::lock_guard<std::mutex> l(*alloc_records_mutex_);
  if (enabled_ && record && alloc_records_.size() < kMaxAllocRecordArenaSize)
    alloc_records_.emplace_back(std::move(record));
}

void AllocRecordArena::Disable() {
  std::lock_guard<std::mutex> l(*alloc_records_mutex_);
  alloc_records_.clear();
  enabled_ = false;
}

void AllocRecordArena::Enable() {
  std::lock_guard<std::mutex> l(*alloc_records_mutex_);
  enabled_ = true;
}

UnwindingWorker::Delegate::~Delegate() = default;

}  // namespace profiling
}  // namespace perfetto