// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/process_memory_dump.h"

#include <errno.h>

#include <memory>
#include <optional>
#include <vector>

#include "base/bits.h"
#include "base/logging.h"
#include "base/memory/page_size.h"
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/notimplemented.h"
#include "base/process/process_metrics.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_infra_background_allowlist.h"
#include "base/trace_event/trace_event_impl.h"
#include "base/trace_event/traced_value.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
#include "third_party/perfetto/protos/perfetto/trace/memory_graph.pbzero.h"
#include "third_party/perfetto/protos/perfetto/trace/trace_packet.pbzero.h"

#if BUILDFLAG(IS_IOS)
#include <mach/vm_page_size.h>
#endif

#if BUILDFLAG(IS_POSIX)
#include <sys/mman.h>
#endif

#if BUILDFLAG(IS_WIN)
#include <windows.h>  // Must be in front of other Windows header files.

#include <Psapi.h>
#endif

#if BUILDFLAG(IS_FUCHSIA)
#include <tuple>

#include "base/notreached.h"
#endif

using ProcessSnapshot =
    ::perfetto::protos::pbzero::MemoryTrackerSnapshot_ProcessSnapshot;

namespace base {
namespace trace_event {

namespace {

const char kEdgeTypeOwnership[] = "ownership";

std::string GetSharedGlobalAllocatorDumpName(
    const MemoryAllocatorDumpGuid& guid) {
  return "global/" + guid.ToString();
}

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
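// Returns the number of system pages needed to cover |mapped_size| bytes,
// rounding up so that a partially-covered trailing page still counts. For
// example, with a 4096-byte page size, a 10240-byte mapping spans 3 pages.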
size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
  return (mapped_size + page_size - 1) / page_size;
}
#endif

// Returns a token identifying the current process. It is created lazily on
// first use and then reused for the lifetime of the process, so all dumps
// from this process share the same token.
UnguessableToken GetTokenForCurrentProcess() {
  static UnguessableToken instance = UnguessableToken::Create();
  return instance;
}

}  // namespace

// static
bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
// static
size_t ProcessMemoryDump::GetSystemPageSize() {
#if BUILDFLAG(IS_IOS)
  // On iOS, getpagesize() returns the user page size, but the kernel page
  // size is needed to size the arrays passed to mincore(). Use
  // vm_kernel_page_size as recommended by Apple,
  // https://forums.developer.apple.com/thread/47532/.
  // Refer to http://crbug.com/542671 and Apple rdar://23651782.
  return vm_kernel_page_size;
#else
  return base::GetPageSize();
#endif  // BUILDFLAG(IS_IOS)
}

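// Usage sketch (illustrative; real callers are memory-infra dump providers,
// and the dump name is hypothetical). |region| must be page-aligned:
//
//   std::optional<size_t> resident =
//       ProcessMemoryDump::CountResidentBytes(region, region_size);
//   if (resident) {
//     dump->AddScalar("resident_size", MemoryAllocatorDump::kUnitsBytes,
//                     *resident);
//   }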
// static
std::optional<size_t> ProcessMemoryDump::CountResidentBytes(
    void* start_address,
    size_t mapped_size) {
  const size_t page_size = GetSystemPageSize();
  const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
  DCHECK_EQ(0u, start_pointer % page_size);

  size_t offset = 0;
  size_t total_resident_bytes = 0;
  bool failure = false;

  // An array as large as the number of pages in the memory segment needs to
  // be passed to the query function. To avoid allocating a large array, the
  // given block of memory is split into chunks of size |kMaxChunkSize|.
  const size_t kMaxChunkSize = 8 * 1024 * 1024;
  size_t max_vec_size =
      GetSystemPageCount(std::min(mapped_size, kMaxChunkSize), page_size);
#if BUILDFLAG(IS_WIN)
  std::unique_ptr<PSAPI_WORKING_SET_EX_INFORMATION[]> vec(
      new PSAPI_WORKING_SET_EX_INFORMATION[max_vec_size]);
#elif BUILDFLAG(IS_APPLE)
  std::unique_ptr<char[]> vec(new char[max_vec_size]);
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  std::unique_ptr<unsigned char[]> vec(new unsigned char[max_vec_size]);
#endif

  while (offset < mapped_size) {
    uintptr_t chunk_start = (start_pointer + offset);
    const size_t chunk_size = std::min(mapped_size - offset, kMaxChunkSize);
    const size_t page_count = GetSystemPageCount(chunk_size, page_size);
    size_t resident_page_count = 0;
#if BUILDFLAG(IS_WIN)
    for (size_t i = 0; i < page_count; i++) {
      vec[i].VirtualAddress =
          reinterpret_cast<void*>(chunk_start + i * page_size);
    }
    DWORD vec_size = static_cast<DWORD>(
        page_count * sizeof(PSAPI_WORKING_SET_EX_INFORMATION));
    failure = !QueryWorkingSetEx(GetCurrentProcess(), vec.get(), vec_size);

    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i].VirtualAttributes.Valid;
#elif BUILDFLAG(IS_FUCHSIA)
    // TODO(crbug.com/851760): Implement counting resident bytes.
    // For now, log and avoid unused variable warnings.
    NOTIMPLEMENTED_LOG_ONCE();
    std::ignore = chunk_start;
    std::ignore = page_count;
#elif BUILDFLAG(IS_APPLE)
    // mincore() on macOS does not fail with EAGAIN.
    failure =
        !!mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i] & MINCORE_INCORE ? 1 : 0;
#elif BUILDFLAG(IS_POSIX)
    int error_counter = 0;
    int result = 0;
    // HANDLE_EINTR retries up to 100 times, so follow the same pattern here
    // for EAGAIN.
    do {
      result =
#if BUILDFLAG(IS_AIX)
          mincore(reinterpret_cast<char*>(chunk_start), chunk_size,
                  reinterpret_cast<char*>(vec.get()));
#else
          mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
#endif
    } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
    failure = !!result;

    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i] & 1;
#endif

    if (failure)
      break;

    total_resident_bytes += resident_page_count * page_size;
    offset += kMaxChunkSize;
  }

  DCHECK(!failure);
  if (failure) {
    LOG(ERROR) << "CountResidentBytes failed. The resident size is invalid.";
    return std::nullopt;
  }
  return total_resident_bytes;
}

// static
std::optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
    void* start_address,
    size_t mapped_size) {
  // `MapAt()` performs some internal arithmetic to allow non-page-aligned
  // offsets, but the memory accounting still expects to work with page-aligned
  // allocations.
  //
  // TODO(dcheng): one peculiarity here is that the shmem implementation uses
  // `base::SysInfo::VMAllocationGranularity()` while this file uses
  // `GetSystemPageSize()`. It'd be nice not to have two names for the same
  // thing...
  uint8_t* aligned_start_address = base::bits::AlignDown(
      static_cast<uint8_t*>(start_address), GetSystemPageSize());
  size_t adjusted_size =
      mapped_size + static_cast<size_t>(static_cast<uint8_t*>(start_address) -
                                        aligned_start_address);
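  // For example, with a 4096-byte page size, start_address = 0x5004 and
  // mapped_size = 0x2000 yield aligned_start_address = 0x5000 and
  // adjusted_size = 0x2004, so the leading partial page is counted too.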

#if BUILDFLAG(IS_APPLE)
  // On macOS and iOS, use mach_vm_region|vm_region_64 instead of mincore for
  // performance (crbug.com/742042).
  mach_vm_size_t dummy_size = 0;
  mach_vm_address_t address =
      reinterpret_cast<mach_vm_address_t>(aligned_start_address);
  vm_region_top_info_data_t info;
  MachVMRegionResult result =
      GetTopInfo(mach_task_self(), &dummy_size, &address, &info);
  if (result == MachVMRegionResult::Error) {
    LOG(ERROR) << "CountResidentBytesInSharedMemory failed. The resident size "
                  "is invalid.";
    return std::nullopt;
  }

  size_t resident_pages =
      info.private_pages_resident + info.shared_pages_resident;

  // On macOS and iOS, measurements of private memory footprint overcount by
  // the number of faulted pages in anonymous shared memory. To discount for
  // this, we touch all the resident pages in anonymous shared memory here,
  // thus making them faulted as well. This relies on two assumptions:
  //
  // 1) Consumers use shared memory from front to back. Thus, if there are
  // (N) resident pages, those pages represent the first N * PAGE_SIZE bytes
  // in the shared memory region.
  //
  // 2) This logic is run shortly before the logic that calculates
  // phys_footprint, thus ensuring that the discrepancy between faulted and
  // resident pages is minimal.
  //
  // The performance penalty is expected to be small.
  //
  // * Most of the time, we expect the pages to already be resident and
  // faulted, thus incurring only a cache-read penalty (since we read from
  // each resident page).
  //
  // * Rarely, we expect the pages to be resident but not faulted, resulting
  // in soft faults + cache penalty.
  //
  // * If assumption (1) is invalid, this will potentially fault some
  // previously non-resident pages, thus increasing memory usage, without
  // fixing the accounting.
  //
  // Sanity check in case the mapped size is less than the total size of the
  // region.
  size_t pages_to_fault =
      std::min(resident_pages, (adjusted_size + PAGE_SIZE - 1) / PAGE_SIZE);

  volatile uint8_t* base_address = const_cast<uint8_t*>(aligned_start_address);
  for (size_t i = 0; i < pages_to_fault; ++i) {
    // Reading from a volatile is a visible side effect for the purposes of
    // optimization, which guarantees that the optimizer will not elide this
    // read.
    base_address[i * PAGE_SIZE];
  }

  return resident_pages * PAGE_SIZE;
#else
  return CountResidentBytes(aligned_start_address, adjusted_size);
#endif  // BUILDFLAG(IS_APPLE)
}

#endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)

ProcessMemoryDump::ProcessMemoryDump(const MemoryDumpArgs& dump_args)
    : process_token_(GetTokenForCurrentProcess()), dump_args_(dump_args) {}

ProcessMemoryDump::~ProcessMemoryDump() = default;
ProcessMemoryDump::ProcessMemoryDump(ProcessMemoryDump&& other) = default;
ProcessMemoryDump& ProcessMemoryDump::operator=(ProcessMemoryDump&& other) =
    default;

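// Typical dump-provider usage (illustrative; the dump name and size value are
// hypothetical):
//
//   MemoryAllocatorDump* dump = pmd->CreateAllocatorDump("myallocator/heap");
//   dump->AddScalar(MemoryAllocatorDump::kNameSize,
//                   MemoryAllocatorDump::kUnitsBytes, heap_size);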
MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
    const std::string& absolute_name) {
  return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
      absolute_name, dump_args_.level_of_detail, GetDumpId(absolute_name)));
}

MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
    const std::string& absolute_name,
    const MemoryAllocatorDumpGuid& guid) {
  return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
      absolute_name, dump_args_.level_of_detail, guid));
}

MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
    std::unique_ptr<MemoryAllocatorDump> mad) {
  // In background mode, return the black hole dump if a dump name that is not
  // in the allowlist is given.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::kBackground &&
      !IsMemoryAllocatorDumpNameInAllowlist(mad->absolute_name())) {
    return GetBlackHoleMad(mad->absolute_name());
  }

  auto insertion_result = allocator_dumps_.insert(
      std::make_pair(mad->absolute_name(), std::move(mad)));
  MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
  DCHECK(insertion_result.second)
      << "Duplicate name: " << inserted_mad->absolute_name();
  return inserted_mad;
}

MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
    const std::string& absolute_name) const {
  auto it = allocator_dumps_.find(absolute_name);
  if (it != allocator_dumps_.end())
    return it->second.get();
  return nullptr;
}

MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
    const std::string& absolute_name) {
  MemoryAllocatorDump* mad = GetAllocatorDump(absolute_name);
  return mad ? mad : CreateAllocatorDump(absolute_name);
}

MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) {
  // A shared allocator dump can be shared within a process and the guid could
  // have been created already.
  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
  if (mad && mad != black_hole_mad_.get()) {
    // The weak flag is cleared because this method should create a non-weak
    // dump.
    mad->clear_flags(MemoryAllocatorDump::Flags::WEAK);
    return mad;
  }
  return CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
}

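// A weak shared dump acts as a placeholder: memory-infra's downstream graph
// processing is expected to keep it only if some process also creates a
// non-weak dump with the same guid, so creating a weak dump never forces the
// global node to appear on its own.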
MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) {
  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
  if (mad && mad != black_hole_mad_.get())
    return mad;
  mad = CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
  mad->set_flags(MemoryAllocatorDump::Flags::WEAK);
  return mad;
}

MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) const {
  return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
}

void ProcessMemoryDump::DumpHeapUsage(
    const std::unordered_map<base::trace_event::AllocationContext,
                             base::trace_event::AllocationMetrics>&
        metrics_by_context,
    base::trace_event::TraceEventMemoryOverhead& overhead,
    const char* allocator_name) {
  std::string base_name =
      base::StringPrintf("tracing/heap_profiler_%s", allocator_name);
  overhead.DumpInto(base_name.c_str(), this);
}

void ProcessMemoryDump::SetAllocatorDumpsForSerialization(
    std::vector<std::unique_ptr<MemoryAllocatorDump>> dumps) {
  DCHECK(allocator_dumps_.empty());
  for (std::unique_ptr<MemoryAllocatorDump>& dump : dumps)
    AddAllocatorDumpInternal(std::move(dump));
}

std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>
ProcessMemoryDump::GetAllEdgesForSerialization() const {
  std::vector<MemoryAllocatorDumpEdge> edges;
  edges.reserve(allocator_dumps_edges_.size());
  for (const auto& it : allocator_dumps_edges_)
    edges.push_back(it.second);
  return edges;
}

void ProcessMemoryDump::SetAllEdgesForSerialization(
    const std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>& edges) {
  DCHECK(allocator_dumps_edges_.empty());
  for (const MemoryAllocatorDumpEdge& edge : edges) {
    auto it_and_inserted = allocator_dumps_edges_.emplace(edge.source, edge);
    DCHECK(it_and_inserted.second);
  }
}

void ProcessMemoryDump::Clear() {
  allocator_dumps_.clear();
  allocator_dumps_edges_.clear();
}

void ProcessMemoryDump::TakeAllDumpsFrom(ProcessMemoryDump* other) {
  // Moves the ownership of all MemoryAllocatorDump(s) contained in |other|
  // into this ProcessMemoryDump, checking for duplicates.
  for (auto& it : other->allocator_dumps_)
    AddAllocatorDumpInternal(std::move(it.second));
  other->allocator_dumps_.clear();

  // Move all the edges.
  allocator_dumps_edges_.insert(other->allocator_dumps_edges_.begin(),
                                other->allocator_dumps_edges_.end());
  other->allocator_dumps_edges_.clear();
}

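// Emits the dumps and edges into the trace's JSON form. The output is roughly
// shaped like (illustrative):
//
//   "allocators": { "<dump name>": { ... }, ... },
//   "allocators_graph": [
//     { "source": "<guid>", "target": "<guid>", "importance": <int>,
//       "type": "ownership" },
//     ...
//   ]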
void ProcessMemoryDump::SerializeAllocatorDumpsInto(TracedValue* value) const {
  if (!allocator_dumps_.empty()) {
    value->BeginDictionary("allocators");
    for (const auto& allocator_dump_it : allocator_dumps_)
      allocator_dump_it.second->AsValueInto(value);
    value->EndDictionary();
  }

  value->BeginArray("allocators_graph");
  for (const auto& it : allocator_dumps_edges_) {
    const MemoryAllocatorDumpEdge& edge = it.second;
    value->BeginDictionary();
    value->SetString("source", edge.source.ToString());
    value->SetString("target", edge.target.ToString());
    value->SetInteger("importance", edge.importance);
    value->SetString("type", kEdgeTypeOwnership);
    value->EndDictionary();
  }
  value->EndArray();
}

void ProcessMemoryDump::SerializeAllocatorDumpsInto(
    perfetto::protos::pbzero::MemoryTrackerSnapshot* memory_snapshot,
    const base::ProcessId pid) const {
  ProcessSnapshot* process_snapshot =
      memory_snapshot->add_process_memory_dumps();
  process_snapshot->set_pid(static_cast<int>(pid));

  for (const auto& allocator_dump_it : allocator_dumps_) {
    ProcessSnapshot::MemoryNode* memory_node =
        process_snapshot->add_allocator_dumps();
    allocator_dump_it.second->AsProtoInto(memory_node);
  }

  for (const auto& it : allocator_dumps_edges_) {
    const MemoryAllocatorDumpEdge& edge = it.second;
    ProcessSnapshot::MemoryEdge* memory_edge =
        process_snapshot->add_memory_edges();

    memory_edge->set_source_id(edge.source.ToUint64());
    memory_edge->set_target_id(edge.target.ToUint64());
    // TODO(crbug.com/1333557): Fix .proto and remove this cast.
    memory_edge->set_importance(static_cast<uint32_t>(edge.importance));
  }
}

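// When called twice for the same source, the existing edge is kept but its
// importance is raised to the maximum of the two calls. For example,
// AddOwnershipEdge(s, t, 1) followed by AddOwnershipEdge(s, t, 3) leaves a
// single s -> t edge with importance 3.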
void ProcessMemoryDump::AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
                                         const MemoryAllocatorDumpGuid& target,
                                         int importance) {
  // This will either override an existing edge or create a new one.
  auto it = allocator_dumps_edges_.find(source);
  int max_importance = importance;
  if (it != allocator_dumps_edges_.end()) {
    DCHECK_EQ(target.ToUint64(), it->second.target.ToUint64());
    max_importance = std::max(importance, it->second.importance);
  }
  allocator_dumps_edges_[source] = {source, target, max_importance,
                                    false /* overridable */};
}

void ProcessMemoryDump::AddOwnershipEdge(
    const MemoryAllocatorDumpGuid& source,
    const MemoryAllocatorDumpGuid& target) {
  AddOwnershipEdge(source, target, 0 /* importance */);
}

void ProcessMemoryDump::AddOverridableOwnershipEdge(
    const MemoryAllocatorDumpGuid& source,
    const MemoryAllocatorDumpGuid& target,
    int importance) {
  if (allocator_dumps_edges_.count(source) == 0) {
    allocator_dumps_edges_[source] = {source, target, importance,
                                      true /* overridable */};
  } else {
    // An edge between the source and target already exists. Do nothing here,
    // since the new overridable edge is implicitly overridden by the strong
    // edge that was created earlier.
    DCHECK(!allocator_dumps_edges_[source].overridable);
  }
}

void ProcessMemoryDump::CreateSharedMemoryOwnershipEdge(
    const MemoryAllocatorDumpGuid& client_local_dump_guid,
    const UnguessableToken& shared_memory_guid,
    int importance) {
  CreateSharedMemoryOwnershipEdgeInternal(client_local_dump_guid,
                                          shared_memory_guid, importance,
                                          false /*is_weak*/);
}

void ProcessMemoryDump::CreateWeakSharedMemoryOwnershipEdge(
    const MemoryAllocatorDumpGuid& client_local_dump_guid,
    const UnguessableToken& shared_memory_guid,
    int importance) {
  CreateSharedMemoryOwnershipEdgeInternal(
      client_local_dump_guid, shared_memory_guid, importance, true /*is_weak*/);
}

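// Builds the two-hop ownership chain
//
//   client dump -> local SharedMemoryTracker dump -> global shared-memory dump
//
// where both shared-memory dumps are created by SharedMemoryTracker; only the
// edges between them are added here.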
void ProcessMemoryDump::CreateSharedMemoryOwnershipEdgeInternal(
    const MemoryAllocatorDumpGuid& client_local_dump_guid,
    const UnguessableToken& shared_memory_guid,
    int importance,
    bool is_weak) {
  DCHECK(!shared_memory_guid.is_empty());
  // New model where the global dumps created by SharedMemoryTracker are used
  // for the clients.

  // The guid of the local dump created by SharedMemoryTracker for the memory
  // segment.
  auto local_shm_guid =
      GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shared_memory_guid));

  // The dump guid of the global dump created by the tracker for the memory
  // segment.
  auto global_shm_guid =
      SharedMemoryTracker::GetGlobalDumpIdForTracing(shared_memory_guid);

  // Create an edge between the local dump of the client and the local dump of
  // the SharedMemoryTracker. There is no need to create the dumps here since
  // the tracker creates them. The importance is also required here for the
  // case of single-process mode.
  AddOwnershipEdge(client_local_dump_guid, local_shm_guid, importance);

  // TODO(ssid): Handle the case of weak dumps here. This needs a new function
  // GetOrCreateGlobalDump() in PMD since we need to change the behavior of
  // the created global dump.
  // Create an edge that overrides the edge created by SharedMemoryTracker.
  AddOwnershipEdge(local_shm_guid, global_shm_guid, importance);
}

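// The child dump is named "<target_node_name>/__<source guid>"; e.g. a source
// guid of "1234" under "malloc" yields "malloc/__1234".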
void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
                                         const std::string& target_node_name) {
  // Do not create new dumps for suballocations in background mode.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::kBackground) {
    return;
  }

  std::string child_mad_name = target_node_name + "/__" + source.ToString();
  MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
  AddOwnershipEdge(source, target_child_mad->guid());
}

MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad(
    const std::string& absolute_name) {
  DCHECK(is_black_hole_non_fatal_for_testing_)
      << " unknown dump name " << absolute_name
      << " this likely means kAllocatorDumpNameAllowlist needs to be updated";
  if (!black_hole_mad_) {
    std::string name = "discarded";
    black_hole_mad_ = std::make_unique<MemoryAllocatorDump>(
        name, dump_args_.level_of_detail, GetDumpId(name));
  }
  return black_hole_mad_.get();
}

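// Dump ids are namespaced by the per-process token; e.g. a dump named
// "malloc/heap" becomes "<process token>:malloc/heap", which keeps guids from
// colliding across processes.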
MemoryAllocatorDumpGuid ProcessMemoryDump::GetDumpId(
    const std::string& absolute_name) {
  return MemoryAllocatorDumpGuid(StringPrintf(
      "%s:%s", process_token().ToString().c_str(), absolute_name.c_str()));
}

}  // namespace trace_event
}  // namespace base