xref: /aosp_15_r20/external/cronet/base/allocator/partition_alloc_support.cc (revision 6777b5387eb2ff775bb5750e3f5d96f37fb7352b)
1 // Copyright 2021 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/allocator/partition_alloc_support.h"
6 
7 #include <array>
8 #include <cinttypes>
9 #include <cstdint>
10 #include <map>
11 #include <optional>
12 #include <string>
13 #include <string_view>
14 
15 #include "base/allocator/partition_alloc_features.h"
16 #include "base/at_exit.h"
17 #include "base/check.h"
18 #include "base/cpu.h"
19 #include "base/debug/dump_without_crashing.h"
20 #include "base/debug/stack_trace.h"
21 #include "base/debug/task_trace.h"
22 #include "base/feature_list.h"
23 #include "base/functional/bind.h"
24 #include "base/functional/callback.h"
25 #include "base/immediate_crash.h"
26 #include "base/location.h"
27 #include "base/memory/post_delayed_memory_reduction_task.h"
28 #include "base/memory/raw_ptr_asan_service.h"
29 #include "base/metrics/histogram_functions.h"
30 #include "base/metrics/histogram_macros.h"
31 #include "base/no_destructor.h"
32 #include "base/pending_task.h"
33 #include "base/ranges/algorithm.h"
34 #include "base/strings/string_split.h"
35 #include "base/strings/stringprintf.h"
36 #include "base/system/sys_info.h"
37 #include "base/task/single_thread_task_runner.h"
38 #include "base/thread_annotations.h"
39 #include "base/threading/platform_thread.h"
40 #include "base/time/time.h"
41 #include "base/timer/timer.h"
42 #include "base/trace_event/base_tracing.h"
43 #include "build/build_config.h"
44 #include "partition_alloc/allocation_guard.h"
45 #include "partition_alloc/dangling_raw_ptr_checks.h"
46 #include "partition_alloc/memory_reclaimer.h"
47 #include "partition_alloc/page_allocator.h"
48 #include "partition_alloc/partition_alloc_base/debug/alias.h"
49 #include "partition_alloc/partition_alloc_base/threading/platform_thread.h"
50 #include "partition_alloc/partition_alloc_buildflags.h"
51 #include "partition_alloc/partition_alloc_check.h"
52 #include "partition_alloc/partition_alloc_config.h"
53 #include "partition_alloc/partition_lock.h"
54 #include "partition_alloc/partition_root.h"
55 #include "partition_alloc/pointers/instance_tracer.h"
56 #include "partition_alloc/pointers/raw_ptr.h"
57 #include "partition_alloc/shim/allocator_shim.h"
58 #include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
59 #include "partition_alloc/stack/stack.h"
60 #include "partition_alloc/thread_cache.h"
61 
62 #if BUILDFLAG(USE_STARSCAN)
63 #include "partition_alloc/shim/nonscannable_allocator.h"
64 #include "partition_alloc/starscan/pcscan.h"
65 #include "partition_alloc/starscan/pcscan_scheduling.h"
66 #include "partition_alloc/starscan/stats_collector.h"
67 #include "partition_alloc/starscan/stats_reporter.h"
68 #endif  // BUILDFLAG(USE_STARSCAN)
69 
70 #if BUILDFLAG(IS_ANDROID)
71 #include "base/system/sys_info.h"
72 #endif
73 
74 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
75 #include "partition_alloc/memory_reclaimer.h"
76 #endif
77 
78 #if BUILDFLAG(IS_ANDROID) && BUILDFLAG(HAS_MEMORY_TAGGING)
79 #include <sys/system_properties.h>
80 #endif
81 
82 namespace base::allocator {
83 
84 namespace {
85 
86 #if BUILDFLAG(IS_ANDROID) && BUILDFLAG(HAS_MEMORY_TAGGING)
87 enum class BootloaderOverride {
88   kDefault,
89   kForceOn,
90   kForceOff,
91 };
92 
GetBootloaderOverride()93 BootloaderOverride GetBootloaderOverride() {
94   char bootloader_override_str[PROP_VALUE_MAX];
95   __system_property_get(
96       "persist.device_config.runtime_native_boot.bootloader_override",
97       bootloader_override_str);
98 
99   if (strcmp(bootloader_override_str, "force_on") == 0) {
100     return BootloaderOverride::kForceOn;
101   }
102   if (strcmp(bootloader_override_str, "force_off") == 0) {
103     return BootloaderOverride::kForceOff;
104   }
105   return BootloaderOverride::kDefault;
106 }
107 #endif
108 
// When under this experiment avoid running periodic purging or reclaim for the
// first minute after the first attempt. This is based on the insight that
// processes often don't live past this minute.
static BASE_FEATURE(kDelayFirstPeriodicPAPurgeOrReclaim,
                    "DelayFirstPeriodicPAPurgeOrReclaim",
                    base::FEATURE_ENABLED_BY_DEFAULT);
// Lower bound applied to the first purge/reclaim delay when the feature above
// is enabled.
constexpr base::TimeDelta kFirstPAPurgeOrReclaimDelay = base::Minutes(1);
116 
// This is defined in content/public/common/content_switches.h, which is not
// accessible in ::base. They must be kept in sync.
namespace switches {
// Command-line value identifying a renderer process.
[[maybe_unused]] constexpr char kRendererProcess[] = "renderer";
// Command-line value identifying the zygote process.
constexpr char kZygoteProcess[] = "zygote";
#if BUILDFLAG(USE_STARSCAN)
// GPU/utility process values are only consulted by StarScan configuration.
constexpr char kGpuProcess[] = "gpu-process";
constexpr char kUtilityProcess[] = "utility";
#endif
}  // namespace switches
127 
128 #if BUILDFLAG(USE_STARSCAN)
129 
130 #if BUILDFLAG(ENABLE_BASE_TRACING)
ScannerIdToTracingString(partition_alloc::internal::StatsCollector::ScannerId id)131 constexpr const char* ScannerIdToTracingString(
132     partition_alloc::internal::StatsCollector::ScannerId id) {
133   switch (id) {
134     case partition_alloc::internal::StatsCollector::ScannerId::kClear:
135       return "PCScan.Scanner.Clear";
136     case partition_alloc::internal::StatsCollector::ScannerId::kScan:
137       return "PCScan.Scanner.Scan";
138     case partition_alloc::internal::StatsCollector::ScannerId::kSweep:
139       return "PCScan.Scanner.Sweep";
140     case partition_alloc::internal::StatsCollector::ScannerId::kOverall:
141       return "PCScan.Scanner";
142     case partition_alloc::internal::StatsCollector::ScannerId::kNumIds:
143       __builtin_unreachable();
144   }
145 }
146 
MutatorIdToTracingString(partition_alloc::internal::StatsCollector::MutatorId id)147 constexpr const char* MutatorIdToTracingString(
148     partition_alloc::internal::StatsCollector::MutatorId id) {
149   switch (id) {
150     case partition_alloc::internal::StatsCollector::MutatorId::kClear:
151       return "PCScan.Mutator.Clear";
152     case partition_alloc::internal::StatsCollector::MutatorId::kScanStack:
153       return "PCScan.Mutator.ScanStack";
154     case partition_alloc::internal::StatsCollector::MutatorId::kScan:
155       return "PCScan.Mutator.Scan";
156     case partition_alloc::internal::StatsCollector::MutatorId::kOverall:
157       return "PCScan.Mutator";
158     case partition_alloc::internal::StatsCollector::MutatorId::kNumIds:
159       __builtin_unreachable();
160   }
161 }
162 #endif  // BUILDFLAG(ENABLE_BASE_TRACING)
163 
// Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes.
class StatsReporterImpl final : public partition_alloc::StatsReporter {
 public:
  // Emits a trace slice for a scanner phase on |tid|'s thread track, spanning
  // the interval reconstructed from the two TimeTicks internal values.
  void ReportTraceEvent(
      partition_alloc::internal::StatsCollector::ScannerId id,
      [[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
      int64_t start_time_ticks_internal_value,
      int64_t end_time_ticks_internal_value) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
    // TRACE_EVENT_* macros below drop most parameters when tracing is
    // disabled at compile time.
    const char* tracing_id = ScannerIdToTracingString(id);
    const TimeTicks start_time =
        TimeTicks::FromInternalValue(start_time_ticks_internal_value);
    const TimeTicks end_time =
        TimeTicks::FromInternalValue(end_time_ticks_internal_value);
    TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
                      perfetto::ThreadTrack::ForThread(tid), start_time);
    TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
                    end_time);
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
  }

  // Same as above, for a mutator phase.
  void ReportTraceEvent(
      partition_alloc::internal::StatsCollector::MutatorId id,
      [[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
      int64_t start_time_ticks_internal_value,
      int64_t end_time_ticks_internal_value) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
    // TRACE_EVENT_* macros below drop most parameters when tracing is
    // disabled at compile time.
    const char* tracing_id = MutatorIdToTracingString(id);
    const TimeTicks start_time =
        TimeTicks::FromInternalValue(start_time_ticks_internal_value);
    const TimeTicks end_time =
        TimeTicks::FromInternalValue(end_time_ticks_internal_value);
    TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
                      perfetto::ThreadTrack::ForThread(tid), start_time);
    TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
                    end_time);
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
  }

  // Records the number of bytes surviving a quarantine sweep as a counter.
  void ReportSurvivedQuarantineSize(size_t survived_size) override {
    TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantineSize",
                   survived_size);
  }

  // Records the survival rate as a counter, scaled to an integer.
  void ReportSurvivedQuarantinePercent(double survived_rate) override {
    // Multiply by 1000 since TRACE_COUNTER1 expects integer. In catapult,
    // divide back.
    // TODO(bikineev): Remove after switching to perfetto.
    TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantinePercent",
                   1000 * survived_rate);
  }

  // Reports a phase duration (in microseconds) to the named UMA histogram.
  void ReportStats(const char* stats_name, int64_t sample_in_usec) override {
    TimeDelta sample = Microseconds(sample_in_usec);
    UmaHistogramTimes(stats_name, sample);
  }

 private:
  // Category under which all the trace events above are emitted.
  static constexpr char kTraceCategory[] = "partition_alloc";
};
228 
229 #endif  // BUILDFLAG(USE_STARSCAN)
230 
231 }  // namespace
232 
233 #if BUILDFLAG(USE_STARSCAN)
RegisterPCScanStatsReporter()234 void RegisterPCScanStatsReporter() {
235   static StatsReporterImpl s_reporter;
236   static bool registered = false;
237 
238   DCHECK(!registered);
239 
240   partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter);
241   registered = true;
242 }
243 #endif  // BUILDFLAG(USE_STARSCAN)
244 
245 namespace {
246 
RunThreadCachePeriodicPurge()247 void RunThreadCachePeriodicPurge() {
248   // Micros, since periodic purge should typically take at most a few ms.
249   SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.PeriodicPurge");
250   TRACE_EVENT0("memory", "PeriodicPurge");
251   auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
252   instance.RunPeriodicPurge();
253   TimeDelta delay =
254       Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
255   SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
256       FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
257 }
258 
RunMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner)259 void RunMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
260   TRACE_EVENT0("base", "partition_alloc::MemoryReclaimer::Reclaim()");
261   auto* instance = ::partition_alloc::MemoryReclaimer::Instance();
262 
263   {
264     // Micros, since memory reclaiming should typically take at most a few ms.
265     SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.MemoryReclaim");
266     instance->ReclaimNormal();
267   }
268 
269   TimeDelta delay = features::kPartitionAllocMemoryReclaimerInterval.Get();
270   if (!delay.is_positive()) {
271     delay =
272         Microseconds(instance->GetRecommendedReclaimIntervalInMicroseconds());
273   }
274 
275   task_runner->PostDelayedTask(
276       FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
277 }
278 
279 }  // namespace
280 
StartThreadCachePeriodicPurge()281 void StartThreadCachePeriodicPurge() {
282   auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
283   TimeDelta delay =
284       Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
285 
286   if (base::FeatureList::IsEnabled(kDelayFirstPeriodicPAPurgeOrReclaim)) {
287     delay = std::max(delay, kFirstPAPurgeOrReclaimDelay);
288   }
289 
290   SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
291       FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
292 }
293 
StartMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner)294 void StartMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
295   if (!base::FeatureList::IsEnabled(
296           base::features::kPartitionAllocMemoryReclaimer)) {
297     return;
298   }
299 
300   // Can be called several times.
301   static bool is_memory_reclaimer_running = false;
302   if (is_memory_reclaimer_running) {
303     return;
304   }
305   is_memory_reclaimer_running = true;
306 
307   // The caller of the API fully controls where running the reclaim.
308   // However there are a few reasons to recommend that the caller runs
309   // it on the main thread:
310   // - Most of PartitionAlloc's usage is on the main thread, hence PA's metadata
311   //   is more likely in cache when executing on the main thread.
312   // - Memory reclaim takes the partition lock for each partition. As a
313   //   consequence, while reclaim is running, the main thread is unlikely to be
314   //   able to make progress, as it would be waiting on the lock.
315   // - Finally, this runs in idle time only, so there should be no visible
316   //   impact.
317   //
318   // From local testing, time to reclaim is 100us-1ms, and reclaiming every few
319   // seconds is useful. Since this is meant to run during idle time only, it is
320   // a reasonable starting point balancing effectivenes vs cost. See
321   // crbug.com/942512 for details and experimental results.
322   TimeDelta delay = features::kPartitionAllocMemoryReclaimerInterval.Get();
323   if (!delay.is_positive()) {
324     delay = Microseconds(::partition_alloc::MemoryReclaimer::Instance()
325                              ->GetRecommendedReclaimIntervalInMicroseconds());
326   }
327 
328   if (base::FeatureList::IsEnabled(kDelayFirstPeriodicPAPurgeOrReclaim)) {
329     delay = std::max(delay, kFirstPAPurgeOrReclaimDelay);
330   }
331 
332   task_runner->PostDelayedTask(
333       FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
334 }
335 
// Returns synthetic trial-name -> group-name assignments describing this
// build/process's PartitionAlloc-related configuration (dangling pointer
// detection, MTE memory tagging), for use in crash/metrics slicing.
std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
  std::map<std::string, std::string> trials;

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  trials.emplace("DanglingPointerDetector", "Enabled");
#else
  trials.emplace("DanglingPointerDetector", "Disabled");
#endif

  // This value is not surrounded by build flags as it is meant to be updated
  // manually in binary experiment patches.
  trials.emplace("VectorRawPtrExperiment", "Disabled");

#if BUILDFLAG(HAS_MEMORY_TAGGING)
  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocMemoryTagging)) {
    // Whether the CPU actually supports MTE, regardless of the feature state.
    bool has_mte = base::CPU::GetInstanceNoAllocation().has_mte();
    if (has_mte) {
      trials.emplace("MemoryTaggingDogfood", "Enabled");
    } else {
      trials.emplace("MemoryTaggingDogfood", "Disabled");
    }
#if BUILDFLAG(IS_ANDROID)
    // On Android, also report the bootloader override system property,
    // cross-checked against the reporting mode the allocator actually uses.
    BootloaderOverride bootloader_override = GetBootloaderOverride();
    partition_alloc::TagViolationReportingMode reporting_mode =
        partition_alloc::TagViolationReportingMode::kUndefined;
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    reporting_mode = allocator_shim::internal::PartitionAllocMalloc::Allocator()
                         ->memory_tagging_reporting_mode();
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    switch (bootloader_override) {
      case BootloaderOverride::kDefault:
        trials.emplace("MemoryTaggingBootloaderOverride", "Default");
        break;
      case BootloaderOverride::kForceOn:
        if (has_mte) {
          switch (reporting_mode) {
            case partition_alloc::TagViolationReportingMode::kAsynchronous:
              trials.emplace("MemoryTaggingBootloaderOverride", "ForceOnAsync");
              break;
            case partition_alloc::TagViolationReportingMode::kSynchronous:
              // This should not happen unless user forces it.
              trials.emplace("MemoryTaggingBootloaderOverride", "ForceOnSync");
              break;
            default:
              // This should not happen unless user forces it.
              trials.emplace("MemoryTaggingBootloaderOverride",
                             "ForceOnDisabled");
          }
        } else {
          // This should not happen unless user forces it.
          trials.emplace("MemoryTaggingBootloaderOverride",
                         "ForceOnWithoutMte");
        }
        break;
      case BootloaderOverride::kForceOff:
        if (!has_mte) {
          trials.emplace("MemoryTaggingBootloaderOverride", "ForceOff");
        } else {
          // This should not happen unless user forces it.
          trials.emplace("MemoryTaggingBootloaderOverride", "ForceOffWithMte");
        }
        break;
    }
#endif  // BUILDFLAG(IS_ANDROID)
  }
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)

  return trials;
}
406 
407 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
408 
409 namespace {
410 
// Protects |g_stack_trace_buffer| below.
internal::PartitionLock g_stack_trace_buffer_lock;

// Traces captured when an allocation still referenced by a raw_ptr was freed.
struct DanglingPointerFreeInfo {
  debug::StackTrace stack_trace;
  debug::TaskTrace task_trace;
  // Identifier of the dangling allocation, as passed to the detection hooks.
  uintptr_t id = 0;
};
// Fixed-size buffer: at most 32 free events can be tracked at any one time;
// additional ones are dropped (see DanglingRawPtrDetected).
using DanglingRawPtrBuffer =
    std::array<std::optional<DanglingPointerFreeInfo>, 32>;
DanglingRawPtrBuffer g_stack_trace_buffer GUARDED_BY(g_stack_trace_buffer_lock);
421 
DanglingRawPtrDetected(uintptr_t id)422 void DanglingRawPtrDetected(uintptr_t id) {
423   // This is called from inside the allocator. No allocation is allowed.
424 
425   internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
426 
427 #if DCHECK_IS_ON()
428   for (std::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
429     PA_DCHECK(!entry || entry->id != id);
430   }
431 #endif  // DCHECK_IS_ON()
432 
433   for (std::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
434     if (!entry) {
435       entry = {debug::StackTrace(), debug::TaskTrace(), id};
436       return;
437     }
438   }
439 
440   // The StackTrace hasn't been recorded, because the buffer isn't large
441   // enough.
442 }
443 
// From the traces recorded in |DanglingRawPtrDetected|, extract the one
// whose id match |id|. Return nullopt if not found.
std::optional<DanglingPointerFreeInfo> TakeDanglingPointerFreeInfo(
    uintptr_t id) {
  internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
  // Linear scan of the fixed-size buffer; the matching slot is emptied so it
  // can be reused by a later DanglingRawPtrDetected().
  for (std::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
    if (entry && entry->id == id) {
      std::optional<DanglingPointerFreeInfo> result(entry);
      entry = std::nullopt;
      return result;
    }
  }
  return std::nullopt;
}
458 
// Extract from the StackTrace output, the signature of the pertinent caller.
// This function is meant to be used only by Chromium developers, to list what
// are all the dangling raw_ptr occurrences in a table.
std::string ExtractDanglingPtrSignature(std::string stacktrace) {
  std::vector<std::string_view> lines = SplitStringPiece(
      stacktrace, "\r\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);

  // We are looking for the callers of the function releasing the raw_ptr and
  // freeing memory. This lists potential matching patterns. A pattern is a list
  // of substrings that are all required to match.
  const std::vector<std::string_view> callee_patterns[] = {
      // Common signature patterns:
      {"internal::PartitionFree"},
      {"base::", "::FreeFn"},
      {"internal::RawPtrBackupRefImpl", "::ReleaseInternal"},

      // Linux specific:
      {"base::RefCountedThreadSafe<>::Release"},

      // Windows specific:
      {"_free_base"},

      // Task traces are prefixed with "Task trace:" in
      // |TaskTrace::OutputToStream|
      {"Task trace:"},
  };
  // Index of the line just after the LAST line matching any pattern. Note:
  // if nothing matches, this stays 0 and the first trace line is used below.
  size_t caller_index = 0;
  for (size_t i = 0; i < lines.size(); ++i) {
    for (const auto& patterns : callee_patterns) {
      if (ranges::all_of(patterns, [&](std::string_view pattern) {
            return lines[i].find(pattern) != StringPiece::npos;
          })) {
        caller_index = i + 1;
      }
    }
  }
  // Matched line was the last one: there is no caller line to report.
  if (caller_index >= lines.size()) {
    return "no_callee_match";
  }
  std::string_view caller = lines[caller_index];

  if (caller.empty()) {
    return "invalid_format";
  }

  // On Posix platforms |callers| follows the following format:
  //
  // #<index> <address> <symbol>
  //
  // See https://crsrc.org/c/base/debug/stack_trace_posix.cc
  if (caller[0] == '#') {
    const size_t address_start = caller.find(' ');
    const size_t function_start = caller.find(' ', address_start + 1);

    if (address_start == caller.npos || function_start == caller.npos) {
      return "invalid_format";
    }

    return std::string(caller.substr(function_start + 1));
  }

  // On Windows platforms |callers| follows the following format:
  //
  // \t<symbol> [0x<address>]+<displacement>(<filename>:<line>)
  //
  // See https://crsrc.org/c/base/debug/stack_trace_win.cc
  if (caller[0] == '\t') {
    const size_t symbol_start = 1;
    const size_t symbol_end = caller.find(' ');
    if (symbol_end == caller.npos) {
      return "invalid_format";
    }
    return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
  }

  // On Mac platforms |callers| follows the following format:
  //
  // <index> <library> 0x<address> <symbol> + <line>
  //
  // See https://crsrc.org/c/base/debug/stack_trace_posix.cc
  if (caller[0] >= '0' && caller[0] <= '9') {
    const size_t address_start = caller.find("0x");
    const size_t symbol_start = caller.find(' ', address_start + 1) + 1;
    const size_t symbol_end = caller.find(' ', symbol_start);
    if (symbol_start == caller.npos || symbol_end == caller.npos) {
      return "invalid_format";
    }
    return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
  }

  return "invalid_format";
}
551 
ExtractDanglingPtrSignature(debug::TaskTrace task_trace)552 std::string ExtractDanglingPtrSignature(debug::TaskTrace task_trace) {
553   if (task_trace.empty()) {
554     return "No active task";
555   }
556   return ExtractDanglingPtrSignature(task_trace.ToString());
557 }
558 
ExtractDanglingPtrSignature(std::optional<DanglingPointerFreeInfo> free_info,debug::StackTrace release_stack_trace,debug::TaskTrace release_task_trace)559 std::string ExtractDanglingPtrSignature(
560     std::optional<DanglingPointerFreeInfo> free_info,
561     debug::StackTrace release_stack_trace,
562     debug::TaskTrace release_task_trace) {
563   if (free_info) {
564     return StringPrintf(
565         "[DanglingSignature]\t%s\t%s\t%s\t%s",
566         ExtractDanglingPtrSignature(free_info->stack_trace.ToString()).c_str(),
567         ExtractDanglingPtrSignature(free_info->task_trace).c_str(),
568         ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
569         ExtractDanglingPtrSignature(release_task_trace).c_str());
570   }
571   return StringPrintf(
572       "[DanglingSignature]\t%s\t%s\t%s\t%s", "missing", "missing",
573       ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
574       ExtractDanglingPtrSignature(release_task_trace).c_str());
575 }
576 
operator ==(const debug::TaskTrace & lhs,const debug::TaskTrace & rhs)577 bool operator==(const debug::TaskTrace& lhs, const debug::TaskTrace& rhs) {
578   // Compare the addresses contained in the task traces.
579   // The task traces are at most |PendingTask::kTaskBacktraceLength| long.
580   std::array<const void*, PendingTask::kTaskBacktraceLength> addresses_lhs = {};
581   std::array<const void*, PendingTask::kTaskBacktraceLength> addresses_rhs = {};
582   lhs.GetAddresses(addresses_lhs);
583   rhs.GetAddresses(addresses_rhs);
584   return addresses_lhs == addresses_rhs;
585 }
586 
// Called when a raw_ptr referencing an already-freed allocation is released.
// Logs the free-side traces (when recorded) and the release-side traces, and
// crashes when |dangling_pointer_mode| is kCrash. When
// |dangling_pointer_type| is kCrossTask, only pointers freed and released in
// different tasks are reported.
template <features::DanglingPtrMode dangling_pointer_mode,
          features::DanglingPtrType dangling_pointer_type>
void DanglingRawPtrReleased(uintptr_t id) {
  // This is called from raw_ptr<>'s release operation. Making allocations is
  // allowed. In particular, symbolizing and printing the StackTraces may
  // allocate memory.
  debug::StackTrace stack_trace_release;
  debug::TaskTrace task_trace_release;
  std::optional<DanglingPointerFreeInfo> free_info =
      TakeDanglingPointerFreeInfo(id);

  if constexpr (dangling_pointer_type ==
                features::DanglingPtrType::kCrossTask) {
    // Cross-task mode: skip releases whose free was not recorded, and
    // releases happening within the same task as the free.
    if (!free_info) {
      return;
    }
    if (task_trace_release == free_info->task_trace) {
      return;
    }
  }

  std::string dangling_signature = ExtractDanglingPtrSignature(
      free_info, stack_trace_release, task_trace_release);
  static const char dangling_ptr_footer[] =
      "\n"
      "\n"
      "Please check for more information on:\n"
      "https://chromium.googlesource.com/chromium/src/+/main/docs/"
      "dangling_ptr_guide.md\n"
      "\n"
      "Googlers: Please give us your feedback about the dangling pointer\n"
      "          detector at:\n"
      "          http://go/dangling-ptr-cq-survey\n";
  if (free_info) {
    LOG(ERROR) << "Detected dangling raw_ptr with id="
               << StringPrintf("0x%016" PRIxPTR, id) << ":\n"
               << dangling_signature << "\n\n"
               << "The memory was freed at:\n"
               << free_info->stack_trace << "\n"
               << free_info->task_trace << "\n"
               << "The dangling raw_ptr was released at:\n"
               << stack_trace_release << "\n"
               << task_trace_release << dangling_ptr_footer;
  } else {
    LOG(ERROR) << "Detected dangling raw_ptr with id="
               << StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
               << dangling_signature << "\n\n"
               << "It was not recorded where the memory was freed.\n\n"
               << "The dangling raw_ptr was released at:\n"
               << stack_trace_release << "\n"
               << task_trace_release << dangling_ptr_footer;
  }

  if constexpr (dangling_pointer_mode == features::DanglingPtrMode::kCrash) {
    ImmediateCrash();
  }
}
644 
// Verifies that no freed-but-still-referenced allocation remains recorded in
// |g_stack_trace_buffer|; CHECKs (crashes) if any does. On Android the buffer
// is cleared instead (see TODO below).
void CheckDanglingRawPtrBufferEmpty() {
  internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);

  // TODO(https://crbug.com/1425095): Check for leaked refcount on Android.
#if BUILDFLAG(IS_ANDROID)
  g_stack_trace_buffer = DanglingRawPtrBuffer();
#else
  bool errors = false;
  for (auto entry : g_stack_trace_buffer) {
    if (!entry) {
      continue;
    }
    errors = true;
    LOG(ERROR) << "A freed allocation is still referenced by a dangling "
                  "pointer at exit, or at test end. Leaked raw_ptr/raw_ref "
                  "could cause PartitionAlloc's quarantine memory bloat."
                  "\n\n"
                  "Memory was released on:\n"
               << entry->task_trace << "\n"
               << entry->stack_trace << "\n";
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER)
    // With instance tracing compiled in, also print the stack traces of the
    // raw_ptr/raw_ref instances still referencing the freed allocation.
    std::vector<std::array<const void*, 32>> stack_traces =
        internal::InstanceTracer::GetStackTracesForDanglingRefs(entry->id);
    for (const auto& raw_stack_trace : stack_traces) {
      LOG(ERROR) << "Dangling reference from:\n";
      // Trailing null frames are trimmed from the reported trace length.
      LOG(ERROR) << debug::StackTrace(raw_stack_trace.data(),
                                      raw_stack_trace.size() -
                                          static_cast<size_t>(ranges::count(
                                              raw_stack_trace, nullptr)))
                 << "\n";
    }
#else
    LOG(ERROR) << "Building with enable_backup_ref_ptr_instance_tracer will "
                  "print out stack traces of any live but dangling references.";
#endif
  }
  CHECK(!errors);
#endif
}
684 
685 }  // namespace
686 
// Installs the dangling raw_ptr detection hooks into PartitionAlloc,
// selecting the DanglingRawPtrReleased instantiation matching the
// kPartitionAllocDanglingPtr feature's mode/type parameters. Also checks the
// recorded-traces buffer is empty now and registers a check at process exit.
void InstallDanglingRawPtrChecks() {
  // Multiple tests can run within the same executable's execution. This line
  // ensures problems detected from the previous test cause errors before
  // entering the next one...
  CheckDanglingRawPtrBufferEmpty();

  // ... similarly, some allocation may stay forever in the quarantine and we
  // might ignore them if the executable exits. This line makes sure dangling
  // pointers errors are never ignored, by crashing at exit, as a last resort.
  // This makes quarantine memory bloat more likely to be detected.
  static bool first_run_in_process = true;
  if (first_run_in_process) {
    first_run_in_process = false;
    AtExitManager::RegisterTask(base::BindOnce(CheckDanglingRawPtrBufferEmpty));
  }

  if (!FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
    // Feature disabled: install no-op callbacks.
    partition_alloc::SetDanglingRawPtrDetectedFn([](uintptr_t) {});
    partition_alloc::SetDanglingRawPtrReleasedFn([](uintptr_t) {});
    return;
  }

  partition_alloc::SetDanglingRawPtrDetectedFn(&DanglingRawPtrDetected);
  switch (features::kDanglingPtrModeParam.Get()) {
    case features::DanglingPtrMode::kCrash:
      switch (features::kDanglingPtrTypeParam.Get()) {
        case features::DanglingPtrType::kAll:
          partition_alloc::SetDanglingRawPtrReleasedFn(
              &DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
                                      features::DanglingPtrType::kAll>);
          break;
        case features::DanglingPtrType::kCrossTask:
          partition_alloc::SetDanglingRawPtrReleasedFn(
              &DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
                                      features::DanglingPtrType::kCrossTask>);
          break;
      }
      break;
    case features::DanglingPtrMode::kLogOnly:
      switch (features::kDanglingPtrTypeParam.Get()) {
        case features::DanglingPtrType::kAll:
          partition_alloc::SetDanglingRawPtrReleasedFn(
              &DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly,
                                      features::DanglingPtrType::kAll>);
          break;
        case features::DanglingPtrType::kCrossTask:
          partition_alloc::SetDanglingRawPtrReleasedFn(
              &DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly,
                                      features::DanglingPtrType::kCrossTask>);
          break;
      }
      break;
  }
}
741 
742 // TODO(arthursonzogni): There might exist long lived dangling raw_ptr. If there
743 // is a dangling pointer, we should crash at some point. Consider providing an
744 // API to periodically check the buffer.
745 
#else   // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
// No-op stub when dangling raw_ptr checks are compiled out, so callers may
// invoke this unconditionally.
void InstallDanglingRawPtrChecks() {}
#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
749 
// Handler installed when the "dump without crashing" mode is selected for
// unretained dangling raw_ptr detection: records a crash dump and lets
// execution continue. |id| identifies the dangling allocation but is not
// reported by this handler.
void UnretainedDanglingRawPtrDetectedDumpWithoutCrashing(uintptr_t id) {
  // Prevents the linker from folding this function together with other
  // identical DumpWithoutCrashing wrappers (see the macro's name), so the
  // reported stack frame stays distinct.
  PA_NO_CODE_FOLDING();
  debug::DumpWithoutCrashing();
}
754 
// Handler installed when the "crash" mode is selected for unretained dangling
// raw_ptr detection: logs the pointer id, the current task trace and stack
// trace, then terminates the process via LOG(FATAL).
void UnretainedDanglingRawPtrDetectedCrash(uintptr_t id) {
  static const char unretained_dangling_ptr_footer[] =
      "\n"
      "\n"
      "Please check for more information on:\n"
      "https://chromium.googlesource.com/chromium/src/+/main/docs/"
      "unretained_dangling_ptr_guide.md\n";
  debug::TaskTrace task_trace;
  debug::StackTrace stack_trace;
  LOG(FATAL) << "Detected dangling raw_ptr in unretained with id="
             << StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
             << task_trace << '\n'
             << "Stack trace:\n"
             << stack_trace << unretained_dangling_ptr_footer;
}
770 
InstallUnretainedDanglingRawPtrChecks()771 void InstallUnretainedDanglingRawPtrChecks() {
772   if (!FeatureList::IsEnabled(features::kPartitionAllocUnretainedDanglingPtr)) {
773     partition_alloc::SetUnretainedDanglingRawPtrDetectedFn([](uintptr_t) {});
774     partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/false);
775     return;
776   }
777 
778   partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/true);
779   switch (features::kUnretainedDanglingPtrModeParam.Get()) {
780     case features::UnretainedDanglingPtrMode::kCrash:
781       partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
782           &UnretainedDanglingRawPtrDetectedCrash);
783       break;
784 
785     case features::UnretainedDanglingPtrMode::kDumpWithoutCrashing:
786       partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
787           &UnretainedDanglingRawPtrDetectedDumpWithoutCrashing);
788       break;
789   }
790 }
791 
792 namespace {
793 
794 #if BUILDFLAG(USE_STARSCAN)
SetProcessNameForPCScan(const std::string & process_type)795 void SetProcessNameForPCScan(const std::string& process_type) {
796   const char* name = [&process_type] {
797     if (process_type.empty()) {
798       // Empty means browser process.
799       return "Browser";
800     }
801     if (process_type == switches::kRendererProcess) {
802       return "Renderer";
803     }
804     if (process_type == switches::kGpuProcess) {
805       return "Gpu";
806     }
807     if (process_type == switches::kUtilityProcess) {
808       return "Utility";
809     }
810     return static_cast<const char*>(nullptr);
811   }();
812 
813   if (name) {
814     partition_alloc::internal::PCScan::SetProcessName(name);
815   }
816 }
817 
// Enables PCScan on the malloc() partitions when kPartitionAllocPCScan is
// enabled. Returns true iff PCScan was turned on. Always returns false when
// PartitionAlloc is not the malloc() implementation.
bool EnablePCScanForMallocPartitionsIfNeeded() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Route PartitionAlloc-internal thread naming through //base.
  partition_alloc::internal::base::PlatformThread::SetThreadNameHook(
      &base::PlatformThread::SetName);

  using Config = partition_alloc::internal::PCScan::InitConfig;
  DCHECK(base::FeatureList::GetInstance());
  if (base::FeatureList::IsEnabled(base::features::kPartitionAllocPCScan)) {
    allocator_shim::EnablePCScan({Config::WantedWriteProtectionMode::kEnabled,
                                  Config::SafepointMode::kEnabled});
    base::allocator::RegisterPCScanStatsReporter();
    return true;
  }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  return false;
}
834 
// Enables PCScan on the malloc() partitions when the browser-only feature
// kPartitionAllocPCScanBrowserOnly is enabled. Write protection (DCScan) is
// additionally gated on kPartitionAllocDCScan and requires uffd support.
// Returns true iff PCScan was turned on.
bool EnablePCScanForMallocPartitionsInBrowserProcessIfNeeded() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  using Config = partition_alloc::internal::PCScan::InitConfig;
  DCHECK(base::FeatureList::GetInstance());
  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocPCScanBrowserOnly)) {
    const Config::WantedWriteProtectionMode wp_mode =
        base::FeatureList::IsEnabled(base::features::kPartitionAllocDCScan)
            ? Config::WantedWriteProtectionMode::kEnabled
            : Config::WantedWriteProtectionMode::kDisabled;
#if !PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
    CHECK_EQ(Config::WantedWriteProtectionMode::kDisabled, wp_mode)
        << "DCScan is currently only supported on Linux based systems";
#endif
    allocator_shim::EnablePCScan({wp_mode, Config::SafepointMode::kEnabled});
    base::allocator::RegisterPCScanStatsReporter();
    return true;
  }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  return false;
}
856 
// Enables PCScan on the malloc() partitions when the renderer-only feature
// kPartitionAllocPCScanRendererOnly is enabled. Unlike the browser variant,
// safepoints are disabled here. Returns true iff PCScan was turned on.
bool EnablePCScanForMallocPartitionsInRendererProcessIfNeeded() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  using Config = partition_alloc::internal::PCScan::InitConfig;
  DCHECK(base::FeatureList::GetInstance());
  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocPCScanRendererOnly)) {
    const Config::WantedWriteProtectionMode wp_mode =
        base::FeatureList::IsEnabled(base::features::kPartitionAllocDCScan)
            ? Config::WantedWriteProtectionMode::kEnabled
            : Config::WantedWriteProtectionMode::kDisabled;
#if !PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
    CHECK_EQ(Config::WantedWriteProtectionMode::kDisabled, wp_mode)
        << "DCScan is currently only supported on Linux based systems";
#endif
    allocator_shim::EnablePCScan({wp_mode, Config::SafepointMode::kDisabled});
    base::allocator::RegisterPCScanStatsReporter();
    return true;
  }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  return false;
}
878 #endif  // BUILDFLAG(USE_STARSCAN)
879 
880 }  // namespace
881 
// Reconfigures PartitionAlloc for a process whose type is definitively known,
// i.e. anything but the zygote (which later forks into other process types).
// Currently a no-op pending the Finch experiments mentioned below.
void ReconfigurePartitionForKnownProcess(const std::string& process_type) {
  DCHECK_NE(process_type, switches::kZygoteProcess);
  // TODO(keishi): Move the code to enable BRP back here after Finch
  // experiments.
}
887 
Get()888 PartitionAllocSupport* PartitionAllocSupport::Get() {
889   static auto* singleton = new PartitionAllocSupport();
890   return singleton;
891 }
892 
893 PartitionAllocSupport::PartitionAllocSupport() = default;
894 
// Test-only entry point: performs the "earlyish" reconfiguration with an
// empty (browser-like) process type, then records that test mode is active so
// a later ReconfigureEarlyish() call becomes a no-op.
void PartitionAllocSupport::ReconfigureForTests() {
  ReconfigureEarlyish("");
  base::AutoLock scoped_lock(lock_);
  called_for_tests_ = true;
}
900 
// static
// Decides whether memory tagging (MTE) should be enabled for the given
// process type. Requires the kPartitionAllocMemoryTagging feature, hardware
// MTE support, and the kill switch being off; which processes then qualify is
// controlled by kMemoryTaggingEnabledProcessesParam.
bool PartitionAllocSupport::ShouldEnableMemoryTagging(
    const std::string& process_type) {
  // Check kPartitionAllocMemoryTagging first so the Feature is activated even
  // when mte bootloader flag is disabled.
  if (!base::FeatureList::IsEnabled(
          base::features::kPartitionAllocMemoryTagging)) {
    return false;
  }
  if (!base::CPU::GetInstanceNoAllocation().has_mte()) {
    return false;
  }

  DCHECK(base::FeatureList::GetInstance());
  // The kill switch takes precedence over the enabling feature.
  if (base::FeatureList::IsEnabled(
          base::features::kKillPartitionAllocMemoryTagging)) {
    return false;
  }
  // Exhaustive switch over the enum; no fall-through return needed.
  switch (base::features::kMemoryTaggingEnabledProcessesParam.Get()) {
    case base::features::MemoryTaggingEnabledProcesses::kBrowserOnly:
      // An empty process type denotes the browser process.
      return process_type.empty();
    case base::features::MemoryTaggingEnabledProcesses::kNonRenderer:
      return process_type != switches::kRendererProcess;
    case base::features::MemoryTaggingEnabledProcesses::kAllProcesses:
      return true;
  }
}
928 
// static
// Convenience wrapper: memory-tagging decision for the renderer process type.
bool PartitionAllocSupport::ShouldEnableMemoryTaggingInRendererProcess() {
  return ShouldEnableMemoryTagging(switches::kRendererProcess);
}
933 
// static
// Computes the BackupRefPtr (BRP) configuration for |process_type|:
// - process_affected_by_brp_flag: whether the BRP experiment flag targets this
//   process (used e.g. for the ASan-BRP checks below);
// - enable_brp: whether BRP should actually be enabled in the allocator,
//   which additionally requires PA-E + BRP support to be compiled in and the
//   mode param not being kDisabled.
PartitionAllocSupport::BrpConfiguration
PartitionAllocSupport::GetBrpConfiguration(const std::string& process_type) {
  // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
  CHECK(base::FeatureList::GetInstance());

  bool process_affected_by_brp_flag = false;
#if (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&  \
     BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) || \
    BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocBackupRefPtr)) {
    // No specified process type means this is the Browser process.
    switch (base::features::kBackupRefPtrEnabledProcessesParam.Get()) {
      case base::features::BackupRefPtrEnabledProcesses::kBrowserOnly:
        process_affected_by_brp_flag = process_type.empty();
        break;
      case base::features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
        process_affected_by_brp_flag =
            process_type.empty() ||
            (process_type == switches::kRendererProcess);
        break;
      case base::features::BackupRefPtrEnabledProcesses::kNonRenderer:
        process_affected_by_brp_flag =
            (process_type != switches::kRendererProcess);
        break;
      case base::features::BackupRefPtrEnabledProcesses::kAllProcesses:
        process_affected_by_brp_flag = true;
        break;
    }
  }
#endif  // (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) ||
        // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)

  const bool enable_brp =
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
    BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
      // kDisabled is equivalent to !IsEnabled(kPartitionAllocBackupRefPtr).
      process_affected_by_brp_flag &&
      base::features::kBackupRefPtrModeParam.Get() !=
          base::features::BackupRefPtrMode::kDisabled;
#else
      false;
#endif

  return {
      enable_brp,
      process_affected_by_brp_flag,
  };
}
985 
// First stage of PartitionAlloc reconfiguration, run before FeatureList is
// available. Records the process type, enforces single invocation, and starts
// the memory reclaimer for PA-E builds.
void PartitionAllocSupport::ReconfigureEarlyish(
    const std::string& process_type) {
  {
    base::AutoLock scoped_lock(lock_);

    // In tests, ReconfigureEarlyish() is called by ReconfigureForTest(), which
    // is earlier than ContentMain().
    if (called_for_tests_) {
      DCHECK(called_earlyish_);
      return;
    }

    // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
    CHECK(!called_earlyish_)
        << "ReconfigureEarlyish was already called for process '"
        << established_process_type_ << "'; current process: '" << process_type
        << "'";

    called_earlyish_ = true;
    established_process_type_ = process_type;
  }

  // The zygote's final type is unknown until it forks; skip the known-process
  // reconfiguration for it.
  if (process_type != switches::kZygoteProcess) {
    ReconfigurePartitionForKnownProcess(process_type);
  }

  // These initializations are only relevant for PartitionAlloc-Everywhere
  // builds.
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  allocator_shim::EnablePartitionAllocMemoryReclaimer();
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
1018 
// Re-runs the process-type-specific reconfiguration after a zygote fork, once
// the child's real process type is known. Must follow ReconfigureEarlyish()
// performed in the zygote itself.
void PartitionAllocSupport::ReconfigureAfterZygoteFork(
    const std::string& process_type) {
  {
    base::AutoLock scoped_lock(lock_);
    // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
    CHECK(!called_after_zygote_fork_)
        << "ReconfigureAfterZygoteFork was already called for process '"
        << established_process_type_ << "'; current process: '" << process_type
        << "'";
    DCHECK(called_earlyish_)
        << "Attempt to call ReconfigureAfterZygoteFork without calling "
           "ReconfigureEarlyish; current process: '"
        << process_type << "'";
    DCHECK_EQ(established_process_type_, switches::kZygoteProcess)
        << "Attempt to call ReconfigureAfterZygoteFork while "
           "ReconfigureEarlyish was called on non-zygote process '"
        << established_process_type_ << "'; current process: '" << process_type
        << "'";

    called_after_zygote_fork_ = true;
    established_process_type_ = process_type;
  }

  if (process_type != switches::kZygoteProcess) {
    ReconfigurePartitionForKnownProcess(process_type);
  }
}
1046 
// Second configuration stage, run once FeatureList is initialized. Installs
// the dangling-raw_ptr detectors, then (for PA-E builds) derives the full
// allocator configuration from feature state — BRP, bucket distribution,
// quarantines, freelist flavor, memory tagging — applies it via
// allocator_shim::ConfigurePartitions(), and finally sets up PCScan and
// thread caches. Ordering of the statements below is load-bearing; see the
// inline comments.
void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
    const std::string& process_type,
    bool configure_dangling_pointer_detector) {
  if (configure_dangling_pointer_detector) {
    base::allocator::InstallDanglingRawPtrChecks();
  }
  base::allocator::InstallUnretainedDanglingRawPtrChecks();
  {
    base::AutoLock scoped_lock(lock_);
    // Avoid initializing more than once.
    if (called_after_feature_list_init_) {
      DCHECK_EQ(established_process_type_, process_type)
          << "ReconfigureAfterFeatureListInit was already called for process '"
          << established_process_type_ << "'; current process: '"
          << process_type << "'";
      return;
    }
    DCHECK(called_earlyish_)
        << "Attempt to call ReconfigureAfterFeatureListInit without calling "
           "ReconfigureEarlyish; current process: '"
        << process_type << "'";
    DCHECK_NE(established_process_type_, switches::kZygoteProcess)
        << "Attempt to call ReconfigureAfterFeatureListInit without calling "
           "ReconfigureAfterZygoteFork; current process: '"
        << process_type << "'";
    DCHECK_EQ(established_process_type_, process_type)
        << "ReconfigureAfterFeatureListInit wasn't called for an already "
           "established process '"
        << established_process_type_ << "'; current process: '" << process_type
        << "'";

    called_after_feature_list_init_ = true;
  }

  DCHECK_NE(process_type, switches::kZygoteProcess);
  [[maybe_unused]] BrpConfiguration brp_config =
      GetBrpConfiguration(process_type);

  // With ASan-BRP, the raw_ptr checks are implemented via the ASan runtime
  // rather than the allocator; configure them according to the experiment
  // flags (or explicitly off, for processes outside the experiment).
#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
  if (brp_config.process_affected_by_brp_flag) {
    base::RawPtrAsanService::GetInstance().Configure(
        base::EnableDereferenceCheck(
            base::features::kBackupRefPtrAsanEnableDereferenceCheckParam.Get()),
        base::EnableExtractionCheck(
            base::features::kBackupRefPtrAsanEnableExtractionCheckParam.Get()),
        base::EnableInstantiationCheck(
            base::features::kBackupRefPtrAsanEnableInstantiationCheckParam
                .Get()));
  } else {
    base::RawPtrAsanService::GetInstance().Configure(
        base::EnableDereferenceCheck(false), base::EnableExtractionCheck(false),
        base::EnableInstantiationCheck(false));
  }
#endif  // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  auto bucket_distribution = allocator_shim::BucketDistribution::kNeutral;
  // No specified type means we are in the browser.
  switch (process_type == ""
              ? base::features::kPartitionAllocBucketDistributionParam.Get()
              : base::features::BucketDistributionMode::kDefault) {
    case base::features::BucketDistributionMode::kDefault:
      break;
    case base::features::BucketDistributionMode::kDenser:
      bucket_distribution = allocator_shim::BucketDistribution::kDenser;
      break;
  }

  const bool scheduler_loop_quarantine = base::FeatureList::IsEnabled(
      base::features::kPartitionAllocSchedulerLoopQuarantine);
  const size_t scheduler_loop_quarantine_capacity_in_bytes =
      static_cast<size_t>(
          base::features::kPartitionAllocSchedulerLoopQuarantineCapacity.Get());
  const bool zapping_by_free_flags = base::FeatureList::IsEnabled(
      base::features::kPartitionAllocZappingByFreeFlags);

#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
  const bool use_pool_offset_freelists =
      base::FeatureList::IsEnabled(base::features::kUsePoolOffsetFreelists);
#else
  const bool use_pool_offset_freelists = false;
#endif  // BUILDFLAG(USE_FREELIST_POOL_OFFSETS)

  bool enable_memory_tagging = false;
  partition_alloc::TagViolationReportingMode memory_tagging_reporting_mode =
      partition_alloc::TagViolationReportingMode::kUndefined;

#if BUILDFLAG(HAS_MEMORY_TAGGING)
  // ShouldEnableMemoryTagging() checks kKillPartitionAllocMemoryTagging but
  // check here too to wrap the GetMemoryTaggingModeForCurrentThread() call.
  if (!base::FeatureList::IsEnabled(
          base::features::kKillPartitionAllocMemoryTagging)) {
    // If synchronous mode is enabled from startup it means this is a test and
    // memory tagging should be enabled.
    if (partition_alloc::internal::GetMemoryTaggingModeForCurrentThread() ==
        partition_alloc::TagViolationReportingMode::kSynchronous) {
      enable_memory_tagging = true;
      memory_tagging_reporting_mode =
          partition_alloc::TagViolationReportingMode::kSynchronous;
    } else {
      enable_memory_tagging = ShouldEnableMemoryTagging(process_type);
#if BUILDFLAG(IS_ANDROID)
      if (enable_memory_tagging) {
        switch (base::features::kMemtagModeParam.Get()) {
          case base::features::MemtagMode::kSync:
            memory_tagging_reporting_mode =
                partition_alloc::TagViolationReportingMode::kSynchronous;
            break;
          case base::features::MemtagMode::kAsync:
            memory_tagging_reporting_mode =
                partition_alloc::TagViolationReportingMode::kAsynchronous;
            break;
        }
        partition_alloc::PermissiveMte::SetEnabled(base::FeatureList::IsEnabled(
            base::features::kPartitionAllocPermissiveMte));
        CHECK(partition_alloc::internal::
                  ChangeMemoryTaggingModeForAllThreadsPerProcess(
                      memory_tagging_reporting_mode));
        CHECK_EQ(
            partition_alloc::internal::GetMemoryTaggingModeForCurrentThread(),
            memory_tagging_reporting_mode);
      } else if (base::CPU::GetInstanceNoAllocation().has_mte()) {
        // Tagging is off for this process but the hardware supports it:
        // explicitly disable it process-wide.
        memory_tagging_reporting_mode =
            partition_alloc::TagViolationReportingMode::kDisabled;
        CHECK(partition_alloc::internal::
                  ChangeMemoryTaggingModeForAllThreadsPerProcess(
                      memory_tagging_reporting_mode));
        CHECK_EQ(
            partition_alloc::internal::GetMemoryTaggingModeForCurrentThread(),
            memory_tagging_reporting_mode);
      }
#endif  // BUILDFLAG(IS_ANDROID)
    }
  }
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)

  // Sanity-check that the (enable, mode) pair is internally consistent.
  if (enable_memory_tagging) {
    CHECK((memory_tagging_reporting_mode ==
           partition_alloc::TagViolationReportingMode::kSynchronous) ||
          (memory_tagging_reporting_mode ==
           partition_alloc::TagViolationReportingMode::kAsynchronous));
  } else {
    CHECK((memory_tagging_reporting_mode ==
           partition_alloc::TagViolationReportingMode::kUndefined) ||
          (memory_tagging_reporting_mode ==
           partition_alloc::TagViolationReportingMode::kDisabled));
  }

  allocator_shim::ConfigurePartitions(
      allocator_shim::EnableBrp(brp_config.enable_brp),
      allocator_shim::EnableMemoryTagging(enable_memory_tagging),
      memory_tagging_reporting_mode, bucket_distribution,
      allocator_shim::SchedulerLoopQuarantine(scheduler_loop_quarantine),
      scheduler_loop_quarantine_capacity_in_bytes,
      allocator_shim::ZappingByFreeFlags(zapping_by_free_flags),
      allocator_shim::UsePoolOffsetFreelists(use_pool_offset_freelists));

  const uint32_t extras_size = allocator_shim::GetMainPartitionRootExtrasSize();
  // As per description, extras are optional and are expected not to
  // exceed (cookie + max(BRP ref-count)) == 16 + 16 == 32 bytes.
  // 100 is a reasonable cap for this value.
  UmaHistogramCounts100("Memory.PartitionAlloc.PartitionRoot.ExtrasSize",
                        int(extras_size));
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

  // If BRP is not enabled, check if any of PCScan flags is enabled.
  [[maybe_unused]] bool scan_enabled = false;
#if BUILDFLAG(USE_STARSCAN)
  if (!brp_config.enable_brp) {
    scan_enabled = EnablePCScanForMallocPartitionsIfNeeded();
    // No specified process type means this is the Browser process.
    if (process_type.empty()) {
      scan_enabled = scan_enabled ||
                     EnablePCScanForMallocPartitionsInBrowserProcessIfNeeded();
    }
    if (process_type == switches::kRendererProcess) {
      scan_enabled = scan_enabled ||
                     EnablePCScanForMallocPartitionsInRendererProcessIfNeeded();
    }
    if (scan_enabled) {
      if (base::FeatureList::IsEnabled(
              base::features::kPartitionAllocPCScanStackScanning)) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
        partition_alloc::internal::PCScan::EnableStackScanning();
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
      }
      if (base::FeatureList::IsEnabled(
              base::features::kPartitionAllocPCScanImmediateFreeing)) {
        partition_alloc::internal::PCScan::EnableImmediateFreeing();
      }
      if (base::FeatureList::IsEnabled(
              base::features::kPartitionAllocPCScanEagerClearing)) {
        partition_alloc::internal::PCScan::SetClearType(
            partition_alloc::internal::PCScan::ClearType::kEager);
      }
      SetProcessNameForPCScan(process_type);
    }
  }
#endif  // BUILDFLAG(USE_STARSCAN)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  partition_alloc::internal::StackTopRegistry::Get().NotifyThreadCreated(
      partition_alloc::internal::GetStackTop());

#if BUILDFLAG(USE_STARSCAN)
  // Non-quarantinable partition is dealing with hot V8's zone allocations.
  // In case PCScan is enabled in Renderer, enable thread cache on this
  // partition. At the same time, thread cache on the main(malloc) partition
  // must be disabled, because only one partition can have it on.
  if (scan_enabled && process_type == switches::kRendererProcess) {
    allocator_shim::NonQuarantinableAllocator::Instance()
        .root()
        ->EnableThreadCacheIfSupported();
  } else
#endif  // BUILDFLAG(USE_STARSCAN)
  {
    allocator_shim::internal::PartitionAllocMalloc::Allocator()
        ->EnableThreadCacheIfSupported();
  }

  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocLargeEmptySlotSpanRing)) {
    allocator_shim::internal::PartitionAllocMalloc::Allocator()
        ->EnableLargeEmptySlotSpanRing();
  }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#if BUILDFLAG(IS_WIN)
  // Browser process only, since this is the one we want to prevent from
  // crashing the most (as it takes down all the tabs).
  if (base::FeatureList::IsEnabled(
          base::features::kPageAllocatorRetryOnCommitFailure) &&
      process_type.empty()) {
    partition_alloc::SetRetryOnCommitFailure(true);
  }
#endif
}
1284 
// Final configuration stage, run once task runners exist. Sets up periodic
// thread-cache purging and its size limits, optionally a task-based PCScan
// scheduling backend, the periodic memory reclaimer, and free-list sorting /
// straightening modes. Must follow ReconfigureAfterFeatureListInit().
void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
    const std::string& process_type) {
  {
    base::AutoLock scoped_lock(lock_);

    // Init only once.
    if (called_after_thread_pool_init_) {
      return;
    }

    DCHECK_EQ(established_process_type_, process_type);
    // Enforce ordering.
    DCHECK(called_earlyish_);
    DCHECK(called_after_feature_list_init_);

    called_after_thread_pool_init_ = true;
  }

#if PA_CONFIG(THREAD_CACHE_SUPPORTED) && \
    BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // This should be called in specific processes, as the main thread is
  // initialized later.
  DCHECK(process_type != switches::kZygoteProcess);

  partition_alloc::ThreadCacheRegistry::Instance().SetPurgingConfiguration(
      base::features::GetThreadCacheMinPurgeInterval(),
      base::features::GetThreadCacheMaxPurgeInterval(),
      base::features::GetThreadCacheDefaultPurgeInterval(),
      size_t(base::features::GetThreadCacheMinCachedMemoryForPurgingBytes()));

  base::allocator::StartThreadCachePeriodicPurge();

  if (base::FeatureList::IsEnabled(
          base::features::kEnableConfigurableThreadCacheMultiplier)) {
    // If kEnableConfigurableThreadCacheMultiplier is enabled, override the
    // multiplier value with the corresponding feature param.
#if BUILDFLAG(IS_ANDROID)
    ::partition_alloc::ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
        base::features::GetThreadCacheMultiplierForAndroid());
#else   // BUILDFLAG(IS_ANDROID)
    ::partition_alloc::ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
        base::features::GetThreadCacheMultiplier());
#endif  // BUILDFLAG(IS_ANDROID)
  } else {
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
    // If kEnableConfigurableThreadCacheMultiplier is not enabled, lower
    // thread cache limits on Android low end device to avoid stranding too much
    // memory in the caches.
    if (SysInfo::IsLowEndDeviceOrPartialLowEndModeEnabled(
            features::kPartialLowEndModeExcludePartitionAllocSupport)) {
      ::partition_alloc::ThreadCacheRegistry::Instance()
          .SetThreadCacheMultiplier(
              ::partition_alloc::ThreadCache::kDefaultMultiplier / 2.);
    }
#endif  // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
  }

  // Renderer processes are more performance-sensitive, increase thread cache
  // limits.
  if (process_type == switches::kRendererProcess &&
      base::FeatureList::IsEnabled(
          base::features::kPartitionAllocLargeThreadCacheSize)) {
    largest_cached_size_ =
        size_t(base::features::GetPartitionAllocLargeThreadCacheSizeValue());

#if BUILDFLAG(IS_ANDROID)
    // Use appropriately lower amount for Android devices with 3GB or less.
    // Devices almost always report less physical memory than what they actually
    // have, so use 3.2GB (a threshold commonly uses throughout code) to avoid
    // accidentally catching devices advertised as 4GB.
    if (base::SysInfo::AmountOfPhysicalMemoryMB() < 3.2 * 1024) {
      largest_cached_size_ = size_t(
          base::features::
              GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid());
    }
#endif  // BUILDFLAG(IS_ANDROID)

    ::partition_alloc::ThreadCache::SetLargestCachedSize(largest_cached_size_);
  }
#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED) &&
        // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#if BUILDFLAG(USE_STARSCAN)
  if (base::FeatureList::IsEnabled(
          base::features::kPartitionAllocPCScanMUAwareScheduler)) {
    // Assign PCScan a task-based scheduling backend.
    static base::NoDestructor<
        partition_alloc::internal::MUAwareTaskBasedBackend>
        mu_aware_task_based_backend{
            partition_alloc::internal::PCScan::scheduler(),
            &partition_alloc::internal::PCScan::PerformDelayedScan};
    partition_alloc::internal::PCScan::scheduler().SetNewSchedulingBackend(
        *mu_aware_task_based_backend.get());
  }
#endif  // BUILDFLAG(USE_STARSCAN)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  base::allocator::StartMemoryReclaimer(
      base::SingleThreadTaskRunner::GetCurrentDefault());
#endif

  partition_alloc::PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
      base::FeatureList::IsEnabled(
          base::features::kPartitionAllocStraightenLargerSlotSpanFreeLists)
          ? features::kPartitionAllocStraightenLargerSlotSpanFreeListsMode.Get()
          : partition_alloc::StraightenLargerSlotSpanFreeListsMode::kNever);
  partition_alloc::PartitionRoot::SetSortSmallerSlotSpanFreeListsEnabled(
      base::FeatureList::IsEnabled(
          base::features::kPartitionAllocSortSmallerSlotSpanFreeLists));
  partition_alloc::PartitionRoot::SetSortActiveSlotSpansEnabled(
      base::FeatureList::IsEnabled(
          base::features::kPartitionAllocSortActiveSlotSpans));
}
1398 
// Called when the process is foregrounded. Renderer-only: restores the larger
// thread-cache limit computed in ReconfigureAfterTaskRunnerInit() and
// optionally re-tunes the default allocator for foreground use.
void PartitionAllocSupport::OnForegrounded(bool has_main_frame) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  {
    base::AutoLock scoped_lock(lock_);
    if (established_process_type_ != switches::kRendererProcess) {
      return;
    }
  }
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
  // Keep the lower limit for main-frame-less renderers when the
  // kLowerPAMemoryLimitForNonMainRenderers experiment is on.
  if (!base::FeatureList::IsEnabled(
          features::kLowerPAMemoryLimitForNonMainRenderers) ||
      has_main_frame) {
    ::partition_alloc::ThreadCache::SetLargestCachedSize(largest_cached_size_);
  }
#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
  if (base::FeatureList::IsEnabled(
          features::kPartitionAllocAdjustSizeWhenInForeground)) {
    allocator_shim::AdjustDefaultAllocatorForForeground();
  }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
1420 
// Called when the process is backgrounded. Renderer-only: shrinks the
// thread-cache limit, schedules a one-off memory reclaim, and optionally
// re-tunes the default allocator for background use.
void PartitionAllocSupport::OnBackgrounded() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  {
    base::AutoLock scoped_lock(lock_);
    if (established_process_type_ != switches::kRendererProcess) {
      return;
    }
  }
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
  // Performance matters less for background renderers, don't pay the memory
  // cost.
  ::partition_alloc::ThreadCache::SetLargestCachedSize(
      ::partition_alloc::ThreadCacheLimits::kDefaultSizeThreshold);

  // In renderers, memory reclaim uses the "idle time" task runner to run
  // periodic reclaim. This does not always run when the renderer is idle, and
  // in particular after the renderer gets backgrounded. As a result, empty slot
  // spans are potentially never decommitted. To mitigate that, run a one-off
  // reclaim a few seconds later. Even if the renderer comes back to foreground
  // in the meantime, the worst case is a few more system calls.
  //
  // TODO(lizeb): Remove once/if the behavior of idle tasks changes.
  base::PostDelayedMemoryReductionTask(
      base::SingleThreadTaskRunner::GetCurrentDefault(), FROM_HERE,
      base::BindOnce([]() {
        ::partition_alloc::MemoryReclaimer::Instance()->ReclaimAll();
      }),
      base::Seconds(10));

#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
  // Note: gated on the same feature as the foreground adjustment; the feature
  // controls both directions of the size adjustment.
  if (base::FeatureList::IsEnabled(
          features::kPartitionAllocAdjustSizeWhenInForeground)) {
    allocator_shim::AdjustDefaultAllocatorForBackground();
  }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
1457 
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
// Test-only accessor exposing the file-local ExtractDanglingPtrSignature()
// helper to unit tests.
std::string PartitionAllocSupport::ExtractDanglingPtrSignatureForTests(
    std::string stacktrace) {
  return ExtractDanglingPtrSignature(stacktrace);
}
#endif
1464 
1465 }  // namespace base::allocator
1466