// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"

#include <atomic>
#include <bit>
#include <cstddef>
#include <map>
#include <string>
#include <tuple>

#include "build/build_config.h"
#include "partition_alloc/allocation_guard.h"
#include "partition_alloc/chromecast_buildflags.h"
#include "partition_alloc/memory_reclaimer.h"
#include "partition_alloc/partition_alloc.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/no_destructor.h"
#include "partition_alloc/partition_alloc_base/numerics/checked_math.h"
#include "partition_alloc/partition_alloc_base/numerics/safe_conversions.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_stats.h"
#include "partition_alloc/shim/allocator_dispatch.h"
#include "partition_alloc/shim/allocator_shim_internals.h"
#include "partition_alloc/shim/nonscannable_allocator.h"

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <malloc.h>
#endif

using allocator_shim::AllocatorDispatch;

namespace {

class SimpleScopedSpinLocker {
 public:
  explicit SimpleScopedSpinLocker(std::atomic<bool>& lock) : lock_(lock) {
    // Lock. Semantically equivalent to base::Lock::Acquire().
    bool expected = false;
    // Weak CAS since we are in a retry loop, relaxed ordering for failure since
    // in this case we don't imply any ordering.
    //
    // This matches partition_allocator/spinning_mutex.h fast path on Linux.
    while (!lock_.compare_exchange_weak(
        expected, true, std::memory_order_acquire, std::memory_order_relaxed)) {
      expected = false;
    }
  }

  ~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); }

 private:
  std::atomic<bool>& lock_;
};
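
// Illustrative usage sketch (hypothetical names), mirroring how the locker is
// used for |initialization_lock_| below: the lock is acquired in the
// constructor and released when the locker goes out of scope.
//
//   std::atomic<bool> lock;
//   {
//     SimpleScopedSpinLocker scoped_lock{lock};
//     // ... critical section ...
//   }  // Released here.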

// We can't use a "static local" or a base::LazyInstance, as:
// - static local variables call into the runtime on Windows, which is not
//   prepared to handle it, as the first allocation happens during CRT init.
// - We don't want to depend on base::LazyInstance, which may be converted to
//   static locals one day.
//
// Nevertheless, this provides essentially the same thing.
template <typename T, typename Constructor>
class LeakySingleton {
 public:
  constexpr LeakySingleton() = default;

  PA_ALWAYS_INLINE T* Get() {
    auto* instance = instance_.load(std::memory_order_acquire);
    if (PA_LIKELY(instance)) {
      return instance;
    }

    return GetSlowPath();
  }

  // Replaces the instance pointer with a new one.
  void Replace(T* new_instance) {
    SimpleScopedSpinLocker scoped_lock{initialization_lock_};

    // Modify under the lock to avoid race between |if (instance)| and
    // |instance_.store()| in GetSlowPath().
    instance_.store(new_instance, std::memory_order_release);
  }

 private:
  T* GetSlowPath();

  std::atomic<T*> instance_;
  // Before C++20, having an initializer here causes a "variable does not have a
  // constant initializer" error.  In C++20, omitting it causes a similar error.
  // Presumably this is due to the C++20 changes to make atomic initialization
  // (of the other members of this class) sane, so guarding under that
  // feature-test.
#if !defined(__cpp_lib_atomic_value_initialization) || \
    __cpp_lib_atomic_value_initialization < 201911L
  alignas(T) uint8_t instance_buffer_[sizeof(T)];
#else
  alignas(T) uint8_t instance_buffer_[sizeof(T)] = {0};
#endif
  std::atomic<bool> initialization_lock_;
};
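
// The Constructor policy is expected to provide a static New(void*) that
// placement-constructs a T in the supplied buffer and returns it. A minimal
// sketch with hypothetical names (MainPartitionConstructor below is the real
// policy used here):
//
//   struct MyConstructor {
//     static MyType* New(void* buffer) { return new (buffer) MyType(); }
//   };
//   LeakySingleton<MyType, MyConstructor> g_singleton PA_CONSTINIT = {};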

template <typename T, typename Constructor>
T* LeakySingleton<T, Constructor>::GetSlowPath() {
  // The instance has not been set; the proper way to proceed (correct
  // double-checked locking) is:
  //
  // auto* instance = instance_.load(std::memory_order_acquire);
  // if (!instance) {
  //   ScopedLock initialization_lock;
  //   root = instance_.load(std::memory_order_relaxed);
  //   if (root)
  //     return root;
  //   instance = Create new root;
  //   instance_.store(instance, std::memory_order_release);
  //   return instance;
  // }
  //
  // However, we don't want to use a base::Lock here, so instead we use
  // compare-and-exchange on a lock variable, which provides the same
  // guarantees.
  SimpleScopedSpinLocker scoped_lock{initialization_lock_};

  T* instance = instance_.load(std::memory_order_relaxed);
  // Someone beat us.
  if (instance) {
    return instance;
  }

  instance = Constructor::New(reinterpret_cast<void*>(instance_buffer_));
  instance_.store(instance, std::memory_order_release);

  return instance;
}

class MainPartitionConstructor {
 public:
  static partition_alloc::PartitionRoot* New(void* buffer) {
    partition_alloc::PartitionOptions opts;
    // Only one partition can have the thread cache enabled. Since additional
    // partitions are created in ReconfigureAfterFeatureListInit(), postpone the
    // decision to turn the thread cache on until then. Note also that tests,
    // such as the ThreadCache tests, create a thread cache.
    opts.thread_cache = partition_alloc::PartitionOptions::kDisabled;
    opts.star_scan_quarantine = partition_alloc::PartitionOptions::kAllowed;
    opts.backup_ref_ptr = partition_alloc::PartitionOptions::kDisabled;
    auto* new_root = new (buffer) partition_alloc::PartitionRoot(opts);

    return new_root;
  }
};

LeakySingleton<partition_alloc::PartitionRoot, MainPartitionConstructor> g_root
    PA_CONSTINIT = {};
partition_alloc::PartitionRoot* Allocator() {
  return g_root.Get();
}

// Original g_root_ if it was replaced by ConfigurePartitions().
std::atomic<partition_alloc::PartitionRoot*> g_original_root(nullptr);

std::atomic<bool> g_roots_finalized = false;

partition_alloc::PartitionRoot* OriginalAllocator() {
  return g_original_root.load(std::memory_order_relaxed);
}

bool AllocatorConfigurationFinalized() {
  return g_roots_finalized.load();
}

void* AllocateAlignedMemory(size_t alignment, size_t size) {
  // Memory returned by the regular allocator *always* respects |kAlignment|,
  // which is a power of two, and any valid alignment is also a power of two. So
  // we can directly fulfill these requests with the regular Alloc function.
  //
  // There are several call sites in Chromium where base::AlignedAlloc is called
  // with a small alignment. Some may be due to overly-careful code, some are
  // because the client code doesn't know the required alignment at compile
  // time.
  if (alignment <= partition_alloc::internal::kAlignment) {
    // This is mandated by |posix_memalign()| and friends, so should never fire.
    PA_CHECK(std::has_single_bit(alignment));
    // TODO(bartekn): See if the compiler optimizes branches down the stack on
    // Mac, where PartitionPageSize() isn't constexpr.
    return Allocator()->AllocInline<partition_alloc::AllocFlags::kNoHooks>(
        size);
  }

  return Allocator()->AlignedAllocInline<partition_alloc::AllocFlags::kNoHooks>(
      alignment, size);
}

}  // namespace

namespace allocator_shim::internal {

void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  return Allocator()->AllocInline<partition_alloc::AllocFlags::kNoHooks>(size);
}

void* PartitionMallocUnchecked(const AllocatorDispatch*,
                               size_t size,
                               void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  return Allocator()
      ->AllocInline<partition_alloc::AllocFlags::kReturnNull |
                    partition_alloc::AllocFlags::kNoHooks>(size);
}

void* PartitionCalloc(const AllocatorDispatch*,
                      size_t n,
                      size_t size,
                      void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  const size_t total =
      partition_alloc::internal::base::CheckMul(n, size).ValueOrDie();
  return Allocator()
      ->AllocInline<partition_alloc::AllocFlags::kZeroFill |
                    partition_alloc::AllocFlags::kNoHooks>(total);
}

void* PartitionMemalign(const AllocatorDispatch*,
                        size_t alignment,
                        size_t size,
                        void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  return AllocateAlignedMemory(alignment, size);
}

void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
                            size_t size,
                            size_t alignment,
                            void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  return AllocateAlignedMemory(alignment, size);
}

// aligned_realloc documentation is
// https://docs.microsoft.com/ja-jp/cpp/c-runtime-library/reference/aligned-realloc
// TODO(tasak): Expand the given memory block to the given size if possible.
// This realloc always frees the original memory block and allocates a new
// memory block.
// TODO(tasak): Implement PartitionRoot::AlignedRealloc and use it.
void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
                              void* address,
                              size_t size,
                              size_t alignment,
                              void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  void* new_ptr = nullptr;
  if (size > 0) {
    new_ptr = AllocateAlignedMemory(alignment, size);
  } else {
    // size == 0 and address != null means just "free(address)".
    if (address) {
      partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
          partition_alloc::FreeFlags::kNoHooks>(address);
    }
  }
  // The original memory block (specified by address) is unchanged if ENOMEM.
  if (!new_ptr) {
    return nullptr;
  }
  // TODO(tasak): Need to compare the new alignment with the address' alignment.
  // If the two alignments are not the same, need to return nullptr with EINVAL.
  if (address) {
    size_t usage = partition_alloc::PartitionRoot::GetUsableSize(address);
    size_t copy_size = usage > size ? size : usage;
    memcpy(new_ptr, address, copy_size);

    partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
        partition_alloc::FreeFlags::kNoHooks>(address);
  }
  return new_ptr;
}

void* PartitionRealloc(const AllocatorDispatch*,
                       void* address,
                       size_t size,
                       void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
                      reinterpret_cast<uintptr_t>(address)) &&
                  address)) {
    // A memory region allocated by the system allocator was passed to this
    // function.  Forward the request to `realloc`, which supports zone
    // dispatching and will select the right zone.
    return realloc(address, size);
  }
#endif  // BUILDFLAG(IS_APPLE)

  return Allocator()->Realloc<partition_alloc::AllocFlags::kNoHooks>(address,
                                                                     size, "");
}

#if BUILDFLAG(PA_IS_CAST_ANDROID)
extern "C" {
void __real_free(void*);
}       // extern "C"
#endif  // BUILDFLAG(PA_IS_CAST_ANDROID)

void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
  // TODO(bartekn): Add MTE unmasking here (and below).
  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
                      reinterpret_cast<uintptr_t>(object)) &&
                  object)) {
    // A memory region allocated by the system allocator was passed to this
    // function.  Forward the request to `free`, which supports zone
    // dispatching and will select the right zone.
    return free(object);
  }
#endif  // BUILDFLAG(IS_APPLE)

  // On Android Chromecast devices, there is at least one case where a system
  // malloc() pointer can be passed to PartitionAlloc's free(). If we don't own
  // the pointer, pass it along. This should not have a runtime cost vs regular
  // Android, since on Android we have a PA_CHECK() rather than the branch here.
#if BUILDFLAG(PA_IS_CAST_ANDROID)
  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
                      reinterpret_cast<uintptr_t>(object)) &&
                  object)) {
    // A memory region allocated by the system allocator was passed to this
    // function.  Forward the request to `free()`, which is `__real_free()`
    // here.
    return __real_free(object);
  }
#endif  // BUILDFLAG(PA_IS_CAST_ANDROID)

  partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
      partition_alloc::FreeFlags::kNoHooks>(object);
}

#if BUILDFLAG(IS_APPLE)
// Normal free() path on Apple OSes:
// 1. size = GetSizeEstimate(ptr);
// 2. if (size) FreeDefiniteSize(ptr, size)
//
// So we don't need to re-check that the pointer is owned in Free(), and we
// can use the size.
void PartitionFreeDefiniteSize(const AllocatorDispatch*,
                               void* address,
                               size_t size,
                               void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};
  // TODO(lizeb): Optimize PartitionAlloc to use the size information. This is
  // still useful though, as we avoid double-checking that the address is owned.
  partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
      partition_alloc::FreeFlags::kNoHooks>(address);
}
#endif  // BUILDFLAG(IS_APPLE)

size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
                                void* address,
                                void* context) {
  // This is used to implement malloc_usable_size(3). Per its man page, "if ptr
  // is NULL, 0 is returned".
  if (!address) {
    return 0;
  }

#if BUILDFLAG(IS_APPLE)
  if (!partition_alloc::IsManagedByPartitionAlloc(
          reinterpret_cast<uintptr_t>(address))) {
    // The object pointed to by `address` was not allocated by PartitionAlloc.
    // The return value `0` means that the pointer does not belong to this
    // malloc zone.
    return 0;
  }
#endif  // BUILDFLAG(IS_APPLE)

  // TODO(lizeb): Returns incorrect values for aligned allocations.
  const size_t size =
      partition_alloc::PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(
          address);
#if BUILDFLAG(IS_APPLE)
  // The object pointed to by `address` was allocated by PartitionAlloc, so
  // this function must not return zero, so that the malloc zone dispatcher
  // finds the appropriate malloc zone.
  PA_DCHECK(size);
#endif  // BUILDFLAG(IS_APPLE)
  return size;
}

#if BUILDFLAG(IS_APPLE)
size_t PartitionGoodSize(const AllocatorDispatch*, size_t size, void* context) {
  return Allocator()->AllocationCapacityFromRequestedSize(size);
}

bool PartitionClaimedAddress(const AllocatorDispatch*,
                             void* address,
                             void* context) {
  return partition_alloc::IsManagedByPartitionAlloc(
      reinterpret_cast<uintptr_t>(address));
}
#endif  // BUILDFLAG(IS_APPLE)

unsigned PartitionBatchMalloc(const AllocatorDispatch*,
                              size_t size,
                              void** results,
                              unsigned num_requested,
                              void* context) {
  // No real batching: we could, for instance, acquire the lock only once.
  // Keep it simple for now.
  for (unsigned i = 0; i < num_requested; i++) {
    // No need to check the results, we crash if it fails.
    results[i] = PartitionMalloc(nullptr, size, nullptr);
  }

  // Either all succeeded, or we crashed.
  return num_requested;
}

void PartitionBatchFree(const AllocatorDispatch*,
                        void** to_be_freed,
                        unsigned num_to_be_freed,
                        void* context) {
  // No real batching: we could, for instance, acquire the lock only once.
  // Keep it simple for now.
  for (unsigned i = 0; i < num_to_be_freed; i++) {
    PartitionFree(nullptr, to_be_freed[i], nullptr);
  }
}

#if BUILDFLAG(IS_APPLE)
void PartitionTryFreeDefault(const AllocatorDispatch*,
                             void* address,
                             void* context) {
  partition_alloc::ScopedDisallowAllocations guard{};

  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
          reinterpret_cast<uintptr_t>(address)))) {
    // The object pointed to by `address` was not allocated by PartitionAlloc.
    // Call find_zone_and_free.
    return allocator_shim::TryFreeDefaultFallbackToFindZoneAndFree(address);
  }

  partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
      partition_alloc::FreeFlags::kNoHooks>(address);
}
#endif  // BUILDFLAG(IS_APPLE)

// static
bool PartitionAllocMalloc::AllocatorConfigurationFinalized() {
  return ::AllocatorConfigurationFinalized();
}

// static
partition_alloc::PartitionRoot* PartitionAllocMalloc::Allocator() {
  return ::Allocator();
}

// static
partition_alloc::PartitionRoot* PartitionAllocMalloc::OriginalAllocator() {
  return ::OriginalAllocator();
}

}  // namespace allocator_shim::internal

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

namespace allocator_shim {

void EnablePartitionAllocMemoryReclaimer() {
  // Unlike other partitions, Allocator() does not register its PartitionRoot to
  // the memory reclaimer, because doing so may allocate memory. Thus, the
  // registration to the memory reclaimer has to be done some time later, when
  // the main root is fully configured.
  ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
      Allocator());

  // There is only one PartitionAlloc-Everywhere partition at the moment. Any
  // additional partitions will be created in ConfigurePartitions() and
  // registered for memory reclaimer there.
  PA_DCHECK(!AllocatorConfigurationFinalized());
  PA_DCHECK(OriginalAllocator() == nullptr);
}

void ConfigurePartitions(
    EnableBrp enable_brp,
    EnableMemoryTagging enable_memory_tagging,
    partition_alloc::TagViolationReportingMode memory_tagging_reporting_mode,
    BucketDistribution distribution,
    SchedulerLoopQuarantine scheduler_loop_quarantine,
    size_t scheduler_loop_quarantine_capacity_in_bytes,
    ZappingByFreeFlags zapping_by_free_flags,
    UsePoolOffsetFreelists use_pool_offset_freelists) {
  // Calling Get() is actually important, even if the return value isn't
  // used, because it has a side effect of initializing the variable, if it
  // wasn't already.
  auto* current_root = g_root.Get();

  // We've been bitten before by using a static local when initializing a
  // partition. For synchronization, static local variables call into the
  // runtime on Windows, which may not be ready to handle it if the path is
  // invoked on an allocation during runtime initialization.
  // ConfigurePartitions() is invoked explicitly from Chromium code, so this
  // shouldn't bite us here. Mentioning it just in case we move this code
  // earlier.
  static partition_alloc::internal::base::NoDestructor<
      partition_alloc::PartitionAllocator>
      new_main_allocator([&]() {
        partition_alloc::PartitionOptions opts;
        // The caller of ConfigurePartitions() will decide whether this or
        // another partition will have the thread cache enabled, by calling
        // EnableThreadCacheIfSupported().
        opts.thread_cache = partition_alloc::PartitionOptions::kDisabled;
        opts.star_scan_quarantine = partition_alloc::PartitionOptions::kAllowed;
        opts.backup_ref_ptr =
            enable_brp ? partition_alloc::PartitionOptions::kEnabled
                       : partition_alloc::PartitionOptions::kDisabled;
        opts.zapping_by_free_flags =
            zapping_by_free_flags
                ? partition_alloc::PartitionOptions::kEnabled
                : partition_alloc::PartitionOptions::kDisabled;
        opts.scheduler_loop_quarantine =
            scheduler_loop_quarantine
                ? partition_alloc::PartitionOptions::kEnabled
                : partition_alloc::PartitionOptions::kDisabled;
        opts.scheduler_loop_quarantine_capacity_in_bytes =
            scheduler_loop_quarantine_capacity_in_bytes;
        opts.memory_tagging = {
            .enabled = enable_memory_tagging
                           ? partition_alloc::PartitionOptions::kEnabled
                           : partition_alloc::PartitionOptions::kDisabled,
            .reporting_mode = memory_tagging_reporting_mode};
        opts.use_pool_offset_freelists =
            use_pool_offset_freelists
                ? partition_alloc::PartitionOptions::kEnabled
                : partition_alloc::PartitionOptions::kDisabled;
        return opts;
      }());
  partition_alloc::PartitionRoot* new_root = new_main_allocator->root();

  // Now switch traffic to the new partition.
  g_original_root = current_root;
  g_root.Replace(new_root);

  // Purge memory, now that the traffic to the original partition is cut off.
  current_root->PurgeMemory(
      partition_alloc::PurgeFlags::kDecommitEmptySlotSpans |
      partition_alloc::PurgeFlags::kDiscardUnusedSystemPages);

  switch (distribution) {
    case BucketDistribution::kNeutral:
      // We start in the 'default' case.
      break;
    case BucketDistribution::kDenser:
      new_root->SwitchToDenserBucketDistribution();
      break;
  }

  PA_CHECK(!g_roots_finalized.exchange(true));  // Ensure configured once.
}

// No synchronization provided: `PartitionRoot.flags` is only written
// to in `PartitionRoot::Init()`.
uint32_t GetMainPartitionRootExtrasSize() {
#if PA_CONFIG(EXTRAS_REQUIRED)
  return g_root.Get()->settings.extras_size;
#else
  return 0;
#endif  // PA_CONFIG(EXTRAS_REQUIRED)
}

#if BUILDFLAG(USE_STARSCAN)
void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
  partition_alloc::internal::PCScan::Initialize(config);

  PA_CHECK(AllocatorConfigurationFinalized());
  partition_alloc::internal::PCScan::RegisterScannableRoot(Allocator());
  if (OriginalAllocator() != nullptr) {
    partition_alloc::internal::PCScan::RegisterScannableRoot(
        OriginalAllocator());
  }

  allocator_shim::NonScannableAllocator::Instance().NotifyPCScanEnabled();
  allocator_shim::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
}
#endif  // BUILDFLAG(USE_STARSCAN)

void AdjustDefaultAllocatorForForeground() {
  Allocator()->AdjustForForeground();
}

void AdjustDefaultAllocatorForBackground() {
  Allocator()->AdjustForBackground();
}

}  // namespace allocator_shim

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &allocator_shim::internal::PartitionMalloc,  // alloc_function
    &allocator_shim::internal::
        PartitionMallocUnchecked,  // alloc_unchecked_function
    &allocator_shim::internal::
        PartitionCalloc,  // alloc_zero_initialized_function
    &allocator_shim::internal::PartitionMemalign,  // alloc_aligned_function
    &allocator_shim::internal::PartitionRealloc,   // realloc_function
    &allocator_shim::internal::PartitionFree,      // free_function
    &allocator_shim::internal::
        PartitionGetSizeEstimate,  // get_size_estimate_function
#if BUILDFLAG(IS_APPLE)
    &allocator_shim::internal::PartitionGoodSize,        // good_size
    &allocator_shim::internal::PartitionClaimedAddress,  // claimed_address
#else
    nullptr,  // good_size
    nullptr,  // claimed_address
#endif
    &allocator_shim::internal::PartitionBatchMalloc,  // batch_malloc_function
    &allocator_shim::internal::PartitionBatchFree,    // batch_free_function
#if BUILDFLAG(IS_APPLE)
    // On Apple OSes, free_definite_size() is always called from free(), since
    // get_size_estimate() is used to determine whether an allocation belongs to
    // the current zone. It makes sense to optimize for it.
    &allocator_shim::internal::PartitionFreeDefiniteSize,
    // On Apple OSes, try_free_default() is sometimes called as an optimization
    // of free().
    &allocator_shim::internal::PartitionTryFreeDefault,
#else
    nullptr,  // free_definite_size_function
    nullptr,  // try_free_default_function
#endif
    &allocator_shim::internal::
        PartitionAlignedAlloc,  // aligned_malloc_function
    &allocator_shim::internal::
        PartitionAlignedRealloc,               // aligned_realloc_function
    &allocator_shim::internal::PartitionFree,  // aligned_free_function
    nullptr,                                   // next
};

// Intercept diagnostics symbols as well, even though they are not part of the
// unified shim layer.
//
// TODO(lizeb): Implement the ones that are doable.

extern "C" {

#if !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)

SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}

SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
  return 0;
}

#endif  // !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
  partition_alloc::SimplePartitionStatsDumper allocator_dumper;
  Allocator()->DumpStats("malloc", true, &allocator_dumper);
  // TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.
  // Dump stats for nonscannable and nonquarantinable allocators.
  auto& nonscannable_allocator =
      allocator_shim::NonScannableAllocator::Instance();
  partition_alloc::SimplePartitionStatsDumper nonscannable_allocator_dumper;
  if (auto* nonscannable_root = nonscannable_allocator.root()) {
    nonscannable_root->DumpStats("malloc", true,
                                 &nonscannable_allocator_dumper);
  }
  auto& nonquarantinable_allocator =
      allocator_shim::NonQuarantinableAllocator::Instance();
  partition_alloc::SimplePartitionStatsDumper nonquarantinable_allocator_dumper;
  if (auto* nonquarantinable_root = nonquarantinable_allocator.root()) {
    nonquarantinable_root->DumpStats("malloc", true,
                                     &nonquarantinable_allocator_dumper);
  }

  struct mallinfo info = {};
  info.arena = 0;  // Memory *not* allocated with mmap().

  // Memory allocated with mmap(), aka virtual size.
  info.hblks =
      partition_alloc::internal::base::checked_cast<decltype(info.hblks)>(
          allocator_dumper.stats().total_mmapped_bytes +
          nonscannable_allocator_dumper.stats().total_mmapped_bytes +
          nonquarantinable_allocator_dumper.stats().total_mmapped_bytes);
  // Resident bytes.
  info.hblkhd =
      partition_alloc::internal::base::checked_cast<decltype(info.hblkhd)>(
          allocator_dumper.stats().total_resident_bytes +
          nonscannable_allocator_dumper.stats().total_resident_bytes +
          nonquarantinable_allocator_dumper.stats().total_resident_bytes);
  // Allocated bytes.
  info.uordblks =
      partition_alloc::internal::base::checked_cast<decltype(info.uordblks)>(
          allocator_dumper.stats().total_active_bytes +
          nonscannable_allocator_dumper.stats().total_active_bytes +
          nonquarantinable_allocator_dumper.stats().total_active_bytes);

  return info;
}
#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)

}  // extern "C"

#if BUILDFLAG(IS_APPLE)

namespace allocator_shim {

void InitializeDefaultAllocatorPartitionRoot() {
  // On OS_APPLE, the initialization of PartitionRoot uses memory allocations
  // internally, e.g. __builtin_available, and it's not easy to avoid them.
  // Thus, we initialize the PartitionRoot using the system default allocator
  // before we intercept the system default allocator.
  std::ignore = Allocator();
}

}  // namespace allocator_shim

#endif  // BUILDFLAG(IS_APPLE)

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)