1 // Copyright 2018 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef PARTITION_ALLOC_PARTITION_ALLOC_CONSTANTS_H_
6 #define PARTITION_ALLOC_PARTITION_ALLOC_CONSTANTS_H_
7 
8 #include <algorithm>
9 #include <climits>
10 #include <cstddef>
11 #include <limits>
12 
13 #include "build/build_config.h"
14 #include "partition_alloc/address_pool_manager_types.h"
15 #include "partition_alloc/flags.h"
16 #include "partition_alloc/page_allocator_constants.h"
17 #include "partition_alloc/partition_alloc_base/compiler_specific.h"
18 #include "partition_alloc/partition_alloc_buildflags.h"
19 #include "partition_alloc/partition_alloc_config.h"
20 #include "partition_alloc/partition_alloc_forward.h"
21 
22 #if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
23 #include <mach/vm_page_size.h>
24 #endif
25 
26 #if BUILDFLAG(HAS_MEMORY_TAGGING)
27 #include "partition_alloc/tagging.h"
28 #endif
29 
30 namespace partition_alloc {
31 
32 namespace internal {
// Bit flag constants used as `flag` argument of PartitionRoot::Alloc<flags>,
// AlignedAlloc, etc.
enum class AllocFlags {
  kNone = 0,
  kReturnNull = 1 << 0,
  kZeroFill = 1 << 1,
  // Don't allow allocation override hooks. Override hooks are expected to
  // check for the presence of this flag and return false if it is active.
  kNoOverrideHooks = 1 << 2,
  // Never let a memory tool like ASan (if active) perform the allocation.
  kNoMemoryToolOverride = 1 << 3,
  // Don't allow any hooks (override or observers).
  kNoHooks = 1 << 4,  // Internal.
  // If the allocation requires a "slow path" (such as allocating/committing a
  // new slot span), return nullptr instead. Note this makes all large
  // allocations return nullptr, such as direct-mapped ones, and even for
  // smaller ones, a nullptr value is common.
  kFastPathOrReturnNull = 1 << 5,  // Internal.
  // An allocation override hook should tag the allocated memory for MTE.
  kMemoryShouldBeTaggedForMte = 1 << 6,  // Internal.
  // Keep in sync with the highest flag above when adding new flags.
  kMaxValue = kMemoryShouldBeTaggedForMte,
};
// Enables bitwise operators (|, &, ~, ...) on the flag type.
PA_DEFINE_OPERATORS_FOR_FLAGS(AllocFlags);
56 
// Bit flag constants used as `flag` argument of PartitionRoot::Free<flags>.
enum class FreeFlags {
  kNone = 0,
  // See AllocFlags::kNoMemoryToolOverride.
  kNoMemoryToolOverride = 1 << 0,
  // Don't allow any hooks (override or observers).
  kNoHooks = 1 << 1,  // Internal.
  // Quarantine for a while to ensure no UaF from on-stack pointers.
  kSchedulerLoopQuarantine = 1 << 2,
  // Zap the object region on `Free()`.
  kZap = 1 << 3,
  // Keep in sync with the highest flag above when adding new flags.
  kMaxValue = kZap,
};
// Enables bitwise operators (|, &, ~, ...) on the flag type.
PA_DEFINE_OPERATORS_FOR_FLAGS(FreeFlags);
71 }  // namespace internal
72 
// Re-export the flag types so callers can spell them without `internal::`.
using internal::AllocFlags;
using internal::FreeFlags;
75 
76 namespace internal {
77 
// Size of a cache line. Not all CPUs in the world have a 64 bytes cache line
// size, but as of 2021, most do. This is in particular the case for almost all
// x86_64 and almost all ARM CPUs supported by Chromium. As this is used for
// static alignment, we cannot query the CPU at runtime to determine the actual
// alignment, so use 64 bytes everywhere. Since this is only used to avoid false
// sharing, getting this wrong only results in lower performance, not incorrect
// code.
constexpr size_t kPartitionCachelineSize = 64;
86 
87 // Underlying partition storage pages (`PartitionPage`s) are a power-of-2 size.
88 // It is typical for a `PartitionPage` to be based on multiple system pages.
89 // Most references to "page" refer to `PartitionPage`s.
90 //
91 // *Super pages* are the underlying system allocations we make. Super pages
92 // contain multiple partition pages and include space for a small amount of
93 // metadata per partition page.
94 //
// Inside super pages, we store *slot spans*. A slot span is a contiguous range
96 // of one or more `PartitionPage`s that stores allocations of the same size.
97 // Slot span sizes are adjusted depending on the allocation size, to make sure
98 // the packing does not lead to unused (wasted) space at the end of the last
99 // system page of the span. For our current maximum slot span size of 64 KiB and
100 // other constant values, we pack _all_ `PartitionRoot::Alloc` sizes perfectly
101 // up against the end of a system page.
102 
// log2 of PartitionPageSize(). Platform-dependent; see the per-branch sizes
// below.
#if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONGARCH64)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
  return 16;  // 64 KiB
}
#elif defined(ARCH_CPU_PPC64)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
  return 18;  // 256 KiB
}
#elif (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
    defined(PARTITION_ALLOCATOR_CONSTANTS_POSIX_NONCONST_PAGE_SIZE)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
  // Page size is only known at run time here, so derive the partition page
  // from the allocation granularity (4x the granularity).
  return PageAllocationGranularityShift() + 2;
}
#else
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
  return 14;  // 16 KiB
}
#endif
125 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageSize()126 PartitionPageSize() {
127   return 1 << PartitionPageShift();
128 }
// Mask selecting the offset of an address within its `PartitionPage`.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageOffsetMask() {
  return PartitionPageSize() - 1;
}
// Mask selecting the base (start address) of a `PartitionPage`; complement of
// PartitionPageOffsetMask().
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageBaseMask() {
  return ~PartitionPageOffsetMask();
}
137 
// Number of system pages per regular slot span. Above this limit, we call it
// a single-slot span, as the span literally hosts only one slot, and has
// somewhat different implementation. At run-time, single-slot spans can be
// differentiated with a call to CanStoreRawSize().
// TODO: Should this be 1 on platforms with page size larger than 4kB, e.g.
// ARM macOS or defined(_MIPS_ARCH_LOONGSON)?
constexpr size_t kMaxPartitionPagesPerRegularSlotSpan = 4;
145 
146 // To avoid fragmentation via never-used freelist entries, we hand out partition
147 // freelist sections gradually, in units of the dominant system page size. What
148 // we're actually doing is avoiding filling the full `PartitionPage` (16 KiB)
149 // with freelist pointers right away. Writing freelist pointers will fault and
150 // dirty a private page, which is very wasteful if we never actually store
151 // objects there.
152 
// Number of system pages contained in one `PartitionPage`.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
NumSystemPagesPerPartitionPage() {
  return PartitionPageSize() >> SystemPageShift();
}
157 
// Maximum number of system pages a regular (multi-slot) slot span may cover.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
MaxSystemPagesPerRegularSlotSpan() {
  return NumSystemPagesPerPartitionPage() *
         kMaxPartitionPagesPerRegularSlotSpan;
}
163 
164 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
MaxRegularSlotSpanSize()165 MaxRegularSlotSpanSize() {
166   return kMaxPartitionPagesPerRegularSlotSpan << PartitionPageShift();
167 }
168 
// The maximum size that is used in an alternate bucket distribution. After this
// threshold, we only have 1 slot per slot-span, so external fragmentation
// doesn't matter. So, using the alternate bucket distribution after this
// threshold has no benefit, and only increases internal fragmentation.
//
// We would like this to be |MaxRegularSlotSpanSize()| on all platforms, but
// this is not constexpr on all platforms, so on other platforms we hardcode it,
// even though this may be too low, e.g. on systems with a page size >4KiB.
constexpr size_t kHighThresholdForAlternateDistribution =
#if PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
    MaxRegularSlotSpanSize();
#else
    1 << 16;  // 64 KiB fallback.
#endif
183 
184 // We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as well).
185 // These chunks are called *super pages*. We do this so that we can store
186 // metadata in the first few pages of each 2 MiB-aligned section. This makes
187 // freeing memory very fast. 2 MiB size & alignment were chosen, because this
188 // virtual address block represents a full but single page table allocation on
189 // ARM, ia32 and x64, which may be slightly more performance&memory efficient.
190 // (Note, these super pages are backed by 4 KiB system pages and have nothing to
191 // do with OS concept of "huge pages"/"large pages", even though the size
192 // coincides.)
193 //
194 // The layout of the super page is as follows. The sizes below are the same for
195 // 32- and 64-bit platforms.
196 //
197 //     +-----------------------+
198 //     | Guard page (4 KiB)    |
199 //     | Metadata page (4 KiB) |
200 //     | Guard pages (8 KiB)   |
201 //     | Free Slot Bitmap      |
202 //     | *Scan State Bitmap    |
203 //     | Slot span             |
204 //     | Slot span             |
205 //     | ...                   |
206 //     | Slot span             |
207 //     | Guard pages (16 KiB)  |
208 //     +-----------------------+
209 //
210 // Free Slot Bitmap is only present when USE_FREESLOT_BITMAP is true. State
211 // Bitmap is inserted for partitions that may have quarantine enabled.
212 //
213 // If ENABLE_BACKUP_REF_PTR_SUPPORT is on, InSlotMetadataTable(4KiB) is inserted
214 // after the Metadata page, which hosts what normally would be in-slot metadata,
215 // but for reasons described in InSlotMetadataPointer() can't always be placed
216 // inside the slot. BRP ref-count is there, hence the connection with
217 // ENABLE_BACKUP_REF_PTR_SUPPORT.
218 // The guard page after the table is reduced to 4KiB.
219 //
220 //...
221 //     | Metadata page (4 KiB)       |
222 //     | InSlotMetadataTable (4 KiB) |
223 //     | Guard pages (4 KiB)         |
224 //...
225 //
226 // Each slot span is a contiguous range of one or more `PartitionPage`s. Note
227 // that slot spans of different sizes may co-exist with one super page. Even
228 // slot spans of the same size may support different slot sizes. However, all
229 // slots within a span have to be of the same size.
230 //
231 // The metadata page has the following format. Note that the `PartitionPage`
232 // that is not at the head of a slot span is "unused" (by most part, it only
233 // stores the offset from the head page). In other words, the metadata for the
234 // slot span is stored only in the first `PartitionPage` of the slot span.
235 // Metadata accesses to other `PartitionPage`s are redirected to the first
236 // `PartitionPage`.
237 //
238 //     +---------------------------------------------+
239 //     | SuperPageExtentEntry (32 B)                 |
240 //     | PartitionPage of slot span 1 (32 B, used)   |
241 //     | PartitionPage of slot span 1 (32 B, unused) |
242 //     | PartitionPage of slot span 1 (32 B, unused) |
243 //     | PartitionPage of slot span 2 (32 B, used)   |
244 //     | PartitionPage of slot span 3 (32 B, used)   |
245 //     | ...                                         |
246 //     | PartitionPage of slot span N (32 B, used)   |
247 //     | PartitionPage of slot span N (32 B, unused) |
248 //     | PartitionPage of slot span N (32 B, unused) |
249 //     +---------------------------------------------+
250 //
251 // A direct-mapped page has an identical layout at the beginning to fake it
252 // looking like a super page:
253 //
254 //     +---------------------------------+
255 //     | Guard page (4 KiB)              |
256 //     | Metadata page (4 KiB)           |
257 //     | Guard pages (8 KiB)             |
258 //     | Direct mapped object            |
259 //     | Guard page (4 KiB, 32-bit only) |
260 //     +---------------------------------+
261 //
262 // A direct-mapped page's metadata page has the following layout (on 64 bit
263 // architectures. On 32 bit ones, the layout is identical, some sizes are
264 // different due to smaller pointers.):
265 //
266 //     +----------------------------------+
267 //     | SuperPageExtentEntry (32 B)      |
268 //     | PartitionPage (32 B)             |
269 //     | PartitionBucket (40 B)           |
270 //     | PartitionDirectMapExtent (32 B)  |
271 //     +----------------------------------+
272 //
273 // See |PartitionDirectMapMetadata| for details.
274 
constexpr size_t kGiB = 1024 * 1024 * 1024ull;
// Super pages are 2 MiB reservations, aligned to 2 MiB (see the layout
// description above).
constexpr size_t kSuperPageShift = 21;  // 2 MiB
constexpr size_t kSuperPageSize = 1 << kSuperPageShift;
constexpr size_t kSuperPageAlignment = kSuperPageSize;
// Mask selecting the offset of an address within its super page.
constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
// Mask selecting the base (start address) of a super page.
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
281 
// PartitionAlloc's address space is split into pools. See `glossary.md`.

enum pool_handle : unsigned {
  kNullPoolHandle = 0u,

  kRegularPoolHandle,
  kBRPPoolHandle,
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  kConfigurablePoolHandle,
#endif

// New pool_handles will be added here.

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // The thread isolated pool must come last since we write-protect its entry in
  // the metadata tables, e.g. AddressPoolManager::aligned_pools_
  kThreadIsolatedPoolHandle,
#endif
  // Sentinel; one past the last real pool handle.
  kMaxPoolHandle
};

// kNullPoolHandle doesn't have metadata, hence - 1
constexpr size_t kNumPools = kMaxPoolHandle - 1;
305 
// Maximum pool size. With exception of Configurable Pool, it is also
// the actual size, unless PA_DYNAMICALLY_SELECT_POOL_SIZE is set, which
// allows to choose a different size at initialization time for certain
// configurations.
//
// Special-case Android and iOS, which incur test failures with larger
// pools. Regardless, allocating >8GiB with malloc() on these platforms is
// unrealistic as of 2022.
//
// When pointer compression is enabled, we cannot use large pools (at most
// 8GB for each of the glued pools).
#if BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS) || \
    BUILDFLAG(ENABLE_POINTER_COMPRESSION)
constexpr size_t kPoolMaxSize = 8 * kGiB;
#else
constexpr size_t kPoolMaxSize = 16 * kGiB;
#endif
#else  // BUILDFLAG(HAS_64_BIT_POINTERS)
// NOTE(review): with a 32-bit size_t, `4 * kGiB` wraps to 0 (well-defined
// unsigned wrap). Presumably the 32-bit pool implementation does not rely on
// this value — confirm before using it on 32-bit paths.
constexpr size_t kPoolMaxSize = 4 * kGiB;
#endif
constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize;
328 
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
// Compile-time check of the ordering requirement documented on pool_handle.
static_assert(kThreadIsolatedPoolHandle == kNumPools,
              "The thread isolated pool must come last since we write-protect "
              "its metadata.");
#endif
334 
// Slots larger than this size will not receive MTE protection. Pages intended
// for allocations larger than this constant should not be backed with PROT_MTE
// (which saves shadow tag memory). We also save CPU cycles by skipping tagging
// of large areas which are less likely to benefit from MTE protection.
constexpr size_t kMaxMemoryTaggingSize = 1024;
340 
341 #if BUILDFLAG(HAS_MEMORY_TAGGING)
342 // Returns whether the tag of |object| overflowed, meaning the containing slot
343 // needs to be moved to quarantine.
HasOverflowTag(void * object)344 PA_ALWAYS_INLINE bool HasOverflowTag(void* object) {
345   // The tag with which the slot is put to quarantine.
346   constexpr uintptr_t kOverflowTag = 0x0f00000000000000uLL;
347   static_assert((kOverflowTag & kPtrTagMask) != 0,
348                 "Overflow tag must be in tag bits");
349   return (reinterpret_cast<uintptr_t>(object) & kPtrTagMask) == kOverflowTag;
350 }
351 #endif  // BUILDFLAG(HAS_MEMORY_TAGGING)
352 
// Number of `PartitionPage`s that fit in one super page.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
NumPartitionPagesPerSuperPage() {
  return kSuperPageSize >> PartitionPageShift();
}
357 
// Convenience accessor for kMaxSuperPagesInPool.
PA_ALWAYS_INLINE constexpr size_t MaxSuperPagesInPool() {
  return kMaxSuperPagesInPool;
}
361 
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// In 64-bit mode, the direct map allocation granularity is super page size,
// because this is the reservation granularity of the pools.
PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularity() {
  return kSuperPageSize;
}

// log2 of DirectMapAllocationGranularity().
PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularityShift() {
  return kSuperPageShift;
}
#else   // BUILDFLAG(HAS_64_BIT_POINTERS)
// In 32-bit mode, address space is a scarce resource. Use the system
// allocation granularity, which is the lowest possible address space allocation
// unit. However, don't go below partition page size, so that pool bitmaps
// don't get too large. See kBytesPer1BitOfBRPPoolBitmap.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
DirectMapAllocationGranularity() {
  return std::max(PageAllocationGranularity(), PartitionPageSize());
}

// log2 of DirectMapAllocationGranularity().
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
DirectMapAllocationGranularityShift() {
  return std::max(PageAllocationGranularityShift(), PartitionPageShift());
}
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
387 
// Mask selecting the offset of an address within a direct-map allocation
// granule.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
DirectMapAllocationGranularityOffsetMask() {
  return DirectMapAllocationGranularity() - 1;
}
392 
// The "order" of an allocation is closely related to the power-of-2 size of the
// allocation. More precisely, the order is the bit index of the
// most-significant-bit in the allocation size, where the bit numbering starts
// at index 1 for the least-significant-bit.
397 //
398 // In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
399 // covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
400 
// PartitionAlloc should return memory properly aligned for any type, to behave
// properly as a generic allocator. This is not strictly required as long as
// types are explicitly allocated with PartitionAlloc, but is to use it as a
// malloc() implementation, and generally to match malloc()'s behavior.
//
// In practice, this means 8 bytes alignment on 32 bit architectures, and 16
// bytes on 64 bit ones.
//
// Keep in sync with //tools/memory/partition_allocator/objects_per_size.py.
constexpr size_t kMinBucketedOrder =
    kAlignment == 16 ? 5 : 4;  // 2^(order - 1), that is 16 or 8.
// The largest bucketed order is 1 << (20 - 1), storing [512 KiB, 1 MiB):
constexpr size_t kMaxBucketedOrder = 20;
constexpr size_t kNumBucketedOrders =
    (kMaxBucketedOrder - kMinBucketedOrder) + 1;
// 8 buckets per order (for the higher orders).
// Note: this is not what is used by default, but the maximum amount of buckets
// per order. By default, only 4 are used.
constexpr size_t kNumBucketsPerOrderBits = 3;
constexpr size_t kNumBucketsPerOrder = 1 << kNumBucketsPerOrderBits;
constexpr size_t kNumBuckets = kNumBucketedOrders * kNumBucketsPerOrder;
// Smallest bucketed size: 8 or 16 bytes, matching kMinBucketedOrder.
constexpr size_t kSmallestBucket = 1 << (kMinBucketedOrder - 1);
// Spacing between consecutive buckets in the largest order.
constexpr size_t kMaxBucketSpacing =
    1 << ((kMaxBucketedOrder - 1) - kNumBucketsPerOrderBits);
// Largest size served from buckets (see kMinDirectMappedDownsize below).
constexpr size_t kMaxBucketed = (1 << (kMaxBucketedOrder - 1)) +
                                ((kNumBucketsPerOrder - 1) * kMaxBucketSpacing);
// Limit when downsizing a direct mapping using `realloc`:
constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
// Intentionally set to less than 2GiB to make sure that a 2GiB allocation
// fails. This is a security choice in Chrome, to help making size_t vs int bugs
// harder to exploit.

// The definition of MaxDirectMapped does only depend on constants that are
// unconditionally constexpr. Therefore it is not necessary to use
// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR here.
PA_ALWAYS_INLINE constexpr size_t MaxDirectMapped() {
  // Subtract kSuperPageSize to accommodate for granularity inside
  // PartitionRoot::GetDirectMapReservationSize.
  return (1UL << 31) - kSuperPageSize;
}
441 
// Max alignment supported by AlignedAlloc().
// kSuperPageSize alignment can't be easily supported, because each super page
// starts with guard pages & metadata.
constexpr size_t kMaxSupportedAlignment = kSuperPageSize / 2;

// Number of bits in a pointer-sized word, used as the bit width of size_t
// (note it is computed from sizeof(void*)).
constexpr size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
448 
449 // When a SlotSpan becomes empty, the allocator tries to avoid re-using it
450 // immediately, to help with fragmentation. At this point, it becomes dirty
451 // committed memory, which we want to minimize. This could be decommitted
452 // immediately, but that would imply doing a lot of system calls. In particular,
453 // for single-slot SlotSpans, a malloc() / free() loop would cause a *lot* of
454 // system calls.
455 //
456 // As an intermediate step, empty SlotSpans are placed into a per-partition
457 // global ring buffer, giving the newly-empty SlotSpan a chance to be re-used
458 // before getting decommitted. A new entry (i.e. a newly empty SlotSpan) taking
459 // the place used by a previous one will lead the previous SlotSpan to be
460 // decommitted immediately, provided that it is still empty.
461 //
462 // Setting this value higher means giving more time for reuse to happen, at the
463 // cost of possibly increasing peak committed memory usage (and increasing the
464 // size of PartitionRoot a bit, since the ring buffer is there). Note that the
465 // ring buffer doesn't necessarily contain an empty SlotSpan, as SlotSpans are
466 // *not* removed from it when re-used. So the ring buffer really is a buffer of
467 // *possibly* empty SlotSpans.
468 //
469 // In all cases, PartitionRoot::PurgeMemory() with the
470 // PurgeFlags::kDecommitEmptySlotSpans flag will eagerly decommit all entries
471 // in the ring buffer, so with periodic purge enabled, this typically happens
472 // every few seconds.
#if BUILDFLAG(USE_LARGE_EMPTY_SLOT_SPAN_RING)
// USE_LARGE_EMPTY_SLOT_SPAN_RING results in two sizes: kMaxEmptyCacheIndexBits,
// which is used when the renderer is in the foreground, and
// kMinEmptyCacheIndexBits which is used when the renderer is in the background.
constexpr size_t kMaxEmptyCacheIndexBits = 10;
constexpr size_t kMinEmptyCacheIndexBits = 7;
#else
constexpr size_t kMaxEmptyCacheIndexBits = 7;
constexpr size_t kMinEmptyCacheIndexBits = 7;
#endif
static_assert(kMinEmptyCacheIndexBits <= kMaxEmptyCacheIndexBits,
              "min size must be <= max size");
// kMaxFreeableSpans is the buffer size, but is never used as an index value,
// hence <= is appropriate.
constexpr size_t kMaxFreeableSpans = 1 << kMaxEmptyCacheIndexBits;
constexpr size_t kMinFreeableSpans = 1 << kMinEmptyCacheIndexBits;
// Default number of entries in the empty slot span ring buffer.
constexpr size_t kDefaultEmptySlotSpanRingSize = 16;
490 
// If the total size in bytes of allocated but not committed pages exceeds this
// value (probably it is a "out of virtual address space" crash), a special
// crash stack trace is generated at
// `PartitionOutOfMemoryWithLotsOfUncommitedPages`. This is to distinguish "out
// of virtual address space" from "out of physical memory" in crash reports.
constexpr size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024;  // 1 GiB

// These byte values match tcmalloc.
constexpr unsigned char kUninitializedByte = 0xAB;
constexpr unsigned char kFreedByte = 0xCD;

// Marker byte for quarantined memory — presumably used analogously to
// kFreedByte above; confirm at the use sites.
constexpr unsigned char kQuarantinedByte = 0xEF;

// 1 is smaller than anything we can use, as it is not properly aligned. Not
// using a large size, since PartitionBucket::slot_size is a uint32_t, and
// static_cast<uint32_t>(-1) is too close to a "real" size.
constexpr size_t kInvalidBucketSize = 1;
508 
#if PA_CONFIG(MAYBE_ENABLE_MAC11_MALLOC_SIZE_HACK)
// Requested size that requires the hack.
constexpr size_t kMac11MallocSizeHackRequestedSize = 32;
#endif
513 
514 }  // namespace internal
515 
// These constants are used outside PartitionAlloc itself, so we provide
// non-internal aliases here.
using ::partition_alloc::internal::kInvalidBucketSize;
using ::partition_alloc::internal::kMaxSuperPagesInPool;
using ::partition_alloc::internal::kMaxSupportedAlignment;
using ::partition_alloc::internal::kNumBuckets;
using ::partition_alloc::internal::kSuperPageSize;
using ::partition_alloc::internal::MaxDirectMapped;
using ::partition_alloc::internal::PartitionPageSize;
525 
526 }  // namespace partition_alloc
527 
528 #endif  // PARTITION_ALLOC_PARTITION_ALLOC_CONSTANTS_H_
529