// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef PARTITION_ALLOC_IN_SLOT_METADATA_H_
#define PARTITION_ALLOC_IN_SLOT_METADATA_H_

#include <atomic>
#include <bit>
#include <cstddef>
#include <cstdint>
#include <limits>

#include "build/build_config.h"
#include "partition_alloc/dangling_raw_ptr_checks.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/tagging.h"

#if BUILDFLAG(IS_APPLE)
#include "partition_alloc/partition_alloc_base/bits.h"
#endif  // BUILDFLAG(IS_APPLE)

namespace partition_alloc::internal {

// Aligns up `in_slot_metadata_size` (to an 8B boundary) on Apple platforms, as
// a workaround for a crash. The workaround was introduced for macOS 13
// (https://crbug.com/1378822), but it has been enabled by default because
// macOS 14 and later seem to need it too (https://crbug.com/1457756).
// It is also enabled on iOS as a workaround for a speculative bug in Swift's
// __StringStorage.create (https://crbug.com/327804972).
//
// Placed outside `BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)` intentionally, to
// accommodate usage in contexts outside this gating.
PA_ALWAYS_INLINE size_t
AlignUpInSlotMetadataSizeForApple(size_t in_slot_metadata_size) {
#if BUILDFLAG(IS_APPLE)
  return internal::base::bits::AlignUp<size_t>(in_slot_metadata_size, 8);
#else
  return in_slot_metadata_size;
#endif  // BUILDFLAG(IS_APPLE)
}
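
// A minimal worked example (illustrative, not part of the original file): on
// Apple builds this rounds the metadata size up to the next multiple of 8,
// e.g. a 4-byte metadata size becomes 8, while 8 or 16 stay unchanged. On
// other platforms the input size is returned as-is.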

#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

namespace {
// Utility functions to define a bit field.
template <typename CountType>
static constexpr CountType SafeShift(CountType lhs, int rhs) {
  return rhs >= std::numeric_limits<CountType>::digits ? 0 : lhs << rhs;
}
template <typename CountType>
struct BitField {
  static constexpr CountType None() { return CountType(0); }
  static constexpr CountType Bit(int n_th) {
    return SafeShift<CountType>(1, n_th);
  }
  // Mask with bits between `lo` and `hi` (both inclusive) set.
  static constexpr CountType Mask(int lo, int hi) {
    return (SafeShift<CountType>(1, hi + 1) - 1) &
           ~(SafeShift<CountType>(1, lo) - 1);
  }
};
}  // namespace
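
// Illustrative only (added as a sketch, not part of the original interface):
// a few example values these helpers produce. `Bit(n)` sets the n-th bit and
// `Mask(lo, hi)` sets bits `lo`..`hi` inclusive; over-wide shifts collapse to
// 0 via `SafeShift()`.
static_assert(BitField<uint32_t>::Bit(0) == 0x00000001u);
static_assert(BitField<uint32_t>::Bit(31) == 0x80000000u);
static_assert(BitField<uint32_t>::Mask(1, 29) == 0x3FFFFFFEu);
static_assert(BitField<uint64_t>::Mask(35, 63) == 0xFFFFFFF800000000ull);
static_assert(SafeShift<uint32_t>(1, 32) == 0u);  // Over-wide shift yields 0.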

// Special-purpose atomic bit field class mainly used by RawPtrBackupRefImpl.
// Formerly known as `PartitionRefCount`, but renamed to support usage that is
// unrelated to BRP.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
 public:
  // This class holds an atomic 32-bit field: `count_`. It holds 4 values:
  //
  // bits   name                   description
  // -----  ---------------------  ----------------------------------------
  // 0      is_allocated           Whether or not the memory is held by the
  //                               allocator.
  //                               - 1 at construction time.
  //                               - Decreased in ReleaseFromAllocator().
  //                               - We check whether this bit is set in
  //                                 `ReleaseFromAllocator()`, and if not we
  //                                 have a double-free.
  //
  // 1-29   ptr_count              Number of raw_ptr<T>.
  //                               - Increased in Acquire().
  //                               - Decreased in Release().
  //
  // 30     request_quarantine     When set, PA will quarantine the memory in
  //                               the Scheduler-Loop quarantine.
  //                               It also extends the quarantine duration
  //                               when set after being quarantined.
  // 31     needs_mac11_malloc_    Whether the malloc_size() return value
  //        size_hack              needs to be adjusted for this allocation.
  //
  // On `BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)` builds, the field is 64
  // bits wide and holds two more entries:
  //
  // bits   name                   description
  // -----  ---------------------  ----------------------------------------
  // 0      is_allocated
  // 1-31   ptr_count
  //
  // 32     dangling_detected      A dangling raw_ptr<> has been detected.
  // 33     needs_mac11_malloc_
  //        size_hack
  // 34     request_quarantine
  //
  // 35-63  unprotected_ptr_count  Number of
  //                               raw_ptr<T, DisableDanglingPtrDetection>.
  //                               - Increased in AcquireFromUnprotectedPtr().
  //                               - Decreased in ReleaseFromUnprotectedPtr().
  //
  // The allocation is reclaimed if all of:
  // - |is_allocated|
  // - |ptr_count|
  // - |unprotected_ptr_count|
  // are zero.
  //
  // During ReleaseFromAllocator(), if |ptr_count| is not zero,
  // |dangling_detected| is set and the error is reported via
  // DanglingRawPtrDetected(id). The matching DanglingRawPtrReleased(id) will
  // be called when the last raw_ptr<> is released.
#if !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  using CountType = uint32_t;
  static constexpr CountType kMemoryHeldByAllocatorBit =
      BitField<CountType>::Bit(0);
  static constexpr CountType kPtrCountMask = BitField<CountType>::Mask(1, 29);
  static constexpr CountType kRequestQuarantineBit =
      BitField<CountType>::Bit(30);
  static constexpr CountType kNeedsMac11MallocSizeHackBit =
      BitField<CountType>::Bit(31);
  static constexpr CountType kDanglingRawPtrDetectedBit =
      BitField<CountType>::None();
  static constexpr CountType kUnprotectedPtrCountMask =
      BitField<CountType>::None();
#else   // !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  using CountType = uint64_t;
  static constexpr auto kMemoryHeldByAllocatorBit = BitField<CountType>::Bit(0);
  static constexpr auto kPtrCountMask = BitField<CountType>::Mask(1, 31);
  static constexpr auto kDanglingRawPtrDetectedBit =
      BitField<CountType>::Bit(32);
  static constexpr auto kNeedsMac11MallocSizeHackBit =
      BitField<CountType>::Bit(33);
  static constexpr CountType kRequestQuarantineBit =
      BitField<CountType>::Bit(34);
  static constexpr auto kUnprotectedPtrCountMask =
      BitField<CountType>::Mask(35, 63);
#endif  // !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

  // Quick check to assert these masks do not overlap.
  static_assert((kMemoryHeldByAllocatorBit + kPtrCountMask +
                 kUnprotectedPtrCountMask + kDanglingRawPtrDetectedBit +
                 kRequestQuarantineBit + kNeedsMac11MallocSizeHackBit) ==
                std::numeric_limits<CountType>::max());

  static constexpr auto kPtrInc =
      SafeShift<CountType>(1, std::countr_zero(kPtrCountMask));
  static constexpr auto kUnprotectedPtrInc =
      SafeShift<CountType>(1, std::countr_zero(kUnprotectedPtrCountMask));
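
  // Illustrative walk-through (a sketch, not part of the original file): with
  // the 32-bit layout above, a freshly constructed object (no mac11 size hack
  // requested) starts at `count_ == kMemoryHeldByAllocatorBit == 0b01`. One
  // `Acquire()` adds `kPtrInc` (the lowest bit of `kPtrCountMask`, i.e.
  // `1 << 1`), giving `0b11`, and the matching `Release()` subtracts it again.
  // The slot becomes reclaimable only once `is_allocated`, `ptr_count` and
  // `unprotected_ptr_count` are all zero.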

  PA_ALWAYS_INLINE explicit InSlotMetadata(bool needs_mac11_malloc_size_hack);

  // Incrementing the counter doesn't imply any visibility about modified
  // memory, hence relaxed atomics. For decrement, visibility is required
  // before the memory gets freed, necessitating an acquire/release barrier
  // before freeing the memory.
  //
  // For details, see base::AtomicRefCount, which has the same constraints and
  // characteristics.
  //
  // FYI: the document below shows the assembly produced by the compiler on
  // every platform, in particular the uint64_t fetch_add on 32-bit CPUs.
  // https://docs.google.com/document/d/1cSTVDVEE-8l2dXLPcfyN75r6ihMbeiSp1ncL9ae3RZE
  PA_ALWAYS_INLINE void Acquire() {
    CheckCookieIfSupported();

    CountType old_count = count_.fetch_add(kPtrInc, std::memory_order_relaxed);
    // Check overflow.
    PA_CHECK((old_count & kPtrCountMask) != kPtrCountMask);
  }

  // Similar to |Acquire()|, but for raw_ptr<T, DisableDanglingPtrDetection>
  // instead of raw_ptr<T>.
  PA_ALWAYS_INLINE void AcquireFromUnprotectedPtr() {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    CheckCookieIfSupported();
    CountType old_count =
        count_.fetch_add(kUnprotectedPtrInc, std::memory_order_relaxed);
    // Check overflow.
    PA_CHECK((old_count & kUnprotectedPtrCountMask) !=
             kUnprotectedPtrCountMask);
#else
    Acquire();
#endif
  }

  // Returns true if the allocation should be reclaimed.
  PA_ALWAYS_INLINE bool Release() {
    CheckCookieIfSupported();

    CountType old_count = count_.fetch_sub(kPtrInc, std::memory_order_release);
    // Check underflow.
    PA_DCHECK(old_count & kPtrCountMask);

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    // If a dangling raw_ptr<> was detected, report it.
    if (PA_UNLIKELY((old_count & kDanglingRawPtrDetectedBit) ==
                    kDanglingRawPtrDetectedBit)) {
      partition_alloc::internal::DanglingRawPtrReleased(
          reinterpret_cast<uintptr_t>(this));
    }
#endif

    return ReleaseCommon(old_count - kPtrInc);
  }

  // Similar to |Release()|, but for raw_ptr<T, DisableDanglingPtrDetection>
  // instead of raw_ptr<T>.
  PA_ALWAYS_INLINE bool ReleaseFromUnprotectedPtr() {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    CheckCookieIfSupported();

    CountType old_count =
        count_.fetch_sub(kUnprotectedPtrInc, std::memory_order_release);
    // Check underflow.
    PA_DCHECK(old_count & kUnprotectedPtrCountMask);

    return ReleaseCommon(old_count - kUnprotectedPtrInc);
#else
    return Release();
#endif
  }

  // `PreReleaseFromAllocator()` performs part of what `ReleaseFromAllocator()`
  // does, in a way that supports being called multiple times. It can be used
  // when an allocation is sent to quarantine, to perform dangling `raw_ptr`
  // checks before quarantine rather than after.
  PA_ALWAYS_INLINE void PreReleaseFromAllocator() {
    CheckCookieIfSupported();
    CheckDanglingPointersOnFree(count_.load(std::memory_order_relaxed));
  }

  // Returns true if the allocation should be reclaimed.
  // This function should be called by the allocator during Free().
  PA_ALWAYS_INLINE bool ReleaseFromAllocator() {
    CheckCookieIfSupported();

    // TODO(bartekn): Make the double-free check more effective. Once freed,
    // the in-slot metadata is overwritten by an encoded freelist-next pointer.
    CountType old_count =
        count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);

    if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit))) {
      DoubleFreeOrCorruptionDetected(old_count);
    }

    // Release memory when no raw_ptr<> exists anymore:
    static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask;
    if (PA_LIKELY((old_count & mask) == 0)) {
      std::atomic_thread_fence(std::memory_order_acquire);
      // The allocation is about to get freed, so clear the cookie.
      ClearCookieIfSupported();
      return true;
    }

    CheckDanglingPointersOnFree(old_count);
    return false;
  }

277 // "IsAlive" means is allocated and not freed. "KnownRefs" refers to
278 // raw_ptr<T> references. There may be other references from raw pointers or
279 // unique_ptr, but we have no way of tracking them, so we hope for the best.
280 // To summarize, the function returns whether we believe the allocation can be
281 // safely freed.
282 PA_ALWAYS_INLINE bool IsAliveWithNoKnownRefs() {
283 CheckCookieIfSupported();
284 static constexpr CountType mask =
285 kMemoryHeldByAllocatorBit | kPtrCountMask | kUnprotectedPtrCountMask;
286 return (count_.load(std::memory_order_acquire) & mask) ==
287 kMemoryHeldByAllocatorBit;
288 }
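
  // Illustrative only (assumed values, not part of the original file): with
  // the 32-bit layout, `count_ == 0b01` (allocated, no tracked references)
  // makes this return true; `0b11` (allocated plus one live raw_ptr<T>)
  // returns false; and `0b10` (freed while a raw_ptr<T> still exists, i.e. a
  // dangling reference) also returns false.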

  PA_ALWAYS_INLINE bool IsAlive() {
    bool alive =
        count_.load(std::memory_order_relaxed) & kMemoryHeldByAllocatorBit;
    if (alive) {
      CheckCookieIfSupported();
    }
    return alive;
  }

  // Called when a raw_ptr is not banning dangling ptrs, but the user still
  // wants to ensure the pointer is not currently dangling. This is currently
  // used in UnretainedWrapper to make sure callbacks are not invoked with
  // dangling pointers. If such a raw_ptr exists but the allocation is no
  // longer alive, then we have a dangling pointer to a dead object.
  PA_ALWAYS_INLINE void ReportIfDangling() {
    if (!IsAlive()) {
      partition_alloc::internal::UnretainedDanglingRawPtrDetected(
          reinterpret_cast<uintptr_t>(this));
    }
  }

  // Request to quarantine this allocation. The request might be ignored if
  // the allocation is already freed.
  PA_ALWAYS_INLINE void SetQuarantineRequest() {
    CountType old_count =
        count_.fetch_or(kRequestQuarantineBit, std::memory_order_relaxed);
    // This bit cannot be used after the memory is freed.
    PA_DCHECK(old_count & kMemoryHeldByAllocatorBit);
  }

  // Get and clear out quarantine request.
  PA_ALWAYS_INLINE bool PopQuarantineRequest() {
    CountType old_count =
        count_.fetch_and(~kRequestQuarantineBit, std::memory_order_acq_rel);
    // This bit cannot be used after the memory is freed.
    PA_DCHECK(old_count & kMemoryHeldByAllocatorBit);
    return old_count & kRequestQuarantineBit;
  }

  // GWP-ASan slots are assigned an extra reference (note `kPtrInc` below) to
  // make sure the `raw_ptr<T>` release operation will never attempt to call
  // the PA `free` on such a slot. GWP-ASan takes the extra reference into
  // account when determining whether the slot can be reused.
  PA_ALWAYS_INLINE void InitalizeForGwpAsan() {
#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
    brp_cookie_ = CalculateCookie();
#endif
    count_.store(kPtrInc | kMemoryHeldByAllocatorBit,
                 std::memory_order_release);
  }

  PA_ALWAYS_INLINE bool CanBeReusedByGwpAsan() {
    static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask;
    return (count_.load(std::memory_order_acquire) & mask) == kPtrInc;
  }
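
  // A hedged example (not part of the original file): after
  // `InitalizeForGwpAsan()`, `count_` holds
  // `kPtrInc | kMemoryHeldByAllocatorBit`, as if one raw_ptr<T> were
  // permanently alive. `CanBeReusedByGwpAsan()` then reports true only while
  // that synthetic reference is the only one left, i.e. every real raw_ptr<T>
  // acquired since has been released.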

  bool NeedsMac11MallocSizeHack() {
    return count_.load(std::memory_order_relaxed) &
           kNeedsMac11MallocSizeHackBit;
  }

#if PA_CONFIG(IN_SLOT_METADATA_STORE_REQUESTED_SIZE)
  PA_ALWAYS_INLINE void SetRequestedSize(size_t size) {
    requested_size_ = static_cast<uint32_t>(size);
  }
  PA_ALWAYS_INLINE uint32_t requested_size() const { return requested_size_; }
#endif  // PA_CONFIG(IN_SLOT_METADATA_STORE_REQUESTED_SIZE)

 private:
  // If there are dangling raw_ptr<>, turn on the error flag and emit the
  // `DanglingRawPtrDetected` event to embedders, once.
  PA_ALWAYS_INLINE void CheckDanglingPointersOnFree(CountType count) {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    // The `kPtrCountMask` counts the number of raw_ptr<T>. It is expected to
    // be zero when there are no unexpected dangling pointers.
    if (PA_LIKELY((count & kPtrCountMask) == 0)) {
      return;
    }

    // Two events are sent to embedders:
    // 1. `DanglingRawPtrDetected` - here.
    // 2. `DanglingRawPtrReleased` - in Release().
    //
    // The `dangling_detected` bit signals we must emit the second during
    // `Release()`.
    CountType old_count =
        count_.fetch_or(kDanglingRawPtrDetectedBit, std::memory_order_relaxed);

    // This function supports multiple calls. `DanglingRawPtrDetected` must be
    // called only once, so only the first caller setting the bit can continue.
    if ((old_count & kDanglingRawPtrDetectedBit) ==
        kDanglingRawPtrDetectedBit) {
      return;
    }

    partition_alloc::internal::DanglingRawPtrDetected(
        reinterpret_cast<uintptr_t>(this));
#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  }

  // The common parts shared by Release() and ReleaseFromUnprotectedPtr().
  // Called after updating the ref counts, |count| is the new value of |count_|
  // set by fetch_sub. Returns true if memory can be reclaimed.
  PA_ALWAYS_INLINE bool ReleaseCommon(CountType count) {
    // Do not release memory, if it is still held by any of:
    // - The allocator
    // - A raw_ptr<T>
    // - A raw_ptr<T, DisableDanglingPtrDetection>
    //
    // Assuming this raw_ptr is not dangling, the memory must still be held at
    // least by the allocator, so this is PA_LIKELY true.
    if (PA_LIKELY((count & (kMemoryHeldByAllocatorBit | kPtrCountMask |
                            kUnprotectedPtrCountMask)))) {
      return false;  // Do not release the memory.
    }

    // In most thread-safe reference count implementations, an acquire
    // barrier is required so that all changes made to an object from other
    // threads are visible to its destructor. In our case, the destructor
    // finishes before the final `Release` call, so it shouldn't be a problem.
    // However, we will keep it as a precautionary measure.
    std::atomic_thread_fence(std::memory_order_acquire);

    // The allocation is about to get freed, so clear the cookie.
    ClearCookieIfSupported();
    return true;
  }

  // The cookie helps us ensure that:
  // 1) The reference count pointer calculation is correct.
  // 2) The returned allocation slot is not freed.
  PA_ALWAYS_INLINE void CheckCookieIfSupported() {
#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
    PA_CHECK(brp_cookie_ == CalculateCookie());
#endif
  }

  PA_ALWAYS_INLINE void ClearCookieIfSupported() {
#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
    brp_cookie_ = 0;
#endif
  }

#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
  PA_ALWAYS_INLINE uint32_t CalculateCookie() {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this)) ^
           kCookieSalt;
  }
#endif  // PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)

  [[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void
  DoubleFreeOrCorruptionDetected(CountType count) {
    PA_DEBUG_DATA_ON_STACK("refcount", count);
    PA_NO_CODE_FOLDING();
    PA_IMMEDIATE_CRASH();
  }

  // Note that in free slots, this is overwritten by encoded freelist
  // pointer(s). The way the pointers are encoded on 64-bit little-endian
  // architectures, count_ happens to stay even, which works well with the
  // double-free detection in ReleaseFromAllocator(). Don't change the layout
  // of this class, to preserve this functionality.
  std::atomic<CountType> count_;

#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
  static constexpr uint32_t kCookieSalt = 0xc01dbeef;
  volatile uint32_t brp_cookie_;
#endif

#if PA_CONFIG(IN_SLOT_METADATA_STORE_REQUESTED_SIZE)
  uint32_t requested_size_;
#endif
};

PA_ALWAYS_INLINE InSlotMetadata::InSlotMetadata(
    bool needs_mac11_malloc_size_hack)
    : count_(kMemoryHeldByAllocatorBit |
             (needs_mac11_malloc_size_hack ? kNeedsMac11MallocSizeHackBit : 0))
#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
      ,
      brp_cookie_(CalculateCookie())
#endif
{
}

static_assert(kAlignment % alignof(InSlotMetadata) == 0,
              "kAlignment must be multiples of alignof(InSlotMetadata).");

static constexpr size_t kInSlotMetadataBufferSize = sizeof(InSlotMetadata);

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE) || \
    PA_CONFIG(IN_SLOT_METADATA_STORE_REQUESTED_SIZE)
static constexpr size_t kInSlotMetadataSizeShift = 4;
#else
static constexpr size_t kInSlotMetadataSizeShift = 3;
#endif

#else  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE) && \
    PA_CONFIG(IN_SLOT_METADATA_STORE_REQUESTED_SIZE)
static constexpr size_t kInSlotMetadataSizeShift = 4;
#elif PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE) || \
    PA_CONFIG(IN_SLOT_METADATA_STORE_REQUESTED_SIZE)
static constexpr size_t kInSlotMetadataSizeShift = 3;
#else
static constexpr size_t kInSlotMetadataSizeShift = 2;
#endif


#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
static_assert((1 << kInSlotMetadataSizeShift) == sizeof(InSlotMetadata));

// The in-slot metadata table is tucked in the metadata region of the super
// page, and spans a single system page.
//
// We need one InSlotMetadata for each data system page in a super page. They
// take `x = sizeof(InSlotMetadata) * (kSuperPageSize / SystemPageSize())`
// space. They need to fit into a system page of metadata as sparsely as
// possible to minimize cache line sharing, hence we calculate a multiplier as
// `SystemPageSize() / x`, which is equal to
// `SystemPageSize()^2 / kSuperPageSize / sizeof(InSlotMetadata)`.
//
// The multiplier is expressed as a bitshift to optimize the code generation.
// SystemPageSize() isn't always a constexpr, in which case the compiler
// wouldn't know it's a power of two. The equivalence of these calculations is
// checked in PartitionAllocGlobalInit().
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
GetInSlotMetadataIndexMultiplierShift() {
  return SystemPageShift() * 2 - kSuperPageShift - kInSlotMetadataSizeShift;
}
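
// A worked example (illustrative, assuming a common configuration rather than
// describing every build): with 4 KiB system pages (SystemPageShift() == 12),
// 2 MiB super pages (kSuperPageShift == 21) and sizeof(InSlotMetadata) == 4
// (kInSlotMetadataSizeShift == 2), there are 512 data pages per super page,
// so the table needs x = 4 * 512 = 2048 bytes, and the multiplier is
// 4096 / 2048 = 2, i.e. a shift of 12 * 2 - 21 - 2 == 1.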

PA_ALWAYS_INLINE InSlotMetadata* InSlotMetadataPointer(uintptr_t slot_start,
                                                       size_t slot_size) {
  // In-slot metadata is typically put at the end of the slot. However, there
  // are a handful of issues that need to be considered:
  // 1. GWP-ASan uses 2-page slots and wants the 2nd page to be inaccessible,
  //    so putting in-slot metadata there is a no-go.
  // 2. When a direct map is reallocated in-place, its `slot_size` may change
  //    and pages can be (de)committed. This would force in-slot metadata
  //    relocation, which could lead to a race with the metadata access.
  // 3. For single-slot spans, the unused pages between `GetUtilizedSlotSize()`
  //    and `slot_size` may be discarded, thus interfering with the in-slot
  //    metadata.
  //
  // All of the above happen to have `slot_start` at the page boundary. We
  // place the InSlotMetadata object out-of-line in this case, specifically in
  // a special table after the super page metadata (see InSlotMetadataTable in
  // partition_alloc_constants.h).
  if (PA_LIKELY(slot_start & SystemPageOffsetMask())) {
    uintptr_t refcount_address =
        slot_start + slot_size - sizeof(InSlotMetadata);
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    PA_CHECK(refcount_address % alignof(InSlotMetadata) == 0);
#endif
    // TODO(bartekn): Plumb the tag from the callers, so that the MTE tag can
    // be included in the pointer arithmetic, and not re-read from memory.
    return static_cast<InSlotMetadata*>(TagAddr(refcount_address));
  } else {
    // No need to MTE-tag, as the metadata region isn't protected by MTE.
    InSlotMetadata* table_base = reinterpret_cast<InSlotMetadata*>(
        (slot_start & kSuperPageBaseMask) + SystemPageSize() * 2);
    size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift())
                   << GetInSlotMetadataIndexMultiplierShift();
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    PA_CHECK(sizeof(InSlotMetadata) * index <= SystemPageSize());
#endif
    return table_base + index;
  }
}
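
// Worked example for the out-of-line case (illustrative, with the same
// assumed configuration as above: 4 KiB system pages, 2 MiB super pages,
// sizeof(InSlotMetadata) == 4, multiplier shift == 1): a page-aligned slot at
// super-page offset 0x5000 lies in data page 5, so its metadata lives at
// `table_base + (5 << 1)`, i.e. entry 10 of the table. The unused entry
// between consecutive pages spreads the entries across the metadata page to
// reduce cache-line sharing.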

static_assert(sizeof(InSlotMetadata) <= kInSlotMetadataBufferSize,
              "InSlotMetadata should fit into the in-slot buffer.");

#else  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

static constexpr size_t kInSlotMetadataBufferSize = 0;

#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

constexpr size_t kInSlotMetadataSizeAdjustment = kInSlotMetadataBufferSize;

}  // namespace partition_alloc::internal

#endif  // PARTITION_ALLOC_IN_SLOT_METADATA_H_