// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef PARTITION_ALLOC_PARTITION_PAGE_H_
#define PARTITION_ALLOC_PARTITION_PAGE_H_

#include <cstdint>

#include "build/build_config.h"
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/address_pool_manager_types.h"
#include "partition_alloc/freeslot_bitmap_constants.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/partition_bucket.h"
#include "partition_alloc/partition_dcheck_helper.h"
#include "partition_alloc/partition_freelist_entry.h"
#include "partition_alloc/partition_page_constants.h"
#include "partition_alloc/partition_superpage_extent_entry.h"
#include "partition_alloc/reservation_offset_table.h"

#if BUILDFLAG(USE_STARSCAN)
#include "partition_alloc/starscan/state_bitmap.h"
#endif

#if BUILDFLAG(PA_DCHECK_IS_ON)
#include "partition_alloc/tagging.h"
#endif

namespace partition_alloc::internal {

#if BUILDFLAG(USE_STARSCAN)
using AllocationStateMap =
    StateBitmap<kSuperPageSize, kSuperPageAlignment, kAlignment>;
#endif

// Metadata of the slot span.
//
// Some notes on slot span states. It can be in one of four major states:
// 1) Active.
// 2) Full.
// 3) Empty.
// 4) Decommitted.
// An active slot span has available free slots, as well as allocated ones.
// A full slot span has no free slots. An empty slot span has no allocated
// slots, and a decommitted slot span is an empty one that had its backing
// memory released back to the system.
//
// There are three linked lists tracking slot spans. The "active" list is an
// approximation of a list of active slot spans. It is an approximation because
// full, empty and decommitted slot spans may briefly be present in the list
// until we next do a scan over it. The "empty" list holds mostly empty slot
// spans, but may briefly hold decommitted ones too. The "decommitted" list
// holds only decommitted slot spans.
//
// The significant slot span transitions are:
// - Free() will detect when a full slot span has a slot freed and immediately
//   return the slot span to the head of the active list.
// - Free() will detect when a slot span is fully emptied. It _may_ add it to
//   the empty list or it _may_ leave it on the active list until a future
//   list scan.
// - Alloc() _may_ scan the active page list in order to fulfil the request.
//   If it does this, full, empty and decommitted slot spans encountered will
//   be booted out of the active list. If no suitable active slot span is
//   found, an empty or decommitted slot span (if one exists) will be pulled
//   from the empty/decommitted list onto the active list.
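//
// For illustration, a sketch (not exhaustive) of the transitions driven by the
// operations above:
//
//   Decommitted -> Active:  Alloc() pulls the span back and provisions slots.
//   Active -> Full:         Alloc() hands out the last free slot.
//   Full -> Active:         Free() frees any slot of the span.
//   Active -> Empty:        Free() frees the last allocated slot.
//   Empty -> Decommitted:   the empty ring evicts the span and its backing
//                           memory is released (see Decommit()).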
#pragma pack(push, 1)
struct SlotSpanMetadata {
 private:
  PartitionFreelistEntry* freelist_head = nullptr;

 public:
  // TODO(lizeb): Make as many fields as possible private or const, to
  // encapsulate things more clearly.
  SlotSpanMetadata* next_slot_span = nullptr;
  PartitionBucket* const bucket = nullptr;

  // CHECK()ed in AllocNewSlotSpan().
  // The maximum number of bits needed to cover all currently supported OSes.
  static constexpr size_t kMaxSlotsPerSlotSpanBits = 13;
  static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), "");

  // |marked_full| isn't equivalent to being full. A slot span is marked as
  // full iff it isn't on the active slot span list (or any other list).
  uint32_t marked_full : 1;
  // |num_allocated_slots| is 0 for empty or decommitted slot spans, which can
  // be further differentiated by checking existence of the freelist.
  uint32_t num_allocated_slots : kMaxSlotsPerSlotSpanBits;
  uint32_t num_unprovisioned_slots : kMaxSlotsPerSlotSpanBits;

 private:
  const uint32_t can_store_raw_size_ : 1;
  uint32_t freelist_is_sorted_ : 1;
  uint32_t unused1_ : (32 - 1 - 2 * kMaxSlotsPerSlotSpanBits - 1 - 1);
  // If |in_empty_cache_|==0, |empty_cache_index_| is undefined and mustn't be
  // used.
  uint16_t in_empty_cache_ : 1;
  uint16_t empty_cache_index_
      : kMaxEmptyCacheIndexBits;  // < kMaxFreeableSpans.
  uint16_t unused2_ : (16 - 1 - kMaxEmptyCacheIndexBits);
  // Can use only 48 bits (6B) in this bitfield, as this structure is embedded
  // in PartitionPage which has 2B worth of fields and must fit in 32B.
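  //
  // A worked check of the bit budget (assuming kMaxSlotsPerSlotSpanBits ==
  // 13): the uint32_t bitfield packs 1 + 13 + 13 + 1 + 1 = 29 bits, leaving
  // |unused1_| == 3 bits; the uint16_t bitfield packs 1 +
  // kMaxEmptyCacheIndexBits bits, and |unused2_| absorbs the remainder.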

 public:
  PA_COMPONENT_EXPORT(PARTITION_ALLOC)
  explicit SlotSpanMetadata(PartitionBucket* bucket);

  inline SlotSpanMetadata(const SlotSpanMetadata&);

  // Public API
  // Note the matching Alloc() functions are in PartitionPage.
  PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) void FreeSlowPath(
      size_t number_of_freed);
  PA_ALWAYS_INLINE PartitionFreelistEntry* PopForAlloc(
      size_t size,
      const PartitionFreelistDispatcher* freelist_dispatcher);
  PA_ALWAYS_INLINE void Free(
      uintptr_t slot_start,
      PartitionRoot* root,
      const PartitionFreelistDispatcher* freelist_dispatcher)
      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
  // Appends the passed freelist to the slot-span's freelist. Please note that
  // the function doesn't increment the tags of the passed freelist entries,
  // since FreeInline() did it already.
  PA_ALWAYS_INLINE void AppendFreeList(
      PartitionFreelistEntry* head,
      PartitionFreelistEntry* tail,
      size_t number_of_freed,
      PartitionRoot* root,
      const PartitionFreelistDispatcher* freelist_dispatcher)
      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));

  void Decommit(PartitionRoot* root);
  void DecommitIfPossible(PartitionRoot* root);

  // Sorts the freelist in ascending address order.
  void SortFreelist();
  // Inserts the slot span into the empty ring, making space for the new slot
  // span, and potentially shrinking the ring.
  void RegisterEmpty();

  // Pointer/address manipulation functions. These must be static as the input
  // |slot_span| pointer may be the result of an offset calculation and
  // therefore cannot be trusted. The objective of these functions is to
  // sanitize this input.
  PA_ALWAYS_INLINE static uintptr_t ToSlotSpanStart(
      const SlotSpanMetadata* slot_span);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromAddr(uintptr_t address);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromSlotStart(uintptr_t slot_start);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObject(void* object);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObjectInnerAddr(
      uintptr_t address);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObjectInnerPtr(void* ptr);

  PA_ALWAYS_INLINE PartitionSuperPageExtentEntry* ToSuperPageExtent() const;

  // Checks if it is feasible to store raw_size.
  PA_ALWAYS_INLINE bool CanStoreRawSize() const { return can_store_raw_size_; }
  // The caller is responsible for ensuring that raw_size can be stored before
  // calling Set/GetRawSize.
  PA_ALWAYS_INLINE void SetRawSize(size_t raw_size);
  PA_ALWAYS_INLINE size_t GetRawSize() const;

  PA_ALWAYS_INLINE PartitionFreelistEntry* get_freelist_head() const {
    return freelist_head;
  }
  PA_ALWAYS_INLINE void SetFreelistHead(PartitionFreelistEntry* new_head);

  // Returns the size of the region used within a slot. The used region
  // comprises the actual allocated data, extras, and possibly empty space in
  // the middle.
  PA_ALWAYS_INLINE size_t GetUtilizedSlotSize() const {
    // The returned size can be:
    // - The slot size for small buckets.
    // - Exact size needed to satisfy allocation (incl. extras), for large
    //   buckets and direct-mapped allocations (see also the comment in
    //   CanStoreRawSize() for more info).
    if (PA_LIKELY(!CanStoreRawSize())) {
      return bucket->slot_size;
    }
    return GetRawSize();
  }

  // This includes padding due to rounding done at allocation; we don't know
  // the requested size at deallocation, so we use this in both places.
  PA_ALWAYS_INLINE size_t GetSlotSizeForBookkeeping() const {
    // This could be more precise for allocations where CanStoreRawSize()
    // returns true (large allocations). However this is called for *every*
    // allocation, so we don't want an extra branch there.
    return bucket->slot_size;
  }

  // Returns the total size of the slots that are currently provisioned.
  PA_ALWAYS_INLINE size_t GetProvisionedSize() const {
    size_t num_provisioned_slots =
        bucket->get_slots_per_span() - num_unprovisioned_slots;
    size_t provisioned_size = num_provisioned_slots * bucket->slot_size;
    PA_DCHECK(provisioned_size <= bucket->get_bytes_per_span());
    return provisioned_size;
  }

  // Returns the number of entries in the freelist.
  size_t GetFreelistLength() const {
    size_t num_provisioned_slots =
        bucket->get_slots_per_span() - num_unprovisioned_slots;
    return num_provisioned_slots - num_allocated_slots;
  }

  PA_ALWAYS_INLINE void Reset();

  // TODO(ajwong): Can this be made private? https://crbug.com/787153
  PA_COMPONENT_EXPORT(PARTITION_ALLOC)
  static const SlotSpanMetadata* get_sentinel_slot_span();
  // The sentinel is not supposed to be modified and hence we mark it as const
  // under the hood. However, we often store it together with mutable metadata
  // objects and need a non-const pointer.
  // You can use this function for this case, but you need to ensure that the
  // returned object will not be written to.
  static SlotSpanMetadata* get_sentinel_slot_span_non_const();

  // Slot span state getters.
  PA_ALWAYS_INLINE bool is_active() const;
  PA_ALWAYS_INLINE bool is_full() const;
  PA_ALWAYS_INLINE bool is_empty() const;
  PA_ALWAYS_INLINE bool is_decommitted() const;
  PA_ALWAYS_INLINE bool in_empty_cache() const { return in_empty_cache_; }
  PA_ALWAYS_INLINE bool freelist_is_sorted() const {
    return freelist_is_sorted_;
  }
  PA_ALWAYS_INLINE void set_freelist_sorted() { freelist_is_sorted_ = true; }

 private:
  // sentinel_slot_span_ is used as a sentinel to indicate that there is no
  // slot span in the active list. We could use nullptr, but in that case we
  // need to add a null-check branch to the hot allocation path. We want to
  // avoid that.
  //
  // Note, this declaration is kept in the header as opposed to an anonymous
  // namespace so the getter can be fully inlined.
  static const SlotSpanMetadata sentinel_slot_span_;
  // For the sentinel.
  inline constexpr SlotSpanMetadata() noexcept;
};
#pragma pack(pop)
static_assert(sizeof(SlotSpanMetadata) <= kPageMetadataSize,
              "SlotSpanMetadata must fit into a Page Metadata slot.");

inline constexpr SlotSpanMetadata::SlotSpanMetadata() noexcept
    : marked_full(0),
      num_allocated_slots(0),
      num_unprovisioned_slots(0),
      can_store_raw_size_(false),
      freelist_is_sorted_(true),
      unused1_(0),
      in_empty_cache_(0),
      empty_cache_index_(0),
      unused2_(0) {
  (void)unused1_;
  (void)unused2_;
}

inline SlotSpanMetadata::SlotSpanMetadata(const SlotSpanMetadata&) = default;

// Metadata of a non-first partition page in a slot span.
struct SubsequentPageMetadata {
  // Raw size is the size needed to satisfy the allocation (requested size +
  // extras). If available, it can be used to report better statistics or to
  // bring protective cookie closer to the allocated memory.
  //
  // It can be used only if:
  // - there is no more than one slot in the slot span (otherwise we wouldn't
  //   know which slot the raw size applies to)
  // - there is more than one partition page in the slot span (the metadata of
  //   the first one is used to store slot information, but the second one is
  //   available for extra information)
  size_t raw_size;
};
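
// An illustrative example (the sizes are hypothetical): a single-slot slot
// span backing a 100 KiB allocation spans multiple partition pages, so both
// conditions above hold; the raw size (requested size + extras) is stashed
// here and surfaced via SlotSpanMetadata::GetRawSize().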

// Each partition page has metadata associated with it. The metadata of the
// first page of a slot span describes that slot span. If a slot span spans
// more than 1 page, the page metadata may contain rudimentary additional
// information.
// "Pack" the union so that common page metadata still fits within
// kPageMetadataSize. (SlotSpanMetadata is also "packed".)
#pragma pack(push, 1)
struct PartitionPageMetadata {
  union {
    SlotSpanMetadata slot_span_metadata;

    SubsequentPageMetadata subsequent_page_metadata;

    // sizeof(PartitionPageMetadata) must always be:
    // - a power of 2 (for fast modulo operations)
    // - below kPageMetadataSize
    //
    // This makes sure that this is respected no matter the architecture.
    char optional_padding[kPageMetadataSize - sizeof(uint8_t) - sizeof(bool)];
  };

  // The first PartitionPage of the slot span holds its metadata. This offset
  // tells how many pages in from that first page we are.
  // For direct maps, the first page metadata (that isn't the super page extent
  // entry) uses this field to tell how many pages to the right the direct map
  // metadata starts.
  //
  // 6 bits is enough to represent all possible offsets, given that the
  // smallest partition page is 16kiB and the offset won't exceed 1MiB.
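  // (1MiB / 16kiB = 64 = 2^6 possible partition-page offsets.)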
  static constexpr uint16_t kMaxSlotSpanMetadataBits = 6;
  static constexpr uint16_t kMaxSlotSpanMetadataOffset =
      (1 << kMaxSlotSpanMetadataBits) - 1;
  uint8_t slot_span_metadata_offset : kMaxSlotSpanMetadataBits;

  // |is_valid| tells whether the page is part of a slot span. If |false|,
  // |has_valid_span_after_this| tells whether it's an unused region in between
  // slot spans within the super page.
  // Note, |is_valid| has been added for clarity, but if we ever need to save
  // this bit, it can be inferred from:
  // |!slot_span_metadata_offset && slot_span_metadata->bucket|.
  bool is_valid : 1;
  bool has_valid_span_after_this : 1;
  uint8_t unused;

  PA_ALWAYS_INLINE static PartitionPageMetadata* FromAddr(uintptr_t address);
};
#pragma pack(pop)
static_assert(sizeof(PartitionPageMetadata) == kPageMetadataSize,
              "PartitionPage must be able to fit in a metadata slot");

// Certain functions rely on PartitionPageMetadata being either
// SlotSpanMetadata or SubsequentPageMetadata, and therefore cast freely
// between the two.
// TODO(https://crbug.com/1500662) Stop ignoring the -Winvalid-offsetof
// warning.
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-offsetof"
#endif
static_assert(offsetof(PartitionPageMetadata, slot_span_metadata) == 0, "");
static_assert(offsetof(PartitionPageMetadata, subsequent_page_metadata) == 0,
              "");
#if defined(__clang__)
#pragma clang diagnostic pop
#endif

PA_ALWAYS_INLINE PartitionPageMetadata* PartitionSuperPageToMetadataArea(
    uintptr_t super_page) {
  // This can't be just any super page, but it has to be the first super page
  // of the reservation, as we assume here that the metadata is near its
  // beginning.
  PA_DCHECK(IsReservationStart(super_page));
  PA_DCHECK(!(super_page & kSuperPageOffsetMask));
  // The metadata area is exactly one system page (the guard page) into the
  // super page.
  return reinterpret_cast<PartitionPageMetadata*>(super_page +
                                                  SystemPageSize());
}
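
// A rough sketch of the super page layout that the helpers above and below
// rely on (not to scale; the bitmaps exist only for normal buckets and/or
// quarantine):
//
//   | guard page | metadata | (bitmaps) | payload: slot spans ... | guard |
//   ^super_page  ^super_page + SystemPageSize()
//
// The payload spans [SuperPagePayloadBegin(), SuperPagePayloadEnd()); the last
// partition page of the super page is reserved as a guard region.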

PA_ALWAYS_INLINE const SubsequentPageMetadata* GetSubsequentPageMetadata(
    const PartitionPageMetadata* page_metadata) {
  return &(page_metadata + 1)->subsequent_page_metadata;
}

PA_ALWAYS_INLINE SubsequentPageMetadata* GetSubsequentPageMetadata(
    PartitionPageMetadata* page_metadata) {
  return &(page_metadata + 1)->subsequent_page_metadata;
}

PA_ALWAYS_INLINE PartitionSuperPageExtentEntry* PartitionSuperPageToExtent(
    uintptr_t super_page) {
  // The very first entry of the metadata is the super page extent entry.
  return reinterpret_cast<PartitionSuperPageExtentEntry*>(
      PartitionSuperPageToMetadataArea(super_page));
}

#if BUILDFLAG(USE_STARSCAN)

// Size that should be reserved for state bitmap (if present) inside a super
// page. Elements of a super page are partition-page-aligned, hence the
// returned size is a multiple of partition page size.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
ReservedStateBitmapSize() {
  return base::bits::AlignUp(sizeof(AllocationStateMap), PartitionPageSize());
}

// Size that should be committed for state bitmap (if present) inside a super
// page. It is a multiple of system page size.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
CommittedStateBitmapSize() {
  return base::bits::AlignUp(sizeof(AllocationStateMap), SystemPageSize());
}

// Returns the address/pointer to the state bitmap in the super page. It's the
// caller's responsibility to ensure that the bitmaps even exist.
PA_ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  return super_page + PartitionPageSize() +
         (IsManagedByNormalBuckets(super_page) ? ReservedFreeSlotBitmapSize()
                                               : 0);
}

PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
    uintptr_t super_page) {
  return reinterpret_cast<AllocationStateMap*>(
      SuperPageStateBitmapAddr(super_page));
}

#else  // BUILDFLAG(USE_STARSCAN)

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
ReservedStateBitmapSize() {
  return 0ull;
}

#endif  // BUILDFLAG(USE_STARSCAN)

PA_ALWAYS_INLINE uintptr_t
SuperPagePayloadStartOffset(bool is_managed_by_normal_buckets,
                            bool with_quarantine) {
  return PartitionPageSize() +
         (is_managed_by_normal_buckets ? ReservedFreeSlotBitmapSize() : 0) +
         (with_quarantine ? ReservedStateBitmapSize() : 0);
}

PA_ALWAYS_INLINE uintptr_t SuperPagePayloadBegin(uintptr_t super_page,
                                                 bool with_quarantine) {
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  return super_page +
         SuperPagePayloadStartOffset(IsManagedByNormalBuckets(super_page),
                                     with_quarantine);
}

PA_ALWAYS_INLINE uintptr_t SuperPagePayloadEndOffset() {
  return kSuperPageSize - PartitionPageSize();
}

PA_ALWAYS_INLINE uintptr_t SuperPagePayloadEnd(uintptr_t super_page) {
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  return super_page + SuperPagePayloadEndOffset();
}

PA_ALWAYS_INLINE size_t SuperPagePayloadSize(uintptr_t super_page,
                                             bool with_quarantine) {
  return SuperPagePayloadEnd(super_page) -
         SuperPagePayloadBegin(super_page, with_quarantine);
}

PA_ALWAYS_INLINE PartitionSuperPageExtentEntry*
SlotSpanMetadata::ToSuperPageExtent() const {
  uintptr_t super_page = reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask;
  return PartitionSuperPageToExtent(super_page);
}

// Returns whether the pointer lies within the super page's payload area (i.e.
// the area devoted to slot spans). It doesn't check whether it's within a
// valid slot span. It merely ensures it doesn't fall in a metadata region that
// would surely never contain user data.
PA_ALWAYS_INLINE bool IsWithinSuperPagePayload(uintptr_t address,
                                               bool with_quarantine) {
  // Quarantine can only be enabled for normal buckets in the current code.
  PA_DCHECK(!with_quarantine || IsManagedByNormalBuckets(address));
  uintptr_t super_page = address & kSuperPageBaseMask;
  uintptr_t payload_start = SuperPagePayloadBegin(super_page, with_quarantine);
  uintptr_t payload_end = SuperPagePayloadEnd(super_page);
  return address >= payload_start && address < payload_end;
}

// Converts from an address inside a super page into a pointer to the
// PartitionPageMetadata object (within the super page's metadata) that
// describes the partition page where |address| is located. |address| doesn't
// have to be located within a valid (i.e. allocated) slot span, but must be
// within the super page's payload area (i.e. the area devoted to slot spans).
//
// While it is generally valid for |address| to be in the middle of an
// allocation, care has to be taken with direct maps that span multiple super
// pages. This function's behavior is undefined if |address| lies in a
// subsequent super page.
PA_ALWAYS_INLINE PartitionPageMetadata* PartitionPageMetadata::FromAddr(
    uintptr_t address) {
  uintptr_t super_page = address & kSuperPageBaseMask;

#if BUILDFLAG(PA_DCHECK_IS_ON)
  PA_DCHECK(IsReservationStart(super_page));
  DCheckIsWithInSuperPagePayload(address);
#endif

  uintptr_t partition_page_index =
      (address & kSuperPageOffsetMask) >> PartitionPageShift();
  // Index 0 is invalid because it is the super page extent metadata and the
  // last index is invalid because the whole PartitionPage is set as guard
  // pages. This repeats part of the payload PA_DCHECK above, which also checks
  // for other exclusions.
  PA_DCHECK(partition_page_index);
  PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
  return PartitionSuperPageToMetadataArea(super_page) + partition_page_index;
}
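
// A worked example of the index computation above, assuming a typical
// configuration with 2MiB super pages and PartitionPageShift() == 14 (16kiB
// partition pages): an |address| at super-page offset 0x40000 yields
// partition_page_index == 0x40000 >> 14 == 16, out of
// NumPartitionPagesPerSuperPage() == 2MiB / 16kiB == 128 partition pages.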

// Converts from a pointer to the SlotSpanMetadata object (within a super
// page's metadata) into a pointer to the beginning of the slot span. This
// works on direct maps too.
PA_ALWAYS_INLINE uintptr_t
SlotSpanMetadata::ToSlotSpanStart(const SlotSpanMetadata* slot_span) {
  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(slot_span);
  uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);

  // A valid |slot_span| must be past the first guard system page and within
  // the following metadata region.
  PA_DCHECK(super_page_offset > SystemPageSize());
  // Must be less than total metadata region.
  PA_DCHECK(super_page_offset <
            SystemPageSize() +
                (NumPartitionPagesPerSuperPage() * kPageMetadataSize));
  uintptr_t partition_page_index =
      (super_page_offset - SystemPageSize()) >> kPageMetadataShift;
  // Index 0 is invalid because it is the super page extent metadata and the
  // last index is invalid because the whole PartitionPage is set as guard
  // pages.
  PA_DCHECK(partition_page_index);
  PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
  uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
  return super_page_base + (partition_page_index << PartitionPageShift());
}
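
// A worked inverse of the example above (same assumptions, plus
// SystemPageSize() == 4kiB and kPageMetadataShift == 5, i.e. 32B metadata
// entries): a |slot_span| at super-page offset 0x1000 + 16 * 32 maps to
// partition_page_index == 16, so the slot span starts at
// super_page_base + (16 << 14) == super_page_base + 0x40000.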

// Converts an address inside a slot span into a pointer to the
// SlotSpanMetadata object (within the super page's metadata) that describes
// the slot span containing that slot.
//
// CAUTION! For direct-mapped allocations, |address| has to be within the first
// partition page.
PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromAddr(
    uintptr_t address) {
  auto* page_metadata = PartitionPageMetadata::FromAddr(address);
  PA_DCHECK(page_metadata->is_valid);
  // Partition pages in the same slot span share the same SlotSpanMetadata
  // object (located in the first PartitionPageMetadata object of that span).
  // Adjust for that.
  page_metadata -= page_metadata->slot_span_metadata_offset;
  PA_DCHECK(page_metadata->is_valid);
  PA_DCHECK(!page_metadata->slot_span_metadata_offset);
  auto* slot_span = &page_metadata->slot_span_metadata;
  // TODO(crbug.com/1257655): See if we can afford to make this a CHECK.
  DCheckIsValidSlotSpan(slot_span);
  // For direct maps, if |address| doesn't point within the first partition
  // page, |slot_span_metadata_offset| will be 0, |page_metadata| won't get
  // shifted, and |slot_size| will be left at 0, which the check below catches.
  PA_DCHECK(slot_span->bucket->slot_size);
  return slot_span;
}

// Like |FromAddr|, but asserts that |slot_start| indeed points to the
// beginning of a slot. It doesn't check if the slot is actually allocated.
//
// This works on direct maps too.
PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromSlotStart(
    uintptr_t slot_start) {
  auto* slot_span = FromAddr(slot_start);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // Checks that the pointer offset from the slot span start is a multiple of
  // the slot size.
  uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
  PA_DCHECK(!((slot_start - slot_span_start) % slot_span->bucket->slot_size));
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
  return slot_span;
}

// Like |FromAddr|, but asserts that |object| indeed points to the beginning of
// an object. It doesn't check if the object is actually allocated.
//
// This works on direct maps too.
PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObject(void* object) {
  uintptr_t object_addr = ObjectPtr2Addr(object);
  auto* slot_span = FromAddr(object_addr);
  DCheckIsValidObjectAddress(slot_span, object_addr);
  return slot_span;
}

// Like |FromAddr|, but asserts that |address| indeed points within an object.
// It doesn't check if the object is actually allocated.
//
// CAUTION! For direct-mapped allocation, |address| has to be within the first
// partition page.
PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObjectInnerAddr(
    uintptr_t address) {
  auto* slot_span = FromAddr(address);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // Checks that the address is within the expected object boundaries.
  uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
  uintptr_t shift_from_slot_start =
      (address - slot_span_start) % slot_span->bucket->slot_size;
  DCheckIsValidShiftFromSlotStart(slot_span, shift_from_slot_start);
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
  return slot_span;
}

PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObjectInnerPtr(
    void* ptr) {
  return FromObjectInnerAddr(ObjectInnerPtr2Addr(ptr));
}

PA_ALWAYS_INLINE void SlotSpanMetadata::SetRawSize(size_t raw_size) {
  PA_DCHECK(CanStoreRawSize());
  auto* subsequent_page_metadata =
      GetSubsequentPageMetadata(reinterpret_cast<PartitionPageMetadata*>(this));
  subsequent_page_metadata->raw_size = raw_size;
}

PA_ALWAYS_INLINE size_t SlotSpanMetadata::GetRawSize() const {
  PA_DCHECK(CanStoreRawSize());
  const auto* subsequent_page_metadata = GetSubsequentPageMetadata(
      reinterpret_cast<const PartitionPageMetadata*>(this));
  return subsequent_page_metadata->raw_size;
}

PA_ALWAYS_INLINE void SlotSpanMetadata::SetFreelistHead(
    PartitionFreelistEntry* new_head) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // |this| is in the metadata region, hence isn't MTE-tagged. Untag |new_head|
  // as well.
  uintptr_t new_head_untagged = UntagPtr(new_head);
  PA_DCHECK(!new_head ||
            (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) ==
                (new_head_untagged & kSuperPageBaseMask));
#endif
  freelist_head = new_head;
  // Inserted something new in the freelist, assume that it is not sorted
  // anymore.
  freelist_is_sorted_ = false;
}

PA_ALWAYS_INLINE PartitionFreelistEntry* SlotSpanMetadata::PopForAlloc(
    size_t size,
    const PartitionFreelistDispatcher* freelist_dispatcher) {
  // Not using bucket->slot_size directly as the compiler doesn't know that
  // |bucket->slot_size| is the same as |size|.
  PA_DCHECK(size == bucket->slot_size);
  PartitionFreelistEntry* result = freelist_head;
  // Not setting freelist_is_sorted_ to false since this doesn't destroy
  // ordering.
  freelist_head = freelist_dispatcher->GetNext(freelist_head, size);

  num_allocated_slots++;
  return result;
}

PA_ALWAYS_INLINE void SlotSpanMetadata::Free(
    uintptr_t slot_start,
    PartitionRoot* root,
    const PartitionFreelistDispatcher* freelist_dispatcher)
    // PartitionRootLock() is not defined inside partition_page.h, but
    // static analysis doesn't require the implementation.
    PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root)) {
  DCheckRootLockIsAcquired(root);
  auto* entry =
      static_cast<PartitionFreelistEntry*>(SlotStartAddr2Ptr(slot_start));
  // Catches an immediate double free.
  PA_CHECK(entry != freelist_head);

  // Look for double free one level deeper in debug.
  PA_DCHECK(!freelist_head || entry != freelist_dispatcher->GetNext(
                                           freelist_head, bucket->slot_size));
  freelist_dispatcher->SetNext(entry, freelist_head);
  SetFreelistHead(entry);
  // A best effort double-free check. Works only on empty slot spans.
  PA_CHECK(num_allocated_slots);
  --num_allocated_slots;
  // If the span is marked full, or became empty, take the slow path to update
  // internal state.
  if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
    FreeSlowPath(1);
  } else {
    // All single-slot allocations must go through the slow path to
    // correctly update the raw size.
    PA_DCHECK(!CanStoreRawSize());
  }
}

PA_ALWAYS_INLINE void SlotSpanMetadata::AppendFreeList(
    PartitionFreelistEntry* head,
    PartitionFreelistEntry* tail,
    size_t number_of_freed,
    PartitionRoot* root,
    const PartitionFreelistDispatcher* freelist_dispatcher)
    PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root)) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  DCheckRootLockIsAcquired(root);
  PA_DCHECK(!(freelist_dispatcher->GetNext(tail, bucket->slot_size)));
  PA_DCHECK(number_of_freed);
  PA_DCHECK(num_allocated_slots);
  if (CanStoreRawSize()) {
    PA_DCHECK(number_of_freed == 1);
  }
  {
    size_t number_of_entries = 0;
    for (auto* entry = head; entry;
         entry = freelist_dispatcher->GetNext(entry, bucket->slot_size),
               ++number_of_entries) {
      uintptr_t untagged_entry = UntagPtr(entry);
      // Check that all entries belong to this slot span.
      PA_DCHECK(ToSlotSpanStart(this) <= untagged_entry);
      PA_DCHECK(untagged_entry <
                ToSlotSpanStart(this) + bucket->get_bytes_per_span());
    }
    PA_DCHECK(number_of_entries == number_of_freed);
  }
#endif

  freelist_dispatcher->SetNext(tail, freelist_head);
  SetFreelistHead(head);
  PA_DCHECK(num_allocated_slots >= number_of_freed);
  num_allocated_slots -= number_of_freed;
  // If the span is marked full, or became empty, take the slow path to update
  // internal state.
  if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
    FreeSlowPath(number_of_freed);
  } else {
    // All single-slot allocations must go through the slow path to
    // correctly update the raw size.
    PA_DCHECK(!CanStoreRawSize());
  }
}

PA_ALWAYS_INLINE bool SlotSpanMetadata::is_active() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret =
      (num_allocated_slots > 0 && (freelist_head || num_unprovisioned_slots));
  if (ret) {
    PA_DCHECK(!marked_full);
    PA_DCHECK(num_allocated_slots < bucket->get_slots_per_span());
  }
  return ret;
}

PA_ALWAYS_INLINE bool SlotSpanMetadata::is_full() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret = (num_allocated_slots == bucket->get_slots_per_span());
  if (ret) {
    PA_DCHECK(!freelist_head);
    PA_DCHECK(!num_unprovisioned_slots);
    // May or may not be marked full, so don't check for that.
  }
  return ret;
}

PA_ALWAYS_INLINE bool SlotSpanMetadata::is_empty() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret = (!num_allocated_slots && freelist_head);
  if (ret) {
    PA_DCHECK(!marked_full);
  }
  return ret;
}

PA_ALWAYS_INLINE bool SlotSpanMetadata::is_decommitted() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret = (!num_allocated_slots && !freelist_head);
  if (ret) {
    PA_DCHECK(!marked_full);
    PA_DCHECK(!num_unprovisioned_slots);
    PA_DCHECK(!in_empty_cache_);
  }
  return ret;
}
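
// Summary of the four state predicates above, in terms of the metadata fields
// they check (with |slots| standing for num_allocated_slots):
//
//   is_active():      slots > 0 && (freelist_head || num_unprovisioned_slots)
//   is_full():        slots == bucket->get_slots_per_span()
//   is_empty():       slots == 0 && freelist_head
//   is_decommitted(): slots == 0 && !freelist_head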

PA_ALWAYS_INLINE void SlotSpanMetadata::Reset() {
  PA_DCHECK(is_decommitted());

  size_t num_slots_per_span = bucket->get_slots_per_span();
  PA_DCHECK(num_slots_per_span <= kMaxSlotsPerSlotSpan);
  num_unprovisioned_slots = static_cast<uint32_t>(num_slots_per_span);
  PA_DCHECK(num_unprovisioned_slots);

  ToSuperPageExtent()->IncrementNumberOfNonemptySlotSpans();

  next_slot_span = nullptr;
}

#if BUILDFLAG(USE_STARSCAN)
// Returns the state bitmap from an address within a normal-bucket super page.
// It's the caller's responsibility to ensure that the bitmap exists.
PA_ALWAYS_INLINE AllocationStateMap* StateBitmapFromAddr(uintptr_t address) {
  PA_DCHECK(IsManagedByNormalBuckets(address));
  uintptr_t super_page = address & kSuperPageBaseMask;
  return SuperPageStateBitmap(super_page);
}
#endif  // BUILDFLAG(USE_STARSCAN)

// Iterates over all slot spans in a super-page. |Callback| must return true if
// early return is needed.
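//
// A minimal usage sketch (the lambda body is illustrative only; the root lock
// must be held, per the DCHECK in the implementation):
//
//   IterateSlotSpans(super_page, /*with_quarantine=*/false,
//                    [](SlotSpanMetadata* slot_span) {
//                      // Inspect |slot_span| here.
//                      return false;  // Keep iterating.
//                    });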
template <typename Callback>
void IterateSlotSpans(uintptr_t super_page,
                      bool with_quarantine,
                      Callback callback) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  auto* extent_entry = PartitionSuperPageToExtent(super_page);
  DCheckRootLockIsAcquired(extent_entry->root);
#endif

  auto* const first_page_metadata = PartitionPageMetadata::FromAddr(
      SuperPagePayloadBegin(super_page, with_quarantine));
  auto* const last_page_metadata = PartitionPageMetadata::FromAddr(
      SuperPagePayloadEnd(super_page) - PartitionPageSize());
  PartitionPageMetadata* page_metadata = nullptr;
  SlotSpanMetadata* slot_span = nullptr;
  for (page_metadata = first_page_metadata;
       page_metadata <= last_page_metadata;) {
    PA_DCHECK(!page_metadata
                   ->slot_span_metadata_offset);  // Ensure slot span beginning.
    if (!page_metadata->is_valid) {
      if (page_metadata->has_valid_span_after_this) {
        // page_metadata doesn't represent a valid slot span, but there is
        // another one somewhere after this. Keep iterating to find it.
        ++page_metadata;
        continue;
      }
      // There are currently no valid spans from here on. No need to iterate
      // the rest of the super page.
      break;
    }
    slot_span = &page_metadata->slot_span_metadata;
    if (callback(slot_span)) {
      return;
    }
    page_metadata += slot_span->bucket->get_pages_per_slot_span();
  }
  // Each super page must have at least one valid slot span.
  PA_DCHECK(page_metadata > first_page_metadata);
  // Just a quick check that the search ended at a valid slot span and there
  // was no unnecessary iteration over gaps afterwards.
  PA_DCHECK(page_metadata ==
            reinterpret_cast<PartitionPageMetadata*>(slot_span) +
                slot_span->bucket->get_pages_per_slot_span());
}

}  // namespace partition_alloc::internal

#endif  // PARTITION_ALLOC_PARTITION_PAGE_H_