// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef PARTITION_ALLOC_PARTITION_BUCKET_H_
#define PARTITION_ALLOC_PARTITION_BUCKET_H_

#include <cstddef>
#include <cstdint>

#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/partition_page_constants.h"

namespace partition_alloc::internal {

constexpr inline int kPartitionNumSystemPagesPerSlotSpanBits = 8;

// Visible for testing.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uint8_t ComputeSystemPagesPerSlotSpan(size_t slot_size,
                                      bool prefer_smaller_slot_spans);

// Visible for testing.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool CompareSlotSpans(SlotSpanMetadata* a, SlotSpanMetadata* b);

struct PartitionBucket {
  // Accessed most in hot path => goes first. Only nullptr for invalid buckets,
  // may be pointing to the sentinel.
  SlotSpanMetadata* active_slot_spans_head;

  SlotSpanMetadata* empty_slot_spans_head;
  SlotSpanMetadata* decommitted_slot_spans_head;
  uint32_t slot_size;
  uint32_t num_system_pages_per_slot_span
      : kPartitionNumSystemPagesPerSlotSpanBits;
  uint32_t num_full_slot_spans : 24;

  // `slot_size_reciprocal` is used to improve the performance of
  // `GetSlotOffset`. It is computed as `(1 / size) * (2 ** M)` where M is
  // chosen to provide the desired accuracy. As a result, we can replace a slow
  // integer division (or modulo) operation with a pair of multiplication and a
  // bit shift, i.e. `value / size` becomes `(value * size_reciprocal) >> M`.
  uint64_t slot_size_reciprocal;

  // This is `M` from the formula above. For accurate results, both `value` and
  // `size`, which are bound by `kMaxBucketed` for our purposes, must be less
  // than `2 ** (M / 2)`. On the other hand, the result of the expression
  // `3 * M / 2` must be less than 64, otherwise integer overflow can occur.
  static constexpr uint64_t kReciprocalShift = 42;
  static constexpr uint64_t kReciprocalMask = (1ull << kReciprocalShift) - 1;
  static_assert(
      kMaxBucketed < (1 << (kReciprocalShift / 2)),
      "GetSlotOffset may produce an incorrect result when kMaxBucketed is too "
      "large.");

  static constexpr size_t kMaxSlotSpansToSort = 200;

  // Public API.
  PA_COMPONENT_EXPORT(PARTITION_ALLOC) void Init(uint32_t new_slot_size);

  // Sets |is_already_zeroed| to true if the allocation was satisfied by
  // requesting (a) new page(s) from the operating system, or false otherwise.
  // This enables an optimization for when callers use
  // |AllocFlags::kZeroFill|: there is no need to call memset on fresh
  // pages; the OS has already zeroed them. (See
  // |PartitionRoot::AllocFromBucket|.)
  //
  // Note the matching Free() functions are in SlotSpanMetadata.
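  //
  // A minimal sketch of that caller-side optimization (illustrative only, not
  // the actual PartitionRoot code; `zero_fill_requested` and `usable_size`
  // stand in for values the real caller derives from `flags` and the bucket):
  //
  //   bool is_already_zeroed = false;
  //   uintptr_t slot_start = bucket->SlowPathAlloc(
  //       root, flags, raw_size, slot_span_alignment, &slot_span,
  //       &is_already_zeroed);
  //   if (slot_start && zero_fill_requested && !is_already_zeroed) {
  //     memset(reinterpret_cast<void*>(slot_start), 0, usable_size);
  //   }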
  PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t
  SlowPathAlloc(PartitionRoot* root,
                AllocFlags flags,
                size_t raw_size,
                size_t slot_span_alignment,
                SlotSpanMetadata** slot_span,
                bool* is_already_zeroed)
      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));

  PA_ALWAYS_INLINE bool CanStoreRawSize() const {
    // For direct-map as well as single-slot slot spans (recognized by checking
    // against |MaxRegularSlotSpanSize()|), we have some spare metadata space
    // in the subsequent PartitionPage to store the raw size. It isn't only
    // about metadata space, though: slot spans that have more than one slot
    // can't have a raw size stored, because we wouldn't know which slot it
    // applies to.
    if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize())) {
      return false;
    }

    PA_DCHECK((slot_size % SystemPageSize()) == 0);
    PA_DCHECK(is_direct_mapped() || get_slots_per_span() == 1);

    return true;
  }

  // Some buckets are pseudo-buckets, which are disabled because they would
  // otherwise not fulfill alignment constraints.
  PA_ALWAYS_INLINE bool is_valid() const {
    return active_slot_spans_head != nullptr;
  }
  PA_ALWAYS_INLINE bool is_direct_mapped() const {
    return !num_system_pages_per_slot_span;
  }
  PA_ALWAYS_INLINE size_t get_bytes_per_span() const {
    // Cannot overflow: num_system_pages_per_slot_span is a bitfield, and 255
    // pages fit in a size_t.
    static_assert(kPartitionNumSystemPagesPerSlotSpanBits <= 8, "");
    return static_cast<size_t>(num_system_pages_per_slot_span)
           << SystemPageShift();
  }
  PA_ALWAYS_INLINE size_t get_slots_per_span() const {
    size_t ret = GetSlotNumber(get_bytes_per_span());
    PA_DCHECK(ret <= kMaxSlotsPerSlotSpan);
    return ret;
  }
  // Returns a natural number of partition pages (calculated by
  // ComputeSystemPagesPerSlotSpan()) to allocate from the current super page
  // when the bucket runs out of slots.
  PA_ALWAYS_INLINE size_t get_pages_per_slot_span() const {
    // Rounds up to the nearest multiple of NumSystemPagesPerPartitionPage().
    return (num_system_pages_per_slot_span +
            (NumSystemPagesPerPartitionPage() - 1)) /
           NumSystemPagesPerPartitionPage();
  }

  // This helper function scans a bucket's active slot span list for a suitable
  // new active slot span. When it finds a suitable new active slot span (one
  // that has free slots and is not empty), it is set as the new active slot
  // span. If there is no suitable new active slot span, the current active
  // slot span is set to SlotSpanMetadata::get_sentinel_slot_span(). As
  // potential slot spans are scanned, they are tidied up according to their
  // state. Empty slot spans are swept onto the empty list, decommitted slot
  // spans onto the decommitted list, and full slot spans are unlinked from any
  // list.
  //
  // This is where the guts of the bucket maintenance is done!
  bool SetNewActiveSlotSpan();

  // Walks the entire active slot span list and performs regular maintenance,
  // where empty, decommitted and full slot spans are moved to their
  // steady-state place.
  PA_COMPONENT_EXPORT(PARTITION_ALLOC) void MaintainActiveList();

  // Returns a slot number starting from the beginning of the slot span.
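  //
  // Illustrative arithmetic only (values assumed, not taken from Init()):
  // with slot_size = 1024 and M = kReciprocalShift = 42,
  // `slot_size_reciprocal` is approximately 2^42 / 1024 = 2^32, so an offset
  // of 3072 bytes yields (3072 * 2^32) >> 42 = 3, matching 3072 / 1024.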
  PA_ALWAYS_INLINE size_t GetSlotNumber(size_t offset_in_slot_span) const {
    // See the static assertion for `kReciprocalShift` above.
    PA_DCHECK(offset_in_slot_span <= kMaxBucketed);
    PA_DCHECK(slot_size <= kMaxBucketed);

    const size_t offset_in_slot =
        ((offset_in_slot_span * slot_size_reciprocal) >> kReciprocalShift);
    PA_DCHECK(offset_in_slot_span / slot_size == offset_in_slot);

    return offset_in_slot;
  }

  // Sorts the freelists of all slot spans.
  void SortSmallerSlotSpanFreeLists();
  // Sorts the active slot span list in ascending freelist length.
  PA_COMPONENT_EXPORT(PARTITION_ALLOC) void SortActiveSlotSpans();

  // We need `AllocNewSuperPageSpan` and `InitializeSlotSpan` to stay
  // PA_ALWAYS_INLINE for speed, but we also need to use them from a separate
  // compilation unit.
  uintptr_t AllocNewSuperPageSpanForGwpAsan(PartitionRoot* root,
                                            size_t super_page_count,
                                            AllocFlags flags)
      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
  void InitializeSlotSpanForGwpAsan(SlotSpanMetadata* slot_span);

 private:
  // Allocates several consecutive super pages. Returns the address of the
  // first super page.
  PA_ALWAYS_INLINE uintptr_t AllocNewSuperPageSpan(PartitionRoot* root,
                                                   size_t super_page_count,
                                                   AllocFlags flags)
      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
  // Allocates a new slot span from the current extent, sized to
  // get_pages_per_slot_span() partition pages. Metadata within this slot span
  // will be initialized. Returns nullptr on error.
  PA_ALWAYS_INLINE SlotSpanMetadata* AllocNewSlotSpan(
      PartitionRoot* root,
      AllocFlags flags,
      size_t slot_span_alignment)
      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));

  // Allocates a new super page from the current extent, if possible. All
  // slot-spans will be in the decommitted state. Returns the address of the
  // super page's payload, or 0 on error.
  PA_ALWAYS_INLINE uintptr_t AllocNewSuperPage(PartitionRoot* root,
                                               AllocFlags flags)
      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));

  // Each bucket allocates a slot span when it runs out of slots.
  // A slot span's size is equal to get_pages_per_slot_span() number of
  // partition pages. This function initializes all PartitionPage within the
  // span to point to the first PartitionPage which holds all the metadata
  // for the span (in PartitionPage::SlotSpanMetadata) and registers this
  // bucket as the owner of the span. It does NOT put the slots into the
  // bucket's freelist.
  PA_ALWAYS_INLINE void InitializeSlotSpan(SlotSpanMetadata* slot_span);

  // Initializes a super page. Returns the address of the super page's payload.
  PA_ALWAYS_INLINE uintptr_t InitializeSuperPage(PartitionRoot* root,
                                                 uintptr_t super_page,
                                                 uintptr_t requested_address)
      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
  // Commits 1 or more pages in |slot_span|, enough to get the next slot, which
  // is returned by this function. If more slots fit into the committed pages,
  // they'll be added to the free list of the slot span (note that next
  // pointers are stored inside the slots).
  // The free list must be empty when calling this function.
  //
  // If |slot_span| was freshly allocated, it must have been passed through
  // InitializeSlotSpan() first.
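  //
  // Rough illustration (numbers assumed, not taken from the implementation):
  // with 4 KiB system pages and slot_size = 256, committing a single system
  // page provisions 16 slots; one is returned to the caller and the remaining
  // 15 are pushed onto the slot span's freelist.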
  PA_ALWAYS_INLINE uintptr_t
  ProvisionMoreSlotsAndAllocOne(PartitionRoot* root,
                                AllocFlags flags,
                                SlotSpanMetadata* slot_span)
      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
};

}  // namespace partition_alloc::internal

#endif  // PARTITION_ALLOC_PARTITION_BUCKET_H_