// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef PARTITION_ALLOC_POOL_OFFSET_FREELIST_H_
#define PARTITION_ALLOC_POOL_OFFSET_FREELIST_H_

#include <cstddef>
#include <cstdint>

#include "build/build_config.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc-inl.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/tagging.h"

#if !defined(ARCH_CPU_BIG_ENDIAN)
#include "partition_alloc/reverse_bytes.h"
#endif  // !defined(ARCH_CPU_BIG_ENDIAN)

namespace partition_alloc::internal {

namespace {
using PoolInfo = PartitionAddressSpace::PoolInfo;
}

class PoolOffsetFreelistEntry;

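// Stores the next freelist entry as an offset into its containing pool,
// combined with the entry's MTE tag, rather than as a raw pointer. The value
// is additionally scrambled (byte-swapped on little-endian, bit-inverted on
// big-endian), so freed memory does not hold a plainly usable pointer.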
class EncodedPoolOffset {
#if defined(ARCH_CPU_BIG_ENDIAN)
  static constexpr uintptr_t kEncodedNullptr = ~uintptr_t{0};
#else
  static constexpr uintptr_t kEncodedNullptr = uintptr_t{0};
#endif

  PA_ALWAYS_INLINE constexpr explicit EncodedPoolOffset(std::nullptr_t)
      : encoded_(kEncodedNullptr) {}
  PA_ALWAYS_INLINE explicit EncodedPoolOffset(void* ptr)
      // The encoded pointer stays MTE-tagged.
      : encoded_(Encode(ptr)) {}

  PA_ALWAYS_INLINE constexpr uintptr_t Inverted() const { return ~encoded_; }

  PA_ALWAYS_INLINE constexpr void Override(uintptr_t encoded) {
    encoded_ = encoded;
  }

  PA_ALWAYS_INLINE constexpr explicit operator bool() const { return encoded_; }

  // Determines the containing pool of `ptr` and returns `ptr`
  // represented as a tagged offset into that pool.
  PA_ALWAYS_INLINE static uintptr_t Encode(void* ptr) {
    if (!ptr) {
      return kEncodedNullptr;
    }
    uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
    PoolInfo pool_info = PartitionAddressSpace::GetPoolInfo(addr);
    // Save an MTE tag as well as an offset.
    uintptr_t tagged_offset = addr & (kPtrTagMask | ~pool_info.base_mask);
    PA_DCHECK(tagged_offset == (pool_info.offset | (addr & kPtrTagMask)));

#if defined(ARCH_CPU_BIG_ENDIAN)
    uintptr_t encoded = ~tagged_offset;
#else
    uintptr_t encoded = ReverseBytes(tagged_offset);
#endif
    return encoded;
  }
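
  // For illustration only, with made-up numbers: on a little-endian build,
  // assuming a pool whose base is 0x0000'7000'0000'0000 (with `base_mask`
  // selecting those upper bits) and no MTE tag, a slot at
  // 0x0000'7000'0000'4000 yields
  //   tagged_offset == 0x0000'0000'0000'4000
  //   encoded       == ReverseBytes(tagged_offset) == 0x0040'0000'0000'0000
  // i.e. the stored value no longer looks like an address in the pool.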

  // Given `pool_info`, decodes a `tagged_offset` into a tagged pointer.
  PA_ALWAYS_INLINE PoolOffsetFreelistEntry* Decode(PoolInfo& pool_info) const {
#if defined(ARCH_CPU_BIG_ENDIAN)
    uintptr_t tagged_offset = ~encoded_;
#else
    uintptr_t tagged_offset = ReverseBytes(encoded_);
#endif

    // We assume `tagged_offset` contains a proper MTE tag.
    return reinterpret_cast<PoolOffsetFreelistEntry*>(pool_info.base |
                                                      tagged_offset);
  }
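
  // `Decode()` is the exact inverse of `Encode()` for a non-null pointer:
  // `pool_info.base` and `tagged_offset` occupy disjoint bits (pool base bits
  // versus offset and tag bits), so OR-ing them back together reconstructs
  // the original MTE-tagged pointer.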

  uintptr_t encoded_;

  friend PoolOffsetFreelistEntry;
};

// This implementation of PartitionAlloc's freelist uses pool offsets rather
// than naked pointers. This is intended to prevent a corrupted freelist
// pointer from easily being used to jump around to arbitrary freed slots.
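//
// A minimal usage sketch (`slot_start`, `old_head` and `slot_size` are
// caller-provided values, named here for illustration only):
//
//   // Turn a freed slot into the new freelist head pointing at `old_head`.
//   auto* entry = PoolOffsetFreelistEntry::EmplaceAndInitNull(slot_start);
//   entry->SetNext(old_head);
//
//   // Walking the list verifies each entry before returning it.
//   PoolOffsetFreelistEntry* next = entry->GetNext(slot_size);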
class PoolOffsetFreelistEntry {
  constexpr explicit PoolOffsetFreelistEntry(std::nullptr_t)
      : encoded_next_(nullptr)
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(encoded_next_.Inverted())
#endif
  {
  }
  explicit PoolOffsetFreelistEntry(PoolOffsetFreelistEntry* next)
      : encoded_next_(next)
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(encoded_next_.Inverted())
#endif
  {
  }
  // For testing only.
  PoolOffsetFreelistEntry(void* next, bool make_shadow_match)
      : encoded_next_(next)
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(make_shadow_match ? encoded_next_.Inverted() : 12345)
#endif
  {
  }

 public:
  ~PoolOffsetFreelistEntry() = delete;

  // Emplaces the freelist entry at the beginning of the given slot span, and
  // initializes it as null-terminated.
  PA_ALWAYS_INLINE static PoolOffsetFreelistEntry* EmplaceAndInitNull(
      void* slot_start_tagged) {
    // |slot_start_tagged| is MTE-tagged.
    auto* entry = new (slot_start_tagged) PoolOffsetFreelistEntry(nullptr);
    return entry;
  }

  PA_ALWAYS_INLINE static PoolOffsetFreelistEntry* EmplaceAndInitNull(
      uintptr_t slot_start) {
    return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
  }

  // Emplaces the freelist entry at the beginning of the given slot span, and
  // initializes it with the given |next| pointer, but encoded.
  //
  // This freelist is built for the thread cache. This means that we cannot
  // check that `this` and the |next| pointer belong to the same super page,
  // as thread-cache freelists may chain slots across super pages.
  PA_ALWAYS_INLINE static PoolOffsetFreelistEntry* EmplaceAndInitForThreadCache(
      uintptr_t slot_start,
      PoolOffsetFreelistEntry* next) {
    auto* entry =
        new (SlotStartAddr2Ptr(slot_start)) PoolOffsetFreelistEntry(next);
    return entry;
  }
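
  // For illustration only (`slot_a` and `slot_b` are hypothetical slot start
  // addresses, possibly in different super pages):
  //
  //   auto* tail = PoolOffsetFreelistEntry::EmplaceAndInitNull(slot_b);
  //   auto* head =
  //       PoolOffsetFreelistEntry::EmplaceAndInitForThreadCache(slot_a, tail);
  //   // head->GetNextForThreadCache<true>(slot_size) == tail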

  // Emplaces the freelist entry at the beginning of the given slot span, and
  // initializes it with the given |next| pointer.
  //
  // This is for testing purposes only! |make_shadow_match| allows you to
  // choose whether the shadow matches the next pointer properly or is trash.
  PA_ALWAYS_INLINE static void EmplaceAndInitForTest(uintptr_t slot_start,
                                                     void* next,
                                                     bool make_shadow_match) {
    new (SlotStartAddr2Ptr(slot_start))
        PoolOffsetFreelistEntry(next, make_shadow_match);
  }

  void CorruptNextForTesting(uintptr_t v) {
    // We just need a value that can never be a valid pool offset here.
    encoded_next_.Override(v);
  }

  // Puts `slot_size` on the stack before crashing in case of memory
  // corruption. Meant to be used to report the failed allocation size.
  template <bool crash_on_corruption>
  PA_ALWAYS_INLINE PoolOffsetFreelistEntry* GetNextForThreadCache(
      size_t slot_size) const {
    return GetNextInternal<crash_on_corruption, /*for_thread_cache=*/true>(
        slot_size);
  }
  PA_ALWAYS_INLINE PoolOffsetFreelistEntry* GetNext(size_t slot_size) const {
    return GetNextInternal<true, /*for_thread_cache=*/false>(slot_size);
  }

  PA_NOINLINE void CheckFreeList(size_t slot_size) const {
    for (auto* entry = this; entry; entry = entry->GetNext(slot_size)) {
      // `GetNext()` calls `IsWellFormed()`.
    }
  }

  PA_NOINLINE void CheckFreeListForThreadCache(size_t slot_size) const {
    for (auto* entry = this; entry;
         entry = entry->GetNextForThreadCache<true>(slot_size)) {
      // `GetNextForThreadCache()` calls `IsWellFormed()`.
    }
  }

  PA_ALWAYS_INLINE void SetNext(PoolOffsetFreelistEntry* entry) {
    // SetNext() is either called on the freelist head, when provisioning new
    // slots, or on an entry for which GetNext() has already been called, so
    // there is no need to pass the size.
#if BUILDFLAG(PA_DCHECK_IS_ON)
    // Regular freelists always point to an entry within the same super page.
    //
    // This is most likely a PartitionAlloc bug if this triggers.
    if (PA_UNLIKELY(entry &&
                    (SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
                        (SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
      FreelistCorruptionDetected(0);
    }
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)

    encoded_next_ = EncodedPoolOffset(entry);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
    shadow_ = encoded_next_.Inverted();
#endif
  }

  // Zeroes out |this| before returning the slot. The pointer to this memory
  // will be returned to the user (the caller of Alloc()), so it must not
  // contain internal data.
  PA_ALWAYS_INLINE uintptr_t ClearForAllocation() {
    encoded_next_.Override(0);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
    shadow_ = 0;
#endif
    return SlotStartPtr2Addr(this);
  }

  PA_ALWAYS_INLINE constexpr bool IsEncodedNextPtrZero() const {
    return !encoded_next_;
  }

 private:
  template <bool crash_on_corruption, bool for_thread_cache>
  PA_ALWAYS_INLINE PoolOffsetFreelistEntry* GetNextInternal(
      size_t slot_size) const {
    // GetNext() can be called on discarded memory, in which case
    // |encoded_next_| is 0, and none of the checks apply. Don't prefetch
    // nullptr either.
    if (IsEncodedNextPtrZero()) {
      return nullptr;
    }

    PoolInfo pool_info = GetPoolInfo(reinterpret_cast<uintptr_t>(this));
    // We assume the decoded offset has no bits set in `pool_info.base_mask`.
    // This assumption is checked in `IsWellFormed()` later.
    auto* ret = encoded_next_.Decode(pool_info);
    if (PA_UNLIKELY(!IsWellFormed<for_thread_cache>(pool_info, this, ret))) {
      if constexpr (crash_on_corruption) {
        // Put the corrupted data on the stack, it may give us more information
        // about what kind of corruption it was.
        PA_DEBUG_DATA_ON_STACK("first",
                               static_cast<size_t>(encoded_next_.encoded_));
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        PA_DEBUG_DATA_ON_STACK("second", static_cast<size_t>(shadow_));
#endif
        FreelistCorruptionDetected(slot_size);
      }
      return nullptr;
    }

    // In real-world profiles, the load of |encoded_next_| above is responsible
    // for a large fraction of the allocation cost. However, we cannot prefetch
    // it early enough, since its address is only known right before it is
    // accessed. In the case of repeated allocations, though, we can prefetch
    // the memory that the *next* allocation will touch, i.e. *ret; do that
    // now.
    PA_PREFETCH(ret);
    return ret;
  }

  template <bool for_thread_cache>
  PA_ALWAYS_INLINE static bool IsWellFormed(
      const PoolInfo& pool_info,
      const PoolOffsetFreelistEntry* here,
      const PoolOffsetFreelistEntry* next) {
    // Don't allow the freelist to be blindly followed to any location.
    // Checks the following constraints:
    // - `here->shadow_` must match an inversion of `here->encoded_next_` (if
    //   present).
    // - `next` must not have bits in the pool base mask, except an MTE tag.
    // - `next` cannot point inside the metadata area.
    // - `here` and `next` must belong to the same super page, unless this is
    //   in the thread cache (they even always belong to the same slot span).
    // - `next` is marked as free in the free slot bitmap (if present).
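    //
    // For illustration only, with made-up numbers: if a use-after-free
    // overwrites `encoded_next_` so that it decodes to an address outside the
    // pool (e.g. 0x0000'5000'0000'4000 against a hypothetical pool base of
    // 0x0000'7000'0000'0000), `pool_base_mask_matches` below is false.
    // Likewise, a `next` pointing into the first partition page of its super
    // page fails `not_in_metadata`.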

    const uintptr_t here_address = SlotStartPtr2Addr(here);
    const uintptr_t next_address = SlotStartPtr2Addr(next);

#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
    bool shadow_ptr_ok = here->encoded_next_.Inverted() == here->shadow_;
#else
    constexpr bool shadow_ptr_ok = true;
#endif

    // `next_address` is MTE-untagged and `pool_info.base` does not contain a
    // tag.
    const bool pool_base_mask_matches =
        (next_address & pool_info.base_mask) == pool_info.base;

    // This is necessary but not sufficient when quarantine is enabled, see
    // SuperPagePayloadBegin() in partition_page.h. However we don't want to
    // fetch anything from the root in this function.
    const bool not_in_metadata =
        (next_address & kSuperPageOffsetMask) >= PartitionPageSize();

    if constexpr (for_thread_cache) {
      return pool_base_mask_matches & shadow_ptr_ok & not_in_metadata;
    }

    const bool same_super_page = (here_address & kSuperPageBaseMask) ==
                                 (next_address & kSuperPageBaseMask);

#if BUILDFLAG(USE_FREESLOT_BITMAP)
    // TODO(crbug.com/1461983): Add support for freeslot bitmaps.
    static_assert(false, "USE_FREESLOT_BITMAP not supported");
#else
    constexpr bool marked_as_free_in_bitmap = true;
#endif

    return pool_base_mask_matches & shadow_ptr_ok & same_super_page &
           marked_as_free_in_bitmap & not_in_metadata;
  }

  // Expresses the next entry in the freelist as an offset in the
  // same pool as `this`.
  EncodedPoolOffset encoded_next_;
  // This is intended to detect unintentional corruptions of the freelist.
  // These can happen due to a use-after-free, or an overflow of the previous
  // allocation in the slot span.
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
  uintptr_t shadow_;
#endif
};

}  // namespace partition_alloc::internal

#endif  // PARTITION_ALLOC_POOL_OFFSET_FREELIST_H_