1 // Copyright 2018 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_
6 #define PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_
7 
8 #include <cstddef>
9 #include <cstdint>
10 
11 #include "build/build_config.h"
12 #include "partition_alloc/freeslot_bitmap.h"
13 #include "partition_alloc/partition_alloc-inl.h"
14 #include "partition_alloc/partition_alloc_base/compiler_specific.h"
15 #include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
16 #include "partition_alloc/partition_alloc_base/immediate_crash.h"
17 #include "partition_alloc/partition_alloc_buildflags.h"
18 #include "partition_alloc/partition_alloc_config.h"
19 #include "partition_alloc/partition_alloc_constants.h"
20 
21 #if !defined(ARCH_CPU_BIG_ENDIAN)
22 #include "partition_alloc/reverse_bytes.h"
23 #endif  // !defined(ARCH_CPU_BIG_ENDIAN)
24 
25 namespace partition_alloc::internal {
26 
27 class EncodedNextFreelistEntry;
28 
29 class EncodedFreelistPtr {
30  private:
EncodedFreelistPtr(std::nullptr_t)31   PA_ALWAYS_INLINE constexpr explicit EncodedFreelistPtr(std::nullptr_t)
32       : encoded_(Transform(0)) {}
EncodedFreelistPtr(void * ptr)33   PA_ALWAYS_INLINE explicit EncodedFreelistPtr(void* ptr)
34       // The encoded pointer stays MTE-tagged.
35       : encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {}
36 
Decode()37   PA_ALWAYS_INLINE EncodedNextFreelistEntry* Decode() const {
38     return reinterpret_cast<EncodedNextFreelistEntry*>(Transform(encoded_));
39   }
40 
Inverted()41   PA_ALWAYS_INLINE constexpr uintptr_t Inverted() const { return ~encoded_; }
42 
Override(uintptr_t encoded)43   PA_ALWAYS_INLINE constexpr void Override(uintptr_t encoded) {
44     encoded_ = encoded;
45   }
46 
47   PA_ALWAYS_INLINE constexpr explicit operator bool() const { return encoded_; }
48 
49   // Transform() works the same in both directions, so can be used for
50   // encoding and decoding.
Transform(uintptr_t address)51   PA_ALWAYS_INLINE static constexpr uintptr_t Transform(uintptr_t address) {
52     // We use bswap on little endian as a fast transformation for two reasons:
53     // 1) On 64 bit architectures, the pointer is very unlikely to be a
54     //    canonical address. Therefore, if an object is freed and its vtable is
55     //    used where the attacker doesn't get the chance to run allocations
56     //    between the free and use, the vtable dereference is likely to fault.
57     // 2) If the attacker has a linear buffer overflow and elects to try and
58     //    corrupt a freelist pointer, partial pointer overwrite attacks are
59     //    thwarted.
60     // For big endian, similar guarantees are arrived at with a negation.
61 #if defined(ARCH_CPU_BIG_ENDIAN)
62     uintptr_t transformed = ~address;
63 #else
64     uintptr_t transformed = ReverseBytes(address);
65 #endif
66     return transformed;
67   }
68 
69   uintptr_t encoded_;
70 
71   friend EncodedNextFreelistEntry;
72 };
73 
// Freelist entries are encoded for security reasons. See
// //base/allocator/partition_allocator/PartitionAlloc.md
// and |Transform()| for the rationale and mechanism, respectively.
class EncodedNextFreelistEntry {
 private:
  // Constructs a null-terminated entry (the end of a freelist). When the
  // shadow is compiled in, it is initialized to the inversion of the encoded
  // pointer so that IsWellFormed() can later detect corruption.
  constexpr explicit EncodedNextFreelistEntry(std::nullptr_t)
      : encoded_next_(EncodedFreelistPtr(nullptr))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(encoded_next_.Inverted())
#endif
  {
  }
  // Constructs an entry whose decoded next pointer is |next|.
  explicit EncodedNextFreelistEntry(EncodedNextFreelistEntry* next)
      : encoded_next_(EncodedFreelistPtr(next))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(encoded_next_.Inverted())
#endif
  {
  }
  // For testing only. Passing |make_shadow_match| == false deliberately
  // writes a bogus shadow value (12345) to simulate corruption.
  EncodedNextFreelistEntry(void* next, bool make_shadow_match)
      : encoded_next_(EncodedFreelistPtr(next))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(make_shadow_match ? encoded_next_.Inverted() : 12345)
#endif
  {
  }

 public:
  // Entries are emplaced into raw slot memory via placement new (see the
  // EmplaceAndInit* factories) and the memory is handed back to callers by
  // ClearForAllocation(); a destructor is never invoked on them.
  ~EncodedNextFreelistEntry() = delete;

  // Emplaces the freelist entry at the beginning of the given slot span, and
  // initializes it as null-terminated.
  PA_ALWAYS_INLINE static EncodedNextFreelistEntry* EmplaceAndInitNull(
      void* slot_start_tagged) {
    // |slot_start_tagged| is MTE-tagged.
    auto* entry = new (slot_start_tagged) EncodedNextFreelistEntry(nullptr);
    return entry;
  }
  // Overload taking an untagged slot-start address; re-tags via
  // SlotStartAddr2Ptr() before emplacing.
  PA_ALWAYS_INLINE static EncodedNextFreelistEntry* EmplaceAndInitNull(
      uintptr_t slot_start) {
    return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
  }

  // Emplaces the freelist entry at the beginning of the given slot span, and
  // initializes it with the given |next| pointer, but encoded.
  //
  // This freelist is built for the purpose of thread-cache. This means that we
  // can't perform a check that this and the next pointer belong to the same
  // super page, as thread-cache spans may chain slots across super pages.
  PA_ALWAYS_INLINE static EncodedNextFreelistEntry*
  EmplaceAndInitForThreadCache(uintptr_t slot_start,
                               EncodedNextFreelistEntry* next) {
    auto* entry =
        new (SlotStartAddr2Ptr(slot_start)) EncodedNextFreelistEntry(next);
    return entry;
  }

  // Emplaces the freelist entry at the beginning of the given slot span, and
  // initializes it with the given |next| pointer.
  //
  // This is for testing purposes only! |make_shadow_match| allows you to choose
  // if the shadow matches the next pointer properly or is trash.
  PA_ALWAYS_INLINE static void EmplaceAndInitForTest(uintptr_t slot_start,
                                                     void* next,
                                                     bool make_shadow_match) {
    new (SlotStartAddr2Ptr(slot_start))
        EncodedNextFreelistEntry(next, make_shadow_match);
  }

  // Overwrites the encoded next pointer with Transform(v), so that a later
  // Decode() yields |v| as the raw pointer. For testing only.
  void CorruptNextForTesting(uintptr_t v) {
    // We just need a value that can never be a valid pointer here.
    encoded_next_.Override(EncodedFreelistPtr::Transform(v));
  }

  // Puts `slot_size` on the stack before crashing in case of memory
  // corruption. Meant to be used to report the failed allocation size.
  template <bool crash_on_corruption>
  PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNextForThreadCache(
      size_t slot_size) const {
    return GetNextInternal<crash_on_corruption, /*for_thread_cache=*/true>(
        slot_size);
  }
  // Returns the decoded next entry, crashing on detected corruption.
  PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNext(size_t slot_size) const {
    return GetNextInternal<true, /*for_thread_cache=*/false>(slot_size);
  }

  // Walks the entire freelist starting at |this|; each step validates the
  // entry and crashes on corruption.
  PA_NOINLINE void CheckFreeList(size_t slot_size) const {
    for (auto* entry = this; entry; entry = entry->GetNext(slot_size)) {
      // `GetNext()` calls `IsWellFormed()`.
    }
  }

  // Same as CheckFreeList(), but with the relaxed thread-cache validation
  // (no same-super-page requirement).
  PA_NOINLINE void CheckFreeListForThreadCache(size_t slot_size) const {
    for (auto* entry = this; entry;
         entry = entry->GetNextForThreadCache<true>(slot_size)) {
      // `GetNextForThreadCache()` calls `IsWellFormed()`.
    }
  }

  PA_ALWAYS_INLINE void SetNext(EncodedNextFreelistEntry* entry) {
    // SetNext() is either called on the freelist head, when provisioning new
    // slots, or when GetNext() has been called before, no need to pass the
    // size.
#if BUILDFLAG(PA_DCHECK_IS_ON)
    // Regular freelists always point to an entry within the same super page.
    //
    // This is most likely a PartitionAlloc bug if this triggers.
    if (PA_UNLIKELY(entry &&
                    (SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
                        (SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
      FreelistCorruptionDetected(0);
    }
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)

    encoded_next_ = EncodedFreelistPtr(entry);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
    // Keep the shadow in sync with the freshly encoded pointer.
    shadow_ = encoded_next_.Inverted();
#endif
  }

  // Zeroes out |this| before returning the slot. The pointer to this memory
  // will be returned to the user (caller of Alloc()), thus can't have internal
  // data.
  PA_ALWAYS_INLINE uintptr_t ClearForAllocation() {
    encoded_next_.Override(0);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
    shadow_ = 0;
#endif
    return SlotStartPtr2Addr(this);
  }

  // True iff the stored encoded pointer is 0 (list terminator, or memory
  // cleared by ClearForAllocation()/discarding).
  PA_ALWAYS_INLINE constexpr bool IsEncodedNextPtrZero() const {
    return !encoded_next_;
  }

 private:
  template <bool crash_on_corruption, bool for_thread_cache>
  PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNextInternal(
      size_t slot_size) const {
    // GetNext() can be called on discarded memory, in which case
    // |encoded_next_| is 0, and none of the checks apply. Don't prefetch
    // nullptr either.
    if (IsEncodedNextPtrZero()) {
      return nullptr;
    }

    auto* ret = encoded_next_.Decode();
    if (PA_UNLIKELY(!IsWellFormed<for_thread_cache>(this, ret))) {
      if constexpr (crash_on_corruption) {
        // Put the corrupted data on the stack, it may give us more information
        // about what kind of corruption that was.
        PA_DEBUG_DATA_ON_STACK("first",
                               static_cast<size_t>(encoded_next_.encoded_));
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        PA_DEBUG_DATA_ON_STACK("second", static_cast<size_t>(shadow_));
#endif
        FreelistCorruptionDetected(slot_size);
      }
      // When not crashing, a malformed entry is treated as the end of the
      // list.
      return nullptr;
    }

    // In real-world profiles, the load of |encoded_next_| above is responsible
    // for a large fraction of the allocation cost. However, we cannot
    // anticipate it enough since it is accessed right after we know its
    // address.
    //
    // In the case of repeated allocations, we can prefetch the access that will
    // be done at the *next* allocation, which will touch *ret, prefetch it.
    PA_PREFETCH(ret);
    return ret;
  }

  template <bool for_thread_cache>
  PA_ALWAYS_INLINE static bool IsWellFormed(
      const EncodedNextFreelistEntry* here,
      const EncodedNextFreelistEntry* next) {
    // Don't allow the freelist to be blindly followed to any location.
    // Checks following constraints:
    // - `here->shadow_` must match an inversion of `here->next_` (if present).
    // - `next` cannot point inside the metadata area.
    // - `here` and `next` must belong to the same superpage, unless this is in
    //   the thread cache (they even always belong to the same slot span).
    // - `next` is marked as free in the free slot bitmap (if present).

    const uintptr_t here_address = SlotStartPtr2Addr(here);
    const uintptr_t next_address = SlotStartPtr2Addr(next);

#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
    bool shadow_ptr_ok = here->encoded_next_.Inverted() == here->shadow_;
#else
    constexpr bool shadow_ptr_ok = true;
#endif

    // This is necessary but not sufficient when quarantine is enabled, see
    // SuperPagePayloadBegin() in partition_page.h. However we don't want to
    // fetch anything from the root in this function.
    const bool not_in_metadata =
        (next_address & kSuperPageOffsetMask) >= PartitionPageSize();

    if constexpr (for_thread_cache) {
      return shadow_ptr_ok & not_in_metadata;
    }

    const bool same_super_page = (here_address & kSuperPageBaseMask) ==
                                 (next_address & kSuperPageBaseMask);

#if BUILDFLAG(USE_FREESLOT_BITMAP)
    bool marked_as_free_in_bitmap = !FreeSlotBitmapSlotIsUsed(next_address);
#else
    constexpr bool marked_as_free_in_bitmap = true;
#endif

    // Bitwise & (not &&) to avoid branches; all operands are already
    // computed bools.
    return shadow_ptr_ok & same_super_page & marked_as_free_in_bitmap &
           not_in_metadata;
  }

  EncodedFreelistPtr encoded_next_;
  // This is intended to detect unintentional corruptions of the freelist.
  // These can happen due to a Use-after-Free, or overflow of the previous
  // allocation in the slot span.
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
  uintptr_t shadow_;
#endif
};
302 
303 }  // namespace partition_alloc::internal
304 
305 #endif  // PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_
306