// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef PARTITION_ALLOC_RESERVATION_OFFSET_TABLE_H_
#define PARTITION_ALLOC_RESERVATION_OFFSET_TABLE_H_

#include <cstddef>
#include <cstdint>
#include <limits>
#include <tuple>

#include "build/build_config.h"
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/tagging.h"
#include "partition_alloc/thread_isolation/alignment.h"

namespace partition_alloc::internal {

static constexpr uint16_t kOffsetTagNotAllocated =
    std::numeric_limits<uint16_t>::max();
static constexpr uint16_t kOffsetTagNormalBuckets =
    std::numeric_limits<uint16_t>::max() - 1;

// The main purpose of the reservation offset table is to easily locate the
// direct-map reservation start address for any given address. There is one
// entry in the table for each super page.
//
// When PartitionAlloc reserves an address region it is always aligned to a
// super page boundary. However, in 32-bit mode, the size may not be
// super-page-aligned, so it may look like this:
//   |<--------- actual reservation size --------->|
//   +----------+----------+-----+-----------+-----+ - - - +
//   |SuperPage0|SuperPage1| ... |SuperPage K|SuperPage K+1|
//   +----------+----------+-----+-----------+-----+ - - - +
//                                           |<-X->|<-Y*)->|
//
// The table entry for each reserved super page says how many super pages
// that super page is from the start of the reservation:
//   +----------+----------+-----+-----------+-------------+
//   |Entry for |Entry for | ... |Entry for  |Entry for    |
//   |SuperPage0|SuperPage1|     |SuperPage K|SuperPage K+1|
//   +----------+----------+-----+-----------+-------------+
//   |     0    |    1     | ... |     K     |   K + 1     |
//   +----------+----------+-----+-----------+-------------+
//
// For an address Z, the reservation start can be found using this formula:
//   ((Z >> kSuperPageShift) - (the entry for Z)) << kSuperPageShift
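//
// For example (a sketch; the concrete numbers assume 2 MiB super pages,
// i.e. kSuperPageShift == 21, and a reservation starting at 0x10000000):
//   Z                = 0x10401234   (an address inside SuperPage2)
//   Z >> 21          = 0x82         (Z's super page number)
//   entry for Z      = 2            (two super pages past the start)
//   (0x82 - 2) << 21 = 0x10000000   (the reservation start, SuperPage0)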
//
// kOffsetTagNotAllocated is a special tag denoting that the super page isn't
// allocated by PartitionAlloc, and kOffsetTagNormalBuckets denotes that it is
// used for a normal-bucket allocation, not for a direct-map allocation.
//
// *) In 32-bit mode, Y is not used by PartitionAlloc, and cannot be used
//    until X is unreserved, because PartitionAlloc always uses kSuperPageSize
//    alignment when reserving address spaces. One can use an "is in pool?"
//    check to further determine which part of the super page is used by
//    PartitionAlloc. This isn't a problem in 64-bit mode, where allocation
//    granularity is kSuperPageSize.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
    PA_THREAD_ISOLATED_ALIGN ReservationOffsetTable {
 public:
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  // There is one reservation offset table per Pool in 64-bit mode.
  static constexpr size_t kReservationOffsetTableCoverage = kPoolMaxSize;
  static constexpr size_t kReservationOffsetTableLength =
      kReservationOffsetTableCoverage >> kSuperPageShift;
#else
  // The size of the reservation offset table should cover the entire 32-bit
  // address space, one element per super page.
  static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
  static constexpr size_t kReservationOffsetTableLength =
      4 * kGiB / kSuperPageSize;
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
  static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets,
                "Offsets should be smaller than kOffsetTagNormalBuckets.");

  struct _ReservationOffsetTable {
    // The number of table elements is less than MAX_UINT16, so the element type
    // can be uint16_t.
    static_assert(
        kReservationOffsetTableLength <= std::numeric_limits<uint16_t>::max(),
        "Length of the reservation offset table must be less than MAX_UINT16");
    uint16_t offsets[kReservationOffsetTableLength] = {};

    constexpr _ReservationOffsetTable() {
      for (uint16_t& offset : offsets) {
        offset = kOffsetTagNotAllocated;
      }
    }
  };
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  // If thread isolation support is enabled, we need to write-protect the tables
  // of the thread isolated pool. For this, we need to pad the tables so that
  // the thread isolated ones start on a page boundary.
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wzero-length-array"
#endif
  char pad_[PA_THREAD_ISOLATED_ARRAY_PAD_SZ(_ReservationOffsetTable,
                                            kNumPools)] = {};
#if defined(__clang__)
#pragma clang diagnostic pop
#endif

  struct _ReservationOffsetTable tables[kNumPools];
  static PA_CONSTINIT ReservationOffsetTable singleton_;
#else
  // A single table for the entire 32-bit address space.
  static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_;
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
};

#if BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(pool_handle handle) {
  PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
  return ReservationOffsetTable::singleton_.tables[handle - 1].offsets;
}

PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
    pool_handle handle) {
  return GetReservationOffsetTable(handle) +
         ReservationOffsetTable::kReservationOffsetTableLength;
}

PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) {
  pool_handle handle = GetPool(address);
  return GetReservationOffsetTable(handle);
}

PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
    uintptr_t address) {
  pool_handle handle = GetPool(address);
  return GetReservationOffsetTableEnd(handle);
}

PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(pool_handle pool,
                                                    uintptr_t offset_in_pool) {
  size_t table_index = offset_in_pool >> kSuperPageShift;
  PA_DCHECK(table_index <
            ReservationOffsetTable::kReservationOffsetTableLength);
  return GetReservationOffsetTable(pool) + table_index;
}
#else   // BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) {
  return ReservationOffsetTable::reservation_offset_table_.offsets;
}

PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
    uintptr_t address) {
  return ReservationOffsetTable::reservation_offset_table_.offsets +
         ReservationOffsetTable::kReservationOffsetTableLength;
}
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(uintptr_t address) {
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  // In 64-bit mode, find the owning Pool and compute the offset from its base.
  PartitionAddressSpace::PoolInfo info = GetPoolInfo(address);
  return ReservationOffsetPointer(info.handle, info.offset);
#else
  size_t table_index = address >> kSuperPageShift;
  PA_DCHECK(table_index <
            ReservationOffsetTable::kReservationOffsetTableLength);
  return GetReservationOffsetTable(address) + table_index;
#endif
}

PA_ALWAYS_INLINE uintptr_t ComputeReservationStart(uintptr_t address,
                                                   uint16_t* offset_ptr) {
  return (address & kSuperPageBaseMask) -
         (static_cast<size_t>(*offset_ptr) << kSuperPageShift);
}

// If the given address doesn't point to direct-map allocated memory,
// returns 0.
PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  bool is_in_brp_pool = IsManagedByPartitionAllocBRPPool(address);
  bool is_in_regular_pool = IsManagedByPartitionAllocRegularPool(address);
  bool is_in_configurable_pool =
      IsManagedByPartitionAllocConfigurablePool(address);
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  bool is_in_thread_isolated_pool =
      IsManagedByPartitionAllocThreadIsolatedPool(address);
#endif

  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, the BRP pool isn't used.
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  PA_DCHECK(!is_in_brp_pool);
#endif
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
  uint16_t* offset_ptr = ReservationOffsetPointer(address);
  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
  if (*offset_ptr == kOffsetTagNormalBuckets) {
    return 0;
  }
  uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // MSVC workaround: the preprocessor seems to choke on an `#if` embedded
  // inside another macro (PA_DCHECK).
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
  constexpr size_t kBRPOffset =
      AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
      AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap;
#else
  constexpr size_t kBRPOffset = 0ull;
#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
  // Make sure the reservation start is in the same pool as |address|.
  // In 32-bit mode, the beginning of a reservation may be excluded
  // from the BRP pool, so shift the pointer. The other pools don't have
  // this logic.
  PA_DCHECK(is_in_brp_pool ==
            IsManagedByPartitionAllocBRPPool(reservation_start + kBRPOffset));
  PA_DCHECK(is_in_regular_pool ==
            IsManagedByPartitionAllocRegularPool(reservation_start));
  PA_DCHECK(is_in_configurable_pool ==
            IsManagedByPartitionAllocConfigurablePool(reservation_start));
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  PA_DCHECK(is_in_thread_isolated_pool ==
            IsManagedByPartitionAllocThreadIsolatedPool(reservation_start));
#endif
  PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)

  return reservation_start;
}
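
// A minimal usage sketch (hypothetical caller; |address| is assumed to point
// into a super page that is already known to be allocated by PartitionAlloc):
//
//   if (uintptr_t reservation_start = GetDirectMapReservationStart(address)) {
//     // |address| lies inside a direct-map reservation that begins at
//     // |reservation_start|; its offset within the reservation is:
//     uintptr_t offset_in_reservation = address - reservation_start;
//   } else {
//     // |address| belongs to a normal-bucket super page.
//   }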

#if BUILDFLAG(HAS_64_BIT_POINTERS)
// If the given address doesn't point to direct-map allocated memory,
// returns 0.
// This variant has better performance than the regular one on 64-bit builds if
// the Pool that an allocation belongs to is known.
PA_ALWAYS_INLINE uintptr_t
GetDirectMapReservationStart(uintptr_t address,
                             pool_handle pool,
                             uintptr_t offset_in_pool) {
  PA_DCHECK(AddressPoolManager::GetInstance().GetPoolBaseAddress(pool) +
                offset_in_pool ==
            address);
  uint16_t* offset_ptr = ReservationOffsetPointer(pool, offset_in_pool);
  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
  if (*offset_ptr == kOffsetTagNormalBuckets) {
    return 0;
  }
  uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
  PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
  return reservation_start;
}
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

// Returns true if |address| is the beginning of the first super page of a
// reservation, i.e. either a normal-bucket super page, or the first super
// page of a direct-map reservation.
// |address| must belong to an allocated super page.
PA_ALWAYS_INLINE bool IsReservationStart(uintptr_t address) {
  uint16_t* offset_ptr = ReservationOffsetPointer(address);
  PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
  return ((*offset_ptr == kOffsetTagNormalBuckets) || (*offset_ptr == 0)) &&
         (address % kSuperPageSize == 0);
}

// Returns true if |address| belongs to a normal-bucket super page.
PA_ALWAYS_INLINE bool IsManagedByNormalBuckets(uintptr_t address) {
  uint16_t* offset_ptr = ReservationOffsetPointer(address);
  return *offset_ptr == kOffsetTagNormalBuckets;
}

// Returns true if |address| belongs to a direct-map region.
PA_ALWAYS_INLINE bool IsManagedByDirectMap(uintptr_t address) {
  uint16_t* offset_ptr = ReservationOffsetPointer(address);
  return *offset_ptr != kOffsetTagNormalBuckets &&
         *offset_ptr != kOffsetTagNotAllocated;
}

// Returns true if |address| belongs to a normal-bucket super page or a
// direct-map region, i.e. belongs to an allocated super page.
PA_ALWAYS_INLINE bool IsManagedByNormalBucketsOrDirectMap(uintptr_t address) {
  uint16_t* offset_ptr = ReservationOffsetPointer(address);
  return *offset_ptr != kOffsetTagNotAllocated;
}
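
// A minimal sketch of how the predicates above relate (|address| is assumed
// to be managed by PartitionAlloc, i.e. to lie inside an allocated super
// page):
//
//   PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(address));
//   if (IsManagedByDirectMap(address)) {
//     uintptr_t reservation_start = GetDirectMapReservationStart(address);
//     // ... handle a direct-map allocation ...
//   } else {
//     PA_DCHECK(IsManagedByNormalBuckets(address));
//     // ... handle a normal-bucket super page ...
//   }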

}  // namespace partition_alloc::internal

#endif  // PARTITION_ALLOC_RESERVATION_OFFSET_TABLE_H_