1 // Copyright 2020 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef PARTITION_ALLOC_PARTITION_ADDRESS_SPACE_H_
6 #define PARTITION_ALLOC_PARTITION_ADDRESS_SPACE_H_
7
8 #include <bit>
9 #include <cstddef>
10 #include <utility>
11
12 #include "build/build_config.h"
13 #include "partition_alloc/address_pool_manager_types.h"
14 #include "partition_alloc/page_allocator_constants.h"
15 #include "partition_alloc/partition_alloc_base/compiler_specific.h"
16 #include "partition_alloc/partition_alloc_base/component_export.h"
17 #include "partition_alloc/partition_alloc_base/notreached.h"
18 #include "partition_alloc/partition_alloc_buildflags.h"
19 #include "partition_alloc/partition_alloc_check.h"
20 #include "partition_alloc/partition_alloc_config.h"
21 #include "partition_alloc/partition_alloc_constants.h"
22 #include "partition_alloc/partition_alloc_forward.h"
23 #include "partition_alloc/thread_isolation/alignment.h"
24
25 #if BUILDFLAG(ENABLE_THREAD_ISOLATION)
26 #include "partition_alloc/thread_isolation/thread_isolation.h"
27 #endif
28
29 // The feature is not applicable to 32-bit address space.
30 #if BUILDFLAG(HAS_64_BIT_POINTERS)
31
32 namespace partition_alloc {
33
34 namespace internal {
35
36 // Manages PartitionAlloc address space, which is split into pools.
37 // See `glossary.md`.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
 public:
  // Represents pool-specific information about a given address.
  struct PoolInfo {
    pool_handle handle;   // Which pool the address belongs to.
    uintptr_t base;       // Base address of that pool.
    uintptr_t base_mask;  // Mask that maps an in-pool address to |base|.
    uintptr_t offset;     // Offset of the address from |base|.
  };

#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
  // Pool sizes (and hence base masks) are chosen at runtime, so the masks
  // are read from |setup_|.
  PA_ALWAYS_INLINE static uintptr_t BRPPoolBaseMask() {
    return setup_.brp_pool_base_mask_;
  }
  PA_ALWAYS_INLINE static uintptr_t RegularPoolBaseMask() {
    return setup_.regular_pool_base_mask_;
  }
#else
  // Pool sizes are fixed at compile time, so the masks are constants.
  PA_ALWAYS_INLINE static constexpr uintptr_t BRPPoolBaseMask() {
    return kBRPPoolBaseMask;
  }
  PA_ALWAYS_INLINE static constexpr uintptr_t RegularPoolBaseMask() {
    return kRegularPoolBaseMask;
  }
#endif

  // Returns pool-specific information (handle, base, base mask and offset)
  // for |address|. The address must lie in one of the initialized pools;
  // otherwise PA_NOTREACHED() is hit.
  PA_ALWAYS_INLINE static PoolInfo GetPoolInfo(uintptr_t address) {
    // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
    PA_DCHECK(!IsInBRPPool(address));
#endif
    pool_handle pool = kNullPoolHandle;
    uintptr_t base = 0;
    uintptr_t base_mask = 0;
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
    if (IsInBRPPool(address)) {
      pool = kBRPPoolHandle;
      base = setup_.brp_pool_base_address_;
      base_mask = BRPPoolBaseMask();
    } else
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
        if (IsInRegularPool(address)) {
      pool = kRegularPoolHandle;
      base = setup_.regular_pool_base_address_;
      base_mask = RegularPoolBaseMask();
    } else if (IsInConfigurablePool(address)) {
      PA_DCHECK(IsConfigurablePoolInitialized());
      pool = kConfigurablePoolHandle;
      base = setup_.configurable_pool_base_address_;
      base_mask = setup_.configurable_pool_base_mask_;
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
    } else if (IsInThreadIsolatedPool(address)) {
      pool = kThreadIsolatedPoolHandle;
      base = setup_.thread_isolated_pool_base_address_;
      base_mask = kThreadIsolatedPoolBaseMask;
#endif
    } else {
      PA_NOTREACHED();
    }
    return PoolInfo{.handle = pool,
                    .base = base,
                    .base_mask = base_mask,
                    .offset = address - base};
  }
  PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMaxSize() {
    return kConfigurablePoolMaxSize;
  }
  PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMinSize() {
    return kConfigurablePoolMinSize;
  }

  // Initialize pools (except for the configurable one).
  //
  // This function must only be called from the main thread.
  static void Init();
  // Initialize the ConfigurablePool at the given address |pool_base|. It must
  // be aligned to the size of the pool. The size must be a power of two and
  // must be within [ConfigurablePoolMinSize(), ConfigurablePoolMaxSize()].
  //
  // This function must only be called from the main thread.
  static void InitConfigurablePool(uintptr_t pool_base, size_t size);
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  static void InitThreadIsolatedPool(ThreadIsolationOption thread_isolation);
  static void UninitThreadIsolatedPoolForTesting();
#endif
  static void UninitForTesting();
  static void UninitConfigurablePoolForTesting();

  // Returns whether the regular and BRP pools have been set up.
  PA_ALWAYS_INLINE static bool IsInitialized() {
    // Either neither or both regular and BRP pool are initialized. The
    // configurable and thread isolated pool are initialized separately.
    if (setup_.regular_pool_base_address_ != kUninitializedPoolBaseAddress) {
      PA_DCHECK(setup_.brp_pool_base_address_ != kUninitializedPoolBaseAddress);
      return true;
    }

    PA_DCHECK(setup_.brp_pool_base_address_ == kUninitializedPoolBaseAddress);
    return false;
  }

  // Returns whether InitConfigurablePool() has been called.
  PA_ALWAYS_INLINE static bool IsConfigurablePoolInitialized() {
    return setup_.configurable_pool_base_address_ !=
           kUninitializedPoolBaseAddress;
  }

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // Returns whether InitThreadIsolatedPool() has been called.
  PA_ALWAYS_INLINE static bool IsThreadIsolatedPoolInitialized() {
    return setup_.thread_isolated_pool_base_address_ !=
           kUninitializedPoolBaseAddress;
  }
#endif

  // Returns false for nullptr.
  PA_ALWAYS_INLINE static bool IsInRegularPool(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
    const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
#else
    constexpr uintptr_t regular_pool_base_mask = kRegularPoolBaseMask;
#endif
    // An uninitialized pool has base address -1, which no masked address can
    // equal, so this correctly returns false before Init().
    return (address & regular_pool_base_mask) ==
           setup_.regular_pool_base_address_;
  }

  PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
    return setup_.regular_pool_base_address_;
  }

  // Returns false for nullptr.
  PA_ALWAYS_INLINE static bool IsInBRPPool(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
    const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
#else
    constexpr uintptr_t brp_pool_base_mask = kBRPPoolBaseMask;
#endif
    return (address & brp_pool_base_mask) == setup_.brp_pool_base_address_;
  }

#if BUILDFLAG(GLUE_CORE_POOLS)
  // Checks whether the address belongs to either regular or BRP pool.
  // Returns false for nullptr.
  PA_ALWAYS_INLINE static bool IsInCorePools(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
    const uintptr_t core_pools_base_mask = setup_.core_pools_base_mask_;
#else
    // When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
    // regular pool, effectively forming one virtual pool of a twice bigger
    // size. Adjust the mask appropriately.
    constexpr uintptr_t core_pools_base_mask = kRegularPoolBaseMask << 1;
#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
    bool ret =
        (address & core_pools_base_mask) == setup_.regular_pool_base_address_;
    PA_DCHECK(ret == (IsInRegularPool(address) || IsInBRPPool(address)));
    return ret;
  }
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
  PA_ALWAYS_INLINE static size_t CorePoolsSize() {
    return RegularPoolSize() * 2;
  }
#else
  PA_ALWAYS_INLINE static constexpr size_t CorePoolsSize() {
    return RegularPoolSize() * 2;
  }
#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#endif  // BUILDFLAG(GLUE_CORE_POOLS)

  // Returns the offset of |address| from the BRP pool base. |address| must be
  // in the BRP pool (DCHECKed).
  PA_ALWAYS_INLINE static uintptr_t OffsetInBRPPool(uintptr_t address) {
    PA_DCHECK(IsInBRPPool(address));
    return address - setup_.brp_pool_base_address_;
  }

  // Returns false for nullptr.
  PA_ALWAYS_INLINE static bool IsInConfigurablePool(uintptr_t address) {
    return (address & setup_.configurable_pool_base_mask_) ==
           setup_.configurable_pool_base_address_;
  }

  PA_ALWAYS_INLINE static uintptr_t ConfigurablePoolBase() {
    return setup_.configurable_pool_base_address_;
  }

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // Returns false for nullptr.
  PA_ALWAYS_INLINE static bool IsInThreadIsolatedPool(uintptr_t address) {
    return (address & kThreadIsolatedPoolBaseMask) ==
           setup_.thread_isolated_pool_base_address_;
  }
#endif

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
  // Returns the offset between a pool and its shadow-metadata mapping.
  // Only the regular and BRP pools have a shadow.
  PA_ALWAYS_INLINE static std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
    if (pool == kRegularPoolHandle) {
      return regular_pool_shadow_offset_;
    } else if (pool == kBRPPoolHandle) {
      return brp_pool_shadow_offset_;
    } else {
      // TODO(crbug.com/1362969): Add shadow for configurable pool as well.
      // Shadow is not created for ConfigurablePool for now, so this part should
      // be unreachable.
      PA_NOTREACHED();
    }
  }
#endif

  // PartitionAddressSpace is a static-only class; it must never be
  // instantiated.
  PartitionAddressSpace() = delete;
  PartitionAddressSpace(const PartitionAddressSpace&) = delete;
  void* operator new(size_t) = delete;
  void* operator new(size_t, void*) = delete;

 private:
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
  // Defined out-of-line; sizes are determined at runtime in this config.
  PA_ALWAYS_INLINE static size_t RegularPoolSize();
  PA_ALWAYS_INLINE static size_t BRPPoolSize();
#else
  // The pool sizes should be as large as maximum whenever possible.
  PA_ALWAYS_INLINE static constexpr size_t RegularPoolSize() {
    return kRegularPoolSize;
  }
  PA_ALWAYS_INLINE static constexpr size_t BRPPoolSize() {
    return kBRPPoolSize;
  }
#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  PA_ALWAYS_INLINE static constexpr size_t ThreadIsolatedPoolSize() {
    return kThreadIsolatedPoolSize;
  }
#endif

  // On 64-bit systems, PA allocates from several contiguous, mutually disjoint
  // pools. The BRP pool is where all allocations have a BRP ref-count, thus
  // pointers pointing there can use a BRP protection against UaF. Allocations
  // in the other pools don't have that.
  //
  // Pool sizes have to be the power of two. Each pool will be aligned at its
  // own size boundary.
  //
  // NOTE! The BRP pool must be preceded by an inaccessible region. This is to
  // prevent a pointer to the end of a non-BRP-pool allocation from falling into
  // the BRP pool, thus triggering BRP mechanism and likely crashing. This
  // "forbidden zone" can be as small as 1B, but it's simpler to just reserve an
  // allocation granularity unit.
  //
  // The ConfigurablePool is an optional Pool that can be created inside an
  // existing mapping provided by the embedder. This Pool can be used when
  // certain PA allocations must be located inside a given virtual address
  // region. One use case for this Pool is V8 Sandbox, which requires that
  // ArrayBuffers be located inside of it.
  static constexpr size_t kRegularPoolSize = kPoolMaxSize;
  static constexpr size_t kBRPPoolSize = kPoolMaxSize;
  static_assert(std::has_single_bit(kRegularPoolSize));
  static_assert(std::has_single_bit(kBRPPoolSize));
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  static constexpr size_t kThreadIsolatedPoolSize = kGiB / 4;
  static_assert(std::has_single_bit(kThreadIsolatedPoolSize));
#endif
  static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
  static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
  static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);
  static_assert(std::has_single_bit(kConfigurablePoolMaxSize));
  static_assert(std::has_single_bit(kConfigurablePoolMinSize));

#if BUILDFLAG(IS_IOS)

#if !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#error iOS is only supported with a dynamically sized GigaCase.
#endif

  // We can't afford pool sizes as large as kPoolMaxSize in iOS EarlGrey tests,
  // since the test process cannot use an extended virtual address space (see
  // crbug.com/1250788).
  static constexpr size_t kRegularPoolSizeForIOSTestProcess = kGiB / 4;
  static constexpr size_t kBRPPoolSizeForIOSTestProcess = kGiB / 4;
  static_assert(kRegularPoolSizeForIOSTestProcess < kRegularPoolSize);
  static_assert(kBRPPoolSizeForIOSTestProcess < kBRPPoolSize);
  static_assert(std::has_single_bit(kRegularPoolSizeForIOSTestProcess));
  static_assert(std::has_single_bit(kBRPPoolSizeForIOSTestProcess));
#endif  // BUILDFLAG(IS_IOS)

#if !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
  // Masks used to easy determine belonging to a pool.
  static constexpr uintptr_t kRegularPoolOffsetMask =
      static_cast<uintptr_t>(kRegularPoolSize) - 1;
  static constexpr uintptr_t kRegularPoolBaseMask = ~kRegularPoolOffsetMask;
  static constexpr uintptr_t kBRPPoolOffsetMask =
      static_cast<uintptr_t>(kBRPPoolSize) - 1;
  static constexpr uintptr_t kBRPPoolBaseMask = ~kBRPPoolOffsetMask;
#endif  // !PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  static constexpr uintptr_t kThreadIsolatedPoolOffsetMask =
      static_cast<uintptr_t>(kThreadIsolatedPoolSize) - 1;
  static constexpr uintptr_t kThreadIsolatedPoolBaseMask =
      ~kThreadIsolatedPoolOffsetMask;
#endif

  // This must be set to such a value that IsIn*Pool() always returns false when
  // the pool isn't initialized.
  static constexpr uintptr_t kUninitializedPoolBaseAddress =
      static_cast<uintptr_t>(-1);

  struct alignas(kPartitionCachelineSize) PA_THREAD_ISOLATED_ALIGN PoolSetup {
    // Before PartitionAddressSpace::Init(), no allocation are allocated from a
    // reserved address space. Therefore, set *_pool_base_address_ initially to
    // -1, so that PartitionAddressSpace::IsIn*Pool() always returns false.
    constexpr PoolSetup() = default;

    // Using a struct to enforce alignment and padding
    uintptr_t regular_pool_base_address_ = kUninitializedPoolBaseAddress;
    uintptr_t brp_pool_base_address_ = kUninitializedPoolBaseAddress;
    uintptr_t configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
    uintptr_t thread_isolated_pool_base_address_ =
        kUninitializedPoolBaseAddress;
#endif
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
    uintptr_t regular_pool_base_mask_ = 0;
    uintptr_t brp_pool_base_mask_ = 0;
#if BUILDFLAG(GLUE_CORE_POOLS)
    uintptr_t core_pools_base_mask_ = 0;
#endif
#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
    uintptr_t configurable_pool_base_mask_ = 0;
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
    ThreadIsolationOption thread_isolation_;
#endif
  };
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // Thread isolation write-protects PoolSetup at page granularity, so it must
  // occupy whole pages.
  static_assert(sizeof(PoolSetup) % SystemPageSize() == 0,
                "PoolSetup has to fill a page(s)");
#else
  static_assert(sizeof(PoolSetup) % kPartitionCachelineSize == 0,
                "PoolSetup has to fill a cacheline(s)");
#endif

  // See the comment describing the address layout above.
  //
  // These are write-once fields, frequently accessed thereafter. Make sure they
  // don't share a cacheline with other, potentially writeable data, through
  // alignment and padding.
  static PoolSetup setup_ PA_CONSTINIT;

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
  static std::ptrdiff_t regular_pool_shadow_offset_;
  static std::ptrdiff_t brp_pool_shadow_offset_;
#endif

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // If we use thread isolation, we need to write-protect its metadata.
  // Allow the function to get access to the PoolSetup.
  friend void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption);
#endif
};
391
GetPoolInfo(uintptr_t address)392 PA_ALWAYS_INLINE PartitionAddressSpace::PoolInfo GetPoolInfo(
393 uintptr_t address) {
394 return PartitionAddressSpace::GetPoolInfo(address);
395 }
396
GetPool(uintptr_t address)397 PA_ALWAYS_INLINE pool_handle GetPool(uintptr_t address) {
398 return GetPoolInfo(address).handle;
399 }
400
OffsetInBRPPool(uintptr_t address)401 PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
402 return PartitionAddressSpace::OffsetInBRPPool(address);
403 }
404
405 #if PA_CONFIG(ENABLE_SHADOW_METADATA)
ShadowPoolOffset(pool_handle pool)406 PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
407 return PartitionAddressSpace::ShadowPoolOffset(pool);
408 }
409 #endif
410
411 } // namespace internal
412
413 // Returns false for nullptr.
IsManagedByPartitionAlloc(uintptr_t address)414 PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
415 // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
416 #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
417 PA_DCHECK(!internal::PartitionAddressSpace::IsInBRPPool(address));
418 #endif
419 return internal::PartitionAddressSpace::IsInRegularPool(address)
420 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
421 || internal::PartitionAddressSpace::IsInBRPPool(address)
422 #endif
423 #if BUILDFLAG(ENABLE_THREAD_ISOLATION)
424 || internal::PartitionAddressSpace::IsInThreadIsolatedPool(address)
425 #endif
426 || internal::PartitionAddressSpace::IsInConfigurablePool(address);
427 }
428
429 // Returns false for nullptr.
IsManagedByPartitionAllocRegularPool(uintptr_t address)430 PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
431 return internal::PartitionAddressSpace::IsInRegularPool(address);
432 }
433
434 // Returns false for nullptr.
IsManagedByPartitionAllocBRPPool(uintptr_t address)435 PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
436 return internal::PartitionAddressSpace::IsInBRPPool(address);
437 }
438
439 #if BUILDFLAG(GLUE_CORE_POOLS)
440 // Checks whether the address belongs to either regular or BRP pool.
441 // Returns false for nullptr.
IsManagedByPartitionAllocCorePools(uintptr_t address)442 PA_ALWAYS_INLINE bool IsManagedByPartitionAllocCorePools(uintptr_t address) {
443 return internal::PartitionAddressSpace::IsInCorePools(address);
444 }
445 #endif // BUILDFLAG(GLUE_CORE_POOLS)
446
447 // Returns false for nullptr.
IsManagedByPartitionAllocConfigurablePool(uintptr_t address)448 PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
449 uintptr_t address) {
450 return internal::PartitionAddressSpace::IsInConfigurablePool(address);
451 }
452
453 #if BUILDFLAG(ENABLE_THREAD_ISOLATION)
454 // Returns false for nullptr.
IsManagedByPartitionAllocThreadIsolatedPool(uintptr_t address)455 PA_ALWAYS_INLINE bool IsManagedByPartitionAllocThreadIsolatedPool(
456 uintptr_t address) {
457 return internal::PartitionAddressSpace::IsInThreadIsolatedPool(address);
458 }
459 #endif
460
IsConfigurablePoolAvailable()461 PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
462 return internal::PartitionAddressSpace::IsConfigurablePoolInitialized();
463 }
464
465 } // namespace partition_alloc
466
467 #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
468
469 #endif // PARTITION_ALLOC_PARTITION_ADDRESS_SPACE_H_
470