// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/partition_address_space.h"

#include <array>
#include <bit>
#include <cstddef>
#include <cstdint>
#include <ostream>
#include <string>

#include "build/build_config.h"
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/compressed_pointer.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/alias.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/thread_isolation/thread_isolation.h"

#if BUILDFLAG(IS_IOS)
#include <mach-o/dyld.h>
#endif

#if BUILDFLAG(IS_WIN)
#include <windows.h>
#endif  // BUILDFLAG(IS_WIN)

#if PA_CONFIG(ENABLE_SHADOW_METADATA) || BUILDFLAG(ENABLE_THREAD_ISOLATION)
#include <sys/mman.h>
#endif

namespace partition_alloc::internal {

#if BUILDFLAG(HAS_64_BIT_POINTERS)

namespace {

#if BUILDFLAG(IS_WIN)

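// The handlers below are deliberately PA_NOINLINE and use
// PA_NO_CODE_FOLDING(), so each failure mode keeps a distinct stack frame
// that identical-code folding cannot merge; crash reports can then tell the
// two failures apart.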
PA_NOINLINE void HandlePoolAllocFailureOutOfVASpace() {
  PA_NO_CODE_FOLDING();
  PA_CHECK(false);
}

PA_NOINLINE void HandlePoolAllocFailureOutOfCommitCharge() {
  PA_NO_CODE_FOLDING();
  PA_CHECK(false);
}
#endif  // BUILDFLAG(IS_WIN)

PA_NOINLINE void HandlePoolAllocFailure() {
  PA_NO_CODE_FOLDING();
  uint32_t alloc_page_error_code = GetAllocPageErrorCode();
  PA_DEBUG_DATA_ON_STACK("error", static_cast<size_t>(alloc_page_error_code));
  // It's important to easily differentiate these two failures on Windows, so
  // crash with different stacks.
#if BUILDFLAG(IS_WIN)
  if (alloc_page_error_code == ERROR_NOT_ENOUGH_MEMORY) {
    // The error code says NOT_ENOUGH_MEMORY, but since we only do MEM_RESERVE,
    // it must be VA space exhaustion.
    HandlePoolAllocFailureOutOfVASpace();
  } else if (alloc_page_error_code == ERROR_COMMITMENT_LIMIT ||
             alloc_page_error_code == ERROR_COMMITMENT_MINIMUM) {
    // Should not happen, since as of Windows 8.1+, reserving address space
    // should not be charged against the commit limit, aside from a very small
    // amount per 64 kiB block. Keep this path anyway, to check in crash
    // reports.
    HandlePoolAllocFailureOutOfCommitCharge();
  } else
#endif  // BUILDFLAG(IS_WIN)
  {
    PA_CHECK(false);
  }
}

}  // namespace

PartitionAddressSpace::PoolSetup PartitionAddressSpace::setup_;

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
std::ptrdiff_t PartitionAddressSpace::regular_pool_shadow_offset_ = 0;
std::ptrdiff_t PartitionAddressSpace::brp_pool_shadow_offset_ = 0;
#endif

#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#if !BUILDFLAG(IS_IOS)
#error Dynamic pool size is only supported on iOS.
#endif

namespace {
bool IsIOSTestProcess() {
  // On iOS, only applications with the extended virtual addressing entitlement
  // can use a large address space. Since Earl Grey test runner apps cannot get
  // entitlements, they must use a much smaller pool size. Similarly,
  // integration tests for ChromeWebView end up with two PartitionRoots since
  // both the integration tests and ChromeWebView have a copy of base/. Even
  // with the entitlement, there is insufficient address space for two
  // PartitionRoots, so a smaller pool size is needed.

  // Use a fixed buffer size to avoid allocation inside the allocator.
  constexpr size_t path_buffer_size = 8192;
  char executable_path[path_buffer_size];

  uint32_t executable_length = path_buffer_size;
  int rv = _NSGetExecutablePath(executable_path, &executable_length);
  PA_CHECK(!rv);
  size_t executable_path_length =
      std::char_traits<char>::length(executable_path);
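  // Returns true iff the executable path ends with the given suffix. Uses
  // std::char_traits directly so that no allocation happens inside the
  // allocator.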
  auto has_suffix = [&](const char* suffix) -> bool {
    size_t suffix_length = std::char_traits<char>::length(suffix);
    if (executable_path_length < suffix_length) {
      return false;
    }
    return std::char_traits<char>::compare(
               executable_path + (executable_path_length - suffix_length),
               suffix, suffix_length) == 0;
  };

  return has_suffix("Runner") || has_suffix("ios_web_view_inttests");
}
}  // namespace

PA_ALWAYS_INLINE size_t PartitionAddressSpace::RegularPoolSize() {
  return IsIOSTestProcess() ? kRegularPoolSizeForIOSTestProcess
                            : kRegularPoolSize;
}
PA_ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
  return IsIOSTestProcess() ? kBRPPoolSizeForIOSTestProcess : kBRPPoolSize;
}
#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)

void PartitionAddressSpace::Init() {
  if (IsInitialized()) {
    return;
  }

  size_t regular_pool_size = RegularPoolSize();
  size_t brp_pool_size = BRPPoolSize();

#if BUILDFLAG(GLUE_CORE_POOLS)
  // Gluing core pools (regular & BRP) makes sense only when both pools are of
  // the same size. This is the only way we can check belonging to either of
  // the two with a single bitmask operation.
  PA_CHECK(regular_pool_size == brp_pool_size);

  // TODO(crbug.com/1362969): Support PA_ENABLE_SHADOW_METADATA.
  int pools_fd = -1;

  size_t glued_pool_sizes = regular_pool_size * 2;
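  // Reserve both pools as one contiguous, double-size region, aligned to the
  // combined size:
  //   |<----- regular pool ----->|<------- BRP pool ------->|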
  // Note that the BRP pool must be preceded by a "forbidden zone", which is
  // conveniently taken care of by the last guard page of the regular pool.
  setup_.regular_pool_base_address_ =
      AllocPages(glued_pool_sizes, glued_pool_sizes,
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kPartitionAlloc, pools_fd);
  if (!setup_.regular_pool_base_address_) {
    HandlePoolAllocFailure();
  }
  setup_.brp_pool_base_address_ =
      setup_.regular_pool_base_address_ + regular_pool_size;
#else  // BUILDFLAG(GLUE_CORE_POOLS)
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
  int regular_pool_fd = memfd_create("/regular_pool", MFD_CLOEXEC);
#else
  int regular_pool_fd = -1;
#endif
  setup_.regular_pool_base_address_ =
      AllocPages(regular_pool_size, regular_pool_size,
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kPartitionAlloc, regular_pool_fd);
  if (!setup_.regular_pool_base_address_) {
    HandlePoolAllocFailure();
  }

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
  int brp_pool_fd = memfd_create("/brp_pool", MFD_CLOEXEC);
#else
  int brp_pool_fd = -1;
#endif
  // Reserve an extra allocation granularity unit before the BRP pool, but keep
  // the pool aligned at BRPPoolSize(). A pointer immediately past an allocation
  // is a valid pointer, and having a "forbidden zone" before the BRP pool
  // prevents such a pointer from "sneaking into" the pool.
  const size_t kForbiddenZoneSize = PageAllocationGranularity();
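  // The reservation is requested at offset (brp_pool_size - kForbiddenZoneSize)
  // from a brp_pool_size-aligned boundary, so base_address lands exactly
  // kForbiddenZoneSize before that boundary. The actual pool base,
  // base_address + kForbiddenZoneSize, is thus aligned at brp_pool_size, as
  // the alignment sanity checks below verify.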
  uintptr_t base_address = AllocPagesWithAlignOffset(
      0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
      brp_pool_size - kForbiddenZoneSize,
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kInaccessible),
      PageTag::kPartitionAlloc, brp_pool_fd);
  if (!base_address) {
    HandlePoolAllocFailure();
  }
  setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
#endif  // BUILDFLAG(GLUE_CORE_POOLS)

#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
  setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1);
  setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1);
#if BUILDFLAG(GLUE_CORE_POOLS)
  // When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
  // regular pool, effectively forming one virtual pool of twice the size.
  // Adjust the mask appropriately.
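  // Example (illustrative sizes): with 16 GiB (2^34) pools, each pool mask
  // clears the low 34 bits; shifting it left by one clears the low 35 bits,
  // yielding the base mask of the glued 32 GiB core-pool region.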
  setup_.core_pools_base_mask_ = setup_.regular_pool_base_mask_ << 1;
  PA_DCHECK(setup_.core_pools_base_mask_ == (setup_.brp_pool_base_mask_ << 1));
#endif
#endif  // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)

  AddressPoolManager::GetInstance().Add(
      kRegularPoolHandle, setup_.regular_pool_base_address_, regular_pool_size);
  AddressPoolManager::GetInstance().Add(
      kBRPPoolHandle, setup_.brp_pool_base_address_, brp_pool_size);

  // Sanity check pool alignment.
  PA_DCHECK(!(setup_.regular_pool_base_address_ & (regular_pool_size - 1)));
  PA_DCHECK(!(setup_.brp_pool_base_address_ & (brp_pool_size - 1)));
#if BUILDFLAG(GLUE_CORE_POOLS)
  PA_DCHECK(!(setup_.regular_pool_base_address_ & (glued_pool_sizes - 1)));
#endif
  // Sanity check pool membership.
  PA_DCHECK(!IsInRegularPool(setup_.regular_pool_base_address_ - 1));
  PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_));
  PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_ +
                            regular_pool_size - 1));
  PA_DCHECK(
      !IsInRegularPool(setup_.regular_pool_base_address_ + regular_pool_size));
  PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ - 1));
  PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_));
  PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size - 1));
  PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size));
#if BUILDFLAG(GLUE_CORE_POOLS)
  PA_DCHECK(!IsInCorePools(setup_.regular_pool_base_address_ - 1));
  PA_DCHECK(IsInCorePools(setup_.regular_pool_base_address_));
  PA_DCHECK(
      IsInCorePools(setup_.regular_pool_base_address_ + regular_pool_size - 1));
  PA_DCHECK(
      IsInCorePools(setup_.regular_pool_base_address_ + regular_pool_size));
  PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ - 1));
  PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_));
  PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size - 1));
  PA_DCHECK(!IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size));
#endif  // BUILDFLAG(GLUE_CORE_POOLS)

#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
  // Reserve memory for PCScan quarantine card table.
  uintptr_t requested_address = setup_.regular_pool_base_address_;
  uintptr_t actual_address = AddressPoolManager::GetInstance().Reserve(
      kRegularPoolHandle, requested_address, kSuperPageSize);
  PA_CHECK(requested_address == actual_address)
      << "QuarantineCardTable is required to be allocated at the beginning of "
         "the regular pool";
#endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
  // Reserve memory for the shadow pools.
  uintptr_t regular_pool_shadow_address =
      AllocPages(regular_pool_size, regular_pool_size,
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kPartitionAlloc, regular_pool_fd);
  regular_pool_shadow_offset_ =
      regular_pool_shadow_address - setup_.regular_pool_base_address_;
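  // The offset records the (signed) distance from the pool to its shadow
  // region, so a pool address can be translated to its shadow counterpart by
  // adding the corresponding *_shadow_offset_.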

  uintptr_t brp_pool_shadow_address = AllocPagesWithAlignOffset(
      0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
      brp_pool_size - kForbiddenZoneSize,
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kInaccessible),
      PageTag::kPartitionAlloc, brp_pool_fd);
  brp_pool_shadow_offset_ =
      brp_pool_shadow_address - setup_.brp_pool_base_address_;
#endif

#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
  CompressedPointerBaseGlobal::SetBase(setup_.regular_pool_base_address_);
#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
}

void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
                                                 size_t size) {
  // The ConfigurablePool must only be initialized once.
  PA_CHECK(!IsConfigurablePoolInitialized());

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // It's possible that the thread isolated pool has been initialized first, in
  // which case the setup_ memory has been made read-only. Remove the
  // protection temporarily.
  if (IsThreadIsolatedPoolInitialized()) {
    UnprotectThreadIsolatedGlobals();
  }
#endif
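  // The pool must have a power-of-two size within the allowed bounds, and its
  // base must be aligned to that size; these invariants are what make the
  // single-mask membership test for this pool possible.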
  PA_CHECK(pool_base);
  PA_CHECK(size <= kConfigurablePoolMaxSize);
  PA_CHECK(size >= kConfigurablePoolMinSize);
  PA_CHECK(std::has_single_bit(size));
  PA_CHECK(pool_base % size == 0);

  setup_.configurable_pool_base_address_ = pool_base;
  setup_.configurable_pool_base_mask_ = ~(size - 1);
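  // Given the invariants checked above, an address `a` lies in the pool
  // exactly when (a & configurable_pool_base_mask_) == pool_base.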

  AddressPoolManager::GetInstance().Add(
      kConfigurablePoolHandle, setup_.configurable_pool_base_address_, size);

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // Put the metadata protection back in place.
  if (IsThreadIsolatedPoolInitialized()) {
    WriteProtectThreadIsolatedGlobals(setup_.thread_isolation_);
  }
#endif
}

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
void PartitionAddressSpace::InitThreadIsolatedPool(
    ThreadIsolationOption thread_isolation) {
  // The ThreadIsolated pool can't be initialized with conflicting settings.
  if (IsThreadIsolatedPoolInitialized()) {
    PA_CHECK(setup_.thread_isolation_ == thread_isolation);
    return;
  }

  size_t pool_size = ThreadIsolatedPoolSize();
  setup_.thread_isolated_pool_base_address_ =
      AllocPages(pool_size, pool_size,
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kPartitionAlloc);
  if (!setup_.thread_isolated_pool_base_address_) {
    HandlePoolAllocFailure();
  }

  PA_DCHECK(!(setup_.thread_isolated_pool_base_address_ & (pool_size - 1)));
  setup_.thread_isolation_ = thread_isolation;
  AddressPoolManager::GetInstance().Add(
      kThreadIsolatedPoolHandle, setup_.thread_isolated_pool_base_address_,
      pool_size);

  PA_DCHECK(
      !IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ - 1));
  PA_DCHECK(IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_));
  PA_DCHECK(IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ +
                                   pool_size - 1));
  PA_DCHECK(!IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ +
                                    pool_size));

  // TODO(crbug.com/1362969): Support PA_ENABLE_SHADOW_METADATA.
}
#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)

void PartitionAddressSpace::UninitForTesting() {
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  UninitThreadIsolatedPoolForTesting();  // IN-TEST
#endif
#if BUILDFLAG(GLUE_CORE_POOLS)
  // The core pools (regular & BRP) were allocated using a single allocation of
  // double size.
  FreePages(setup_.regular_pool_base_address_, 2 * RegularPoolSize());
#else  // BUILDFLAG(GLUE_CORE_POOLS)
  FreePages(setup_.regular_pool_base_address_, RegularPoolSize());
  // For the BRP pool, the allocation region includes a "forbidden zone" before
  // the pool.
  const size_t kForbiddenZoneSize = PageAllocationGranularity();
  FreePages(setup_.brp_pool_base_address_ - kForbiddenZoneSize,
            BRPPoolSize() + kForbiddenZoneSize);
#endif  // BUILDFLAG(GLUE_CORE_POOLS)
  // Do not free pages for the configurable pool, because its memory is owned
  // by someone else, but deinitialize it nonetheless.
  setup_.regular_pool_base_address_ = kUninitializedPoolBaseAddress;
  setup_.brp_pool_base_address_ = kUninitializedPoolBaseAddress;
  setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
  setup_.configurable_pool_base_mask_ = 0;
  AddressPoolManager::GetInstance().ResetForTesting();
#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
  CompressedPointerBaseGlobal::ResetBaseForTesting();
#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
}

void PartitionAddressSpace::UninitConfigurablePoolForTesting() {
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // It's possible that the thread isolated pool has been initialized first, in
  // which case the setup_ memory has been made read-only. Remove the
  // protection temporarily.
  if (IsThreadIsolatedPoolInitialized()) {
    UnprotectThreadIsolatedGlobals();
  }
#endif
  AddressPoolManager::GetInstance().Remove(kConfigurablePoolHandle);
  setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
  setup_.configurable_pool_base_mask_ = 0;
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // Put the metadata protection back in place.
  if (IsThreadIsolatedPoolInitialized()) {
    WriteProtectThreadIsolatedGlobals(setup_.thread_isolation_);
  }
#endif
}

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
void PartitionAddressSpace::UninitThreadIsolatedPoolForTesting() {
  if (IsThreadIsolatedPoolInitialized()) {
    UnprotectThreadIsolatedGlobals();
#if BUILDFLAG(PA_DCHECK_IS_ON)
    ThreadIsolationSettings::settings.enabled = false;
#endif

    FreePages(setup_.thread_isolated_pool_base_address_,
              ThreadIsolatedPoolSize());
    AddressPoolManager::GetInstance().Remove(kThreadIsolatedPoolHandle);
    setup_.thread_isolated_pool_base_address_ = kUninitializedPoolBaseAddress;
    setup_.thread_isolation_.enabled = false;
  }
}
#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)

#if defined(PARTITION_ALLOCATOR_CONSTANTS_POSIX_NONCONST_PAGE_SIZE)

PageCharacteristics page_characteristics;

#endif

#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

}  // namespace partition_alloc::internal