// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/page_allocator.h"

#include <atomic>
#include <bit>
#include <cstdint>

#include "build/build_config.h"
#include "partition_alloc/address_space_randomization.h"
#include "partition_alloc/page_allocator_internal.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_lock.h"

#if BUILDFLAG(IS_WIN)
#include <windows.h>
#endif

#if BUILDFLAG(IS_WIN)
#include "partition_alloc/page_allocator_internals_win.h"
#elif BUILDFLAG(IS_POSIX)
#include "partition_alloc/page_allocator_internals_posix.h"
#elif BUILDFLAG(IS_FUCHSIA)
#include "partition_alloc/page_allocator_internals_fuchsia.h"
#else
#error Platform not supported.
#endif

namespace partition_alloc {

namespace {

internal::Lock g_reserve_lock;

// We may reserve/release address space on different threads.
internal::Lock& GetReserveLock() {
  return g_reserve_lock;
}

std::atomic<size_t> g_total_mapped_address_space;

// We only support a single block of reserved address space.
uintptr_t s_reservation_address PA_GUARDED_BY(GetReserveLock()) = 0;
size_t s_reservation_size PA_GUARDED_BY(GetReserveLock()) = 0;

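// Calls SystemAllocPages(); if that fails and the failure means the system is
// genuinely out of |length| bytes of address space (the hint is advisory or
// null), releases the reserved address-space block, if any, and retries once.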
uintptr_t AllocPagesIncludingReserved(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageTag page_tag,
    int file_descriptor_for_shared_alloc = -1) {
  uintptr_t ret =
      internal::SystemAllocPages(address, length, accessibility, page_tag,
                                 file_descriptor_for_shared_alloc);
  if (!ret) {
    const bool cant_alloc_length = internal::kHintIsAdvisory || !address;
    if (cant_alloc_length) {
      // The system cannot allocate |length| bytes. Release any reserved
      // address space and try once more.
      ReleaseReservation();
      ret = internal::SystemAllocPages(address, length, accessibility, page_tag,
                                       file_descriptor_for_shared_alloc);
    }
  }
  return ret;
}

// Trims memory at |base_address| to given |trim_length| and |alignment|.
//
// On failure, on Windows, this function returns 0 and frees memory at
// |base_address|.
uintptr_t TrimMapping(uintptr_t base_address,
                      size_t base_length,
                      size_t trim_length,
                      uintptr_t alignment,
                      uintptr_t alignment_offset,
                      PageAccessibilityConfiguration accessibility) {
  PA_DCHECK(base_length >= trim_length);
  PA_DCHECK(std::has_single_bit(alignment));
  PA_DCHECK(alignment_offset < alignment);
  uintptr_t new_base =
      NextAlignedWithOffset(base_address, alignment, alignment_offset);
  PA_DCHECK(new_base >= base_address);
  size_t pre_slack = new_base - base_address;
  size_t post_slack = base_length - pre_slack - trim_length;
  PA_DCHECK(base_length == trim_length || pre_slack || post_slack);
  PA_DCHECK(pre_slack < base_length);
  PA_DCHECK(post_slack < base_length);
  return internal::TrimMappingInternal(base_address, base_length, trim_length,
                                       accessibility, pre_slack, post_slack);
}

}  // namespace

// Align |address| up to the closest, non-smaller address that gives
// |requested_offset| remainder modulo |alignment|.
//
// Examples for alignment=1024 and requested_offset=64:
//   64 -> 64
//   65 -> 1088
//   1024 -> 1088
//   1088 -> 1088
//   1089 -> 2112
//   2048 -> 2112
uintptr_t NextAlignedWithOffset(uintptr_t address,
                                uintptr_t alignment,
                                uintptr_t requested_offset) {
  PA_DCHECK(std::has_single_bit(alignment));
  PA_DCHECK(requested_offset < alignment);

  uintptr_t actual_offset = address & (alignment - 1);
  uintptr_t new_address;
  if (actual_offset <= requested_offset) {
    new_address = address + requested_offset - actual_offset;
  } else {
    new_address = address + alignment + requested_offset - actual_offset;
  }
  PA_DCHECK(new_address >= address);
  PA_DCHECK(new_address - address < alignment);
  PA_DCHECK(new_address % alignment == requested_offset);

  return new_address;
}

namespace internal {

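// Lowest-level allocation wrapper. Checks that |hint| and |length| honor the
// page-allocation granularity, delegates to the platform-specific
// SystemAllocPagesInternal(), and on success adds |length| to the global
// mapped-address-space counter.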
uintptr_t SystemAllocPages(uintptr_t hint,
                           size_t length,
                           PageAccessibilityConfiguration accessibility,
                           PageTag page_tag,
                           int file_descriptor_for_shared_alloc) {
  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
  PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
  uintptr_t ret = internal::SystemAllocPagesInternal(
      hint, length, accessibility, page_tag, file_descriptor_for_shared_alloc);
  if (ret) {
    g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
  }

  return ret;
}

}  // namespace internal

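// Public entry points. Each AllocPages() overload forwards to
// AllocPagesWithAlignOffset() with an alignment offset of zero.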
uintptr_t AllocPages(size_t length,
                     size_t align,
                     PageAccessibilityConfiguration accessibility,
                     PageTag page_tag,
                     int file_descriptor_for_shared_alloc) {
  return AllocPagesWithAlignOffset(0, length, align, 0, accessibility, page_tag,
                                   file_descriptor_for_shared_alloc);
}
uintptr_t AllocPages(uintptr_t address,
                     size_t length,
                     size_t align,
                     PageAccessibilityConfiguration accessibility,
                     PageTag page_tag) {
  return AllocPagesWithAlignOffset(address, length, align, 0, accessibility,
                                   page_tag);
}
void* AllocPages(void* address,
                 size_t length,
                 size_t align,
                 PageAccessibilityConfiguration accessibility,
                 PageTag page_tag) {
  return reinterpret_cast<void*>(
      AllocPages(reinterpret_cast<uintptr_t>(address), length, align,
                 accessibility, page_tag));
}

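// Allocation proceeds in two phases: a few exact-size attempts at aligned hint
// addresses (random ones on 64-bit targets), followed by an over-allocation of
// (align - granularity) extra bytes that is then trimmed down to |length| at
// the required alignment.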
uintptr_t AllocPagesWithAlignOffset(
    uintptr_t address,
    size_t length,
    size_t align,
    size_t align_offset,
    PageAccessibilityConfiguration accessibility,
    PageTag page_tag,
    int file_descriptor_for_shared_alloc) {
  PA_DCHECK(length >= internal::PageAllocationGranularity());
  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
  PA_DCHECK(align >= internal::PageAllocationGranularity());
  // Alignment must be power of 2 for masking math to work.
  PA_DCHECK(std::has_single_bit(align));
  PA_DCHECK(align_offset < align);
  PA_DCHECK(!(align_offset & internal::PageAllocationGranularityOffsetMask()));
  PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
  uintptr_t align_offset_mask = align - 1;
  uintptr_t align_base_mask = ~align_offset_mask;
  PA_DCHECK(!address || (address & align_offset_mask) == align_offset);

  // If the client passed null as the address, choose a good one.
  if (!address) {
    address = (GetRandomPageBase() & align_base_mask) + align_offset;
  }

  // First try to force an exact-size, aligned allocation from our random base.
#if defined(ARCH_CPU_32_BITS)
  // On 32 bit systems, first try one random aligned address, and then try an
  // aligned address derived from the value of |ret|.
  constexpr int kExactSizeTries = 2;
#else
  // On 64 bit systems, try 3 random aligned addresses.
  constexpr int kExactSizeTries = 3;
#endif

  for (int i = 0; i < kExactSizeTries; ++i) {
    uintptr_t ret =
        AllocPagesIncludingReserved(address, length, accessibility, page_tag,
                                    file_descriptor_for_shared_alloc);
    if (ret) {
      // If the alignment is to our liking, we're done.
      if ((ret & align_offset_mask) == align_offset) {
        return ret;
      }
      // Free the memory and try again.
      FreePages(ret, length);
    } else {
      // |ret| is null; if this try was unhinted, we're OOM.
      if (internal::kHintIsAdvisory || !address) {
        return 0;
      }
    }

#if defined(ARCH_CPU_32_BITS)
    // For small address spaces, try the first aligned address >= |ret|. Note
    // |ret| may be null, in which case |address| becomes null. If
    // |align_offset| is non-zero, this calculation may get us not the first,
    // but the next matching address.
    address = ((ret + align_offset_mask) & align_base_mask) + align_offset;
#else  // defined(ARCH_CPU_64_BITS)
    // Keep trying random addresses on systems that have a large address space.
    address = NextAlignedWithOffset(GetRandomPageBase(), align, align_offset);
#endif
  }

  // Make a larger allocation so we can force alignment.
  size_t try_length = length + (align - internal::PageAllocationGranularity());
  PA_CHECK(try_length >= length);
  uintptr_t ret;

  do {
    // Continue randomizing only on POSIX.
    address = internal::kHintIsAdvisory ? GetRandomPageBase() : 0;
    ret =
        AllocPagesIncludingReserved(address, try_length, accessibility,
                                    page_tag, file_descriptor_for_shared_alloc);
    // The retries are for Windows, where a race can steal our mapping on
    // resize.
  } while (ret && (ret = TrimMapping(ret, try_length, length, align,
                                     align_offset, accessibility)) == 0);

  return ret;
}

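// Unmaps [address, address + length) and subtracts |length| from the global
// mapped-address-space counter.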
void FreePages(uintptr_t address, size_t length) {
  PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
  internal::FreePagesInternal(address, length);
  PA_DCHECK(g_total_mapped_address_space.load(std::memory_order_relaxed) > 0);
  g_total_mapped_address_space.fetch_sub(length, std::memory_order_relaxed);
}
void FreePages(void* address, size_t length) {
  FreePages(reinterpret_cast<uintptr_t>(address), length);
}

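// Changes the protection of the given system pages. The Try variant returns
// whether the change succeeded; SetSystemPagesAccess() returns nothing and
// leaves failure handling to the platform-specific implementation.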
bool TrySetSystemPagesAccess(uintptr_t address,
                             size_t length,
                             PageAccessibilityConfiguration accessibility) {
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  return internal::TrySetSystemPagesAccessInternal(address, length,
                                                   accessibility);
}
bool TrySetSystemPagesAccess(void* address,
                             size_t length,
                             PageAccessibilityConfiguration accessibility) {
  return TrySetSystemPagesAccess(reinterpret_cast<uintptr_t>(address), length,
                                 accessibility);
}

void SetSystemPagesAccess(uintptr_t address,
                          size_t length,
                          PageAccessibilityConfiguration accessibility) {
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  internal::SetSystemPagesAccessInternal(address, length, accessibility);
}

void SetSystemPagesAccess(void* address,
                          size_t length,
                          PageAccessibilityConfiguration accessibility) {
  SetSystemPagesAccess(reinterpret_cast<uintptr_t>(address), length,
                       accessibility);
}

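// Decommits system pages; |accessibility_disposition| tells the platform
// implementation how to handle page permissions as part of the decommit.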
void DecommitSystemPages(
    uintptr_t address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  internal::DecommitSystemPagesInternal(address, length,
                                        accessibility_disposition);
}
void DecommitSystemPages(
    void* address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  DecommitSystemPages(reinterpret_cast<uintptr_t>(address), length,
                      accessibility_disposition);
}

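// Variant of decommit that also zeroes the affected pages; returns whether the
// platform-specific operation succeeded.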
bool DecommitAndZeroSystemPages(uintptr_t address,
                                size_t length,
                                PageTag page_tag) {
  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  return internal::DecommitAndZeroSystemPagesInternal(address, length,
                                                      page_tag);
}

bool DecommitAndZeroSystemPages(void* address,
                                size_t length,
                                PageTag page_tag) {
  return DecommitAndZeroSystemPages(reinterpret_cast<uintptr_t>(address),
                                    length, page_tag);
}

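// Recommits previously decommitted pages with the requested permissions, which
// must not be kInaccessible. The Try variant reports success to the caller;
// RecommitSystemPages() does not.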
void RecommitSystemPages(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  PA_DCHECK(accessibility.permissions !=
            PageAccessibilityConfiguration::kInaccessible);
  internal::RecommitSystemPagesInternal(address, length, accessibility,
                                        accessibility_disposition);
}

bool TryRecommitSystemPages(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // Duplicated because we want errors to be reported at a lower level in the
  // crashing case.
  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  PA_DCHECK(accessibility.permissions !=
            PageAccessibilityConfiguration::kInaccessible);
  return internal::TryRecommitSystemPagesInternal(
      address, length, accessibility, accessibility_disposition);
}

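// Hints to the OS that the contents of these pages are no longer needed; the
// mapping itself stays in place.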
void DiscardSystemPages(uintptr_t address, size_t length) {
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  internal::DiscardSystemPagesInternal(address, length);
}
void DiscardSystemPages(void* address, size_t length) {
  DiscardSystemPages(reinterpret_cast<uintptr_t>(address), length);
}

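// Reserves a single inaccessible block of address space that can later be
// released (see ReleaseReservation(), used on allocation failure above) to
// give the process one more chance to allocate. Returns false if a reservation
// already exists or the underlying allocation fails.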
bool ReserveAddressSpace(size_t size) {
  // To avoid deadlock, call only SystemAllocPages.
  internal::ScopedGuard guard(GetReserveLock());
  if (!s_reservation_address) {
    uintptr_t mem = internal::SystemAllocPages(
        0, size,
        PageAccessibilityConfiguration(
            PageAccessibilityConfiguration::kInaccessible),
        PageTag::kChromium);
    if (mem) {
      // We guarantee this alignment when reserving address space.
      PA_DCHECK(!(mem & internal::PageAllocationGranularityOffsetMask()));
      s_reservation_address = mem;
      s_reservation_size = size;
      return true;
    }
  }
  return false;
}

bool ReleaseReservation() {
  // To avoid deadlock, call only FreePages.
  internal::ScopedGuard guard(GetReserveLock());
  if (!s_reservation_address) {
    return false;
  }

  FreePages(s_reservation_address, s_reservation_size);
  s_reservation_address = 0;
  s_reservation_size = 0;
  return true;
}

bool HasReservationForTesting() {
  internal::ScopedGuard guard(GetReserveLock());
  return s_reservation_address;
}

uint32_t GetAllocPageErrorCode() {
  return internal::s_allocPageErrorCode;
}

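// Returns the total amount of address space currently mapped through
// SystemAllocPages() and not yet released via FreePages().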
size_t GetTotalMappedSize() {
  return g_total_mapped_address_space;
}

#if BUILDFLAG(IS_WIN)
namespace {
bool g_retry_on_commit_failure = false;
}

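// Windows-only accessors for a process-wide flag that (per its name) controls
// whether commit failures are retried rather than treated as fatal right away.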
void SetRetryOnCommitFailure(bool retry_on_commit_failure) {
  g_retry_on_commit_failure = retry_on_commit_failure;
}

bool GetRetryOnCommitFailure() {
  return g_retry_on_commit_failure;
}
#endif

}  // namespace partition_alloc