// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/address_pool_manager.h"

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <limits>

#include "build/build_config.h"
#include "partition_alloc/address_space_stats.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/reservation_offset_table.h"
#include "partition_alloc/thread_isolation/alignment.h"

#if BUILDFLAG(IS_APPLE) || BUILDFLAG(ENABLE_THREAD_ISOLATION)
#include <sys/mman.h>
#endif

namespace partition_alloc::internal {

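// The process-wide instance; all callers share it via GetInstance() below.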
AddressPoolManager AddressPoolManager::singleton_;

// static
AddressPoolManager& AddressPoolManager::GetInstance() {
  return singleton_;
}

namespace {
// Allocations are all performed on behalf of PartitionAlloc.
constexpr PageTag kPageTag = PageTag::kPartitionAlloc;

}  // namespace

#if BUILDFLAG(HAS_64_BIT_POINTERS)

namespace {

// This will crash if the range cannot be decommitted.
void DecommitPages(uintptr_t address, size_t size) {
  // Callers rely on the pages being zero-initialized when recommitting them.
  // |DecommitSystemPages| doesn't guarantee this on all operating systems, in
  // particular on macOS, but |DecommitAndZeroSystemPages| does.
  DecommitAndZeroSystemPages(address, size, kPageTag);
}

}  // namespace

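// Registers the address range backing |handle|'s pool. The range must be
// super-page-aligned, and each pool may be initialized only once.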
void AddressPoolManager::Add(pool_handle handle,
                             uintptr_t ptr,
                             size_t length) {
  PA_DCHECK(!(ptr & kSuperPageOffsetMask));
  PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
  PA_CHECK(handle > 0 && handle <= std::size(pools_));

  Pool* pool = GetPool(handle);
  PA_CHECK(!pool->IsInitialized());
  pool->Initialize(ptr, length);
}

void AddressPoolManager::GetPoolUsedSuperPages(
    pool_handle handle,
    std::bitset<kMaxSuperPagesInPool>& used) {
  Pool* pool = GetPool(handle);
  if (!pool) {
    return;
  }

  pool->GetUsedSuperPages(used);
}

uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
  Pool* pool = GetPool(handle);
  if (!pool) {
    return 0;
  }

  return pool->GetBaseAddress();
}

void AddressPoolManager::ResetForTesting() {
  for (size_t i = 0; i < std::size(pools_); ++i) {
    pools_[i].Reset();
  }
}

void AddressPoolManager::Remove(pool_handle handle) {
  Pool* pool = GetPool(handle);
  PA_DCHECK(pool->IsInitialized());
  pool->Reset();
}

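// A minimal usage sketch (assuming a valid, initialized |handle|): reserve a
// chunk from the pool, then return it when done:
//   uintptr_t base =
//       AddressPoolManager::GetInstance().Reserve(handle, 0, kSuperPageSize);
//   ...
//   AddressPoolManager::GetInstance().UnreserveAndDecommit(handle, base,
//                                                          kSuperPageSize);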
uintptr_t AddressPoolManager::Reserve(pool_handle handle,
                                      uintptr_t requested_address,
                                      size_t length) {
  Pool* pool = GetPool(handle);
  if (!requested_address) {
    return pool->FindChunk(length);
  }
  const bool is_available = pool->TryReserveChunk(requested_address, length);
  if (is_available) {
    return requested_address;
  }
  return pool->FindChunk(length);
}

void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              uintptr_t address,
                                              size_t length) {
  PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
  Pool* pool = GetPool(handle);
  PA_DCHECK(pool->IsInitialized());
  DecommitPages(address, length);
  pool->FreeChunk(address, length);
}

void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
  PA_CHECK(ptr != 0);
  PA_CHECK(!(ptr & kSuperPageOffsetMask));
  PA_CHECK(!(length & kSuperPageOffsetMask));
  address_begin_ = ptr;
#if BUILDFLAG(PA_DCHECK_IS_ON)
  address_end_ = ptr + length;
  PA_DCHECK(address_begin_ < address_end_);
#endif

  total_bits_ = length / kSuperPageSize;
  PA_CHECK(total_bits_ <= kMaxSuperPagesInPool);

  ScopedGuard scoped_lock(lock_);
  alloc_bitset_.reset();
  bit_hint_ = 0;
}

bool AddressPoolManager::Pool::IsInitialized() {
  return address_begin_ != 0;
}

void AddressPoolManager::Pool::Reset() {
  address_begin_ = 0;
}

void AddressPoolManager::Pool::GetUsedSuperPages(
    std::bitset<kMaxSuperPagesInPool>& used) {
  ScopedGuard scoped_lock(lock_);

  PA_DCHECK(IsInitialized());
  used = alloc_bitset_;
}

uintptr_t AddressPoolManager::Pool::GetBaseAddress() {
  PA_DCHECK(IsInitialized());
  return address_begin_;
}

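// Finds the first free run of |requested_size| bytes with a first-fit scan
// over the allocation bitset and returns its start address, or 0 if the pool
// has no large-enough run.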
uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
  ScopedGuard scoped_lock(lock_);

  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
  const size_t need_bits = requested_size >> kSuperPageShift;

  // Use first-fit policy to find an available chunk from free chunks. Start
  // from |bit_hint_|, because we know there are no free chunks before.
  size_t beg_bit = bit_hint_;
  size_t curr_bit = bit_hint_;
  while (true) {
    // |end_bit| points 1 past the last bit that needs to be 0. If it goes past
    // |total_bits_|, return 0 to signal no free chunk was found.
    size_t end_bit = beg_bit + need_bits;
    if (end_bit > total_bits_) {
      return 0;
    }

    bool found = true;
    for (; curr_bit < end_bit; ++curr_bit) {
      if (alloc_bitset_.test(curr_bit)) {
        // The bit was set, so this chunk isn't entirely free. Set |found=false|
        // to ensure the outer loop continues. However, continue the inner loop
        // to set |beg_bit| just past the last set bit in the investigated
        // chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
        // next outer loop pass from checking the same bits.
        beg_bit = curr_bit + 1;
        found = false;
        if (bit_hint_ == curr_bit) {
          ++bit_hint_;
        }
      }
    }

    // An entire [beg_bit, end_bit) region of 0s was found. Fill them with 1s
    // (to mark as allocated) and return the allocated address.
    if (found) {
      for (size_t i = beg_bit; i < end_bit; ++i) {
        PA_DCHECK(!alloc_bitset_.test(i));
        alloc_bitset_.set(i);
      }
      if (bit_hint_ == beg_bit) {
        bit_hint_ = end_bit;
      }
      uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
#if BUILDFLAG(PA_DCHECK_IS_ON)
      PA_DCHECK(address + requested_size <= address_end_);
#endif
      return address;
    }
  }

  PA_NOTREACHED();
}

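// Attempts to reserve the chunk at exactly |address|. Returns false if the
// requested region extends past the pool or overlaps an existing reservation.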
bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
                                               size_t requested_size) {
  ScopedGuard scoped_lock(lock_);
  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
  const size_t begin_bit = (address - address_begin_) / kSuperPageSize;
  const size_t need_bits = requested_size / kSuperPageSize;
  const size_t end_bit = begin_bit + need_bits;
  // Check that requested address is not too high.
  if (end_bit > total_bits_) {
    return false;
  }
  // Check if any bit of the requested region is set already.
  for (size_t i = begin_bit; i < end_bit; ++i) {
    if (alloc_bitset_.test(i)) {
      return false;
    }
  }
  // Otherwise, set the bits.
  for (size_t i = begin_bit; i < end_bit; ++i) {
    alloc_bitset_.set(i);
  }
  return true;
}

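// Releases a previously reserved chunk, clearing its bits and moving
// |bit_hint_| back so the freed range is considered by the next FindChunk().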
void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
  ScopedGuard scoped_lock(lock_);

  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(free_size & kSuperPageOffsetMask));

  PA_DCHECK(address_begin_ <= address);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  PA_DCHECK(address + free_size <= address_end_);
#endif

  const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
  const size_t end_bit = beg_bit + free_size / kSuperPageSize;
  for (size_t i = beg_bit; i < end_bit; ++i) {
    PA_DCHECK(alloc_bitset_.test(i));
    alloc_bitset_.reset(i);
  }
  bit_hint_ = std::min(bit_hint_, beg_bit);
}

void AddressPoolManager::Pool::GetStats(PoolStats* stats) {
  std::bitset<kMaxSuperPagesInPool> pages;
  size_t i;
  {
    ScopedGuard scoped_lock(lock_);
    pages = alloc_bitset_;
    i = bit_hint_;
  }

  stats->usage = pages.count();

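  // Find the longest run of clear (free) bits. Starting the scan at
  // |bit_hint_| is safe because, by invariant, there are no free chunks
  // before it.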
  size_t largest_run = 0;
  size_t current_run = 0;
  for (; i < total_bits_; ++i) {
    if (!pages[i]) {
      current_run += 1;
      continue;
    } else if (current_run > largest_run) {
      largest_run = current_run;
    }
    current_run = 0;
  }

  // The loop may have ended while still inside a run of zeros; account for
  // that final run.
  if (current_run > largest_run) {
    largest_run = current_run;
  }
  stats->largest_available_reservation = largest_run;
}

void AddressPoolManager::GetPoolStats(const pool_handle handle,
                                      PoolStats* stats) {
  Pool* pool = GetPool(handle);
  if (!pool->IsInitialized()) {
    return;
  }
  pool->GetStats(stats);
}

bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
  // Get 64-bit pool stats.
  GetPoolStats(kRegularPoolHandle, &stats->regular_pool_stats);
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  GetPoolStats(kBRPPoolHandle, &stats->brp_pool_stats);
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  if (IsConfigurablePoolAvailable()) {
    GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats);
  }
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  GetPoolStats(kThreadIsolatedPoolHandle, &stats->thread_isolated_pool_stats);
#endif
  return true;
}

#else  // BUILDFLAG(HAS_64_BIT_POINTERS)

static_assert(
    kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
        0,
    "kSuperPageSize must be a multiple of kBytesPer1BitOfBRPPoolBitmap.");
static_assert(
    kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap > 0,
    "kSuperPageSize must be no smaller than kBytesPer1BitOfBRPPoolBitmap.");
static_assert(AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap >=
                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
              "kGuardBitsOfBRPPoolBitmap must be larger than or equal to "
              "kGuardOffsetOfBRPPoolBitmap.");

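// Helpers that mark or clear a contiguous run of bits, DCHECKing that each
// bit actually changes state.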
template <size_t bitsize>
void SetBitmap(std::bitset<bitsize>& bitmap,
               size_t start_bit,
               size_t bit_length) {
  const size_t end_bit = start_bit + bit_length;
  PA_DCHECK(start_bit <= bitsize);
  PA_DCHECK(end_bit <= bitsize);

  for (size_t i = start_bit; i < end_bit; ++i) {
    PA_DCHECK(!bitmap.test(i));
    bitmap.set(i);
  }
}

template <size_t bitsize>
void ResetBitmap(std::bitset<bitsize>& bitmap,
                 size_t start_bit,
                 size_t bit_length) {
  const size_t end_bit = start_bit + bit_length;
  PA_DCHECK(start_bit <= bitsize);
  PA_DCHECK(end_bit <= bitsize);

  for (size_t i = start_bit; i < end_bit; ++i) {
    PA_DCHECK(bitmap.test(i));
    bitmap.reset(i);
  }
}

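// With 32-bit pointers there is no pre-reserved pool to carve chunks from:
// each reservation maps fresh, inaccessible pages directly from the OS, and
// the bitmaps below only track which regions belong to which pool.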
uintptr_t AddressPoolManager::Reserve(pool_handle handle,
                                      uintptr_t requested_address,
                                      size_t length) {
  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
  uintptr_t address =
      AllocPages(requested_address, length, kSuperPageSize,
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 kPageTag);
  return address;
}

void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              uintptr_t address,
                                              size_t length) {
  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
  FreePages(address, length);
}

void AddressPoolManager::MarkUsed(pool_handle handle,
                                  uintptr_t address,
                                  size_t length) {
  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, the BRP pool isn't used.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  if (handle == kBRPPoolHandle) {
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);

    // Make IsManagedByPartitionAllocBRPPool() return false when an address
    // inside the first or the last PartitionPageSize()-bytes block is given:
    //
    //          ------+---+---------------+---+----
    // memory   ..... | B | managed by PA | B | ...
    // regions  ------+---+---------------+---+----
    //
    // B: PartitionPageSize()-bytes block. This is used internally by the
    // allocator and is not available for callers.
    //
    // This is required to avoid a crash caused by the following code:
    //   {
    //     // Assume this allocation happens outside of PartitionAlloc.
    //     raw_ptr<T> ptr = new T[20];
    //     for (size_t i = 0; i < 20; i++) { ptr++; }
    //     // |ptr| may point to an address inside 'B'.
    //   }
    //
    // Suppose that |ptr| points to an address inside B after the loop. If
    // IsManagedByPartitionAllocBRPPool(ptr) were to return true, ~raw_ptr<T>()
    // would crash, since the memory is not allocated by PartitionAlloc.
    SetBitmap(AddressPoolManagerBitmap::brp_pool_bits_,
              (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
              (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
                  AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
  } else
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  {
    PA_DCHECK(handle == kRegularPoolHandle);
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
        0);
    SetBitmap(AddressPoolManagerBitmap::regular_pool_bits_,
              address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
              length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
  }
}


void AddressPoolManager::MarkUnused(pool_handle handle,
                                    uintptr_t address,
                                    size_t length) {
  // Address regions allocated for normal buckets are never released, so this
  // function can only be called for direct map. However, do not DCHECK on
  // IsManagedByDirectMap(address), because many tests exercise this function
  // using small allocations.

  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, the BRP pool isn't used.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  if (handle == kBRPPoolHandle) {
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);

    // Make IsManagedByPartitionAllocBRPPool() return false when an address
    // inside the first or the last PartitionPageSize()-bytes block is given.
    // (See the MarkUsed comment.)
    ResetBitmap(
        AddressPoolManagerBitmap::brp_pool_bits_,
        (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
            AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
        (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
            AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
  } else
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  {
    PA_DCHECK(handle == kRegularPoolHandle);
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
        0);
    ResetBitmap(
        AddressPoolManagerBitmap::regular_pool_bits_,
        address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
        length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
  }
}

void AddressPoolManager::ResetForTesting() {
  ScopedGuard guard(AddressPoolManagerBitmap::GetLock());
  AddressPoolManagerBitmap::regular_pool_bits_.reset();
  AddressPoolManagerBitmap::brp_pool_bits_.reset();
}

namespace {

// Counts super pages in use represented by `bitmap`.
template <size_t bitsize>
size_t CountUsedSuperPages(const std::bitset<bitsize>& bitmap,
                           const size_t bits_per_super_page) {
  size_t count = 0;
  size_t bit_index = 0;

  // Stride over super pages.
  for (size_t super_page_index = 0; bit_index < bitsize; ++super_page_index) {
    // Stride over the bits comprising the super page.
    for (bit_index = super_page_index * bits_per_super_page;
         bit_index < (super_page_index + 1) * bits_per_super_page &&
         bit_index < bitsize;
         ++bit_index) {
      if (bitmap[bit_index]) {
        count += 1;
        // Move on to the next super page.
        break;
      }
    }
  }
  return count;
}

}  // namespace

bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
  std::bitset<AddressPoolManagerBitmap::kRegularPoolBits> regular_pool_bits;
  std::bitset<AddressPoolManagerBitmap::kBRPPoolBits> brp_pool_bits;
  {
    ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
    regular_pool_bits = AddressPoolManagerBitmap::regular_pool_bits_;
    brp_pool_bits = AddressPoolManagerBitmap::brp_pool_bits_;
  }  // scoped_lock

  // Pool usage is read out from the address pool bitmaps.
  // The output stats are sized in super pages, so we translate
  // the bitmaps into super page usage.
  static_assert(
      kSuperPageSize %
              AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap ==
          0,
      "information loss when calculating metrics");
  constexpr size_t kRegularPoolBitsPerSuperPage =
      kSuperPageSize /
      AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;

  // Get 32-bit pool usage.
  stats->regular_pool_stats.usage =
      CountUsedSuperPages(regular_pool_bits, kRegularPoolBitsPerSuperPage);
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  static_assert(
      kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
          0,
      "information loss when calculating metrics");
  constexpr size_t kBRPPoolBitsPerSuperPage =
      kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap;
  stats->brp_pool_stats.usage =
      CountUsedSuperPages(brp_pool_bits, kBRPPoolBitsPerSuperPage);

  // Get blocklist size.
  for (const auto& blocked :
       AddressPoolManagerBitmap::brp_forbidden_super_page_map_) {
    if (blocked.load(std::memory_order_relaxed)) {
      stats->blocklist_size += 1;
    }
  }

  // Count failures in finding non-blocklisted addresses.
  stats->blocklist_hit_count =
      AddressPoolManagerBitmap::blocklist_hit_count_.load(
          std::memory_order_relaxed);
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  return true;
}

#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
  AddressSpaceStats stats{};
  if (GetStats(&stats)) {
    dumper->DumpStats(&stats);
  }
}

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
// This function just exists to static_assert the layout of the private fields
// in Pool.
void AddressPoolManager::AssertThreadIsolatedLayout() {
  constexpr size_t last_pool_offset =
      offsetof(AddressPoolManager, pools_) + sizeof(Pool) * (kNumPools - 1);
  constexpr size_t alloc_bitset_offset =
      last_pool_offset + offsetof(Pool, alloc_bitset_);
  static_assert(alloc_bitset_offset % PA_THREAD_ISOLATED_ALIGN_SZ == 0);
  static_assert(sizeof(AddressPoolManager) % PA_THREAD_ISOLATED_ALIGN_SZ == 0);
}
#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)

}  // namespace partition_alloc::internal