// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_
#define PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_

#include <array>
#include <atomic>
#include <bitset>
#include <cstddef>
#include <cstdint>
#include <limits>

#include "build/build_config.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_lock.h"

#if !BUILDFLAG(HAS_64_BIT_POINTERS)

namespace partition_alloc {

namespace internal {

// AddressPoolManagerBitmap is a set of bitmaps that track whether a given
// address is in a pool that supports BackupRefPtr, or in a pool that doesn't
// support it. All PartitionAlloc allocations must be in either of the pools.
//
// This code is specific to 32-bit systems.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManagerBitmap {
 public:
  static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
  static constexpr uint64_t kAddressSpaceSize = 4ull * kGiB;

  // For the BRP pool, we use partition page granularity to eliminate the
  // guard pages from the bitmap at the ends:
  // - Eliminating the guard page at the beginning is needed so that a
  //   pointer to the end of an allocation that immediately precedes a super
  //   page in the BRP pool doesn't accidentally fall into that pool.
  // - Eliminating the guard page at the end ensures that the last page of
  //   the address space isn't in the BRP pool. This allows using sentinels
  //   like reinterpret_cast<void*>(-1) without a risk of triggering BRP
  //   logic on an invalid address. (Note, 64-bit systems don't have this
  //   problem, as the upper half of the address space always belongs to the
  //   OS.)
  //
  // Note, direct map allocations also belong to this pool, and the same
  // logic applies to them. It is important to note, however, that the
  // granularity used here has to be the minimum of partition page size and
  // direct map allocation granularity. Since DirectMapAllocationGranularity()
  // is no smaller than PageAllocationGranularity(), we don't need to decrease
  // the bitmap granularity any further.
  static constexpr size_t kBitShiftOfBRPPoolBitmap = PartitionPageShift();
  static constexpr size_t kBytesPer1BitOfBRPPoolBitmap = PartitionPageSize();
  static_assert(kBytesPer1BitOfBRPPoolBitmap == 1 << kBitShiftOfBRPPoolBitmap,
                "");
  static constexpr size_t kGuardOffsetOfBRPPoolBitmap = 1;
  static constexpr size_t kGuardBitsOfBRPPoolBitmap = 2;
  static constexpr size_t kBRPPoolBits =
      kAddressSpaceSize / kBytesPer1BitOfBRPPoolBitmap;
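
  // For illustration only (a sketch, not normative): assuming a typical
  // 16 KiB partition page (kBitShiftOfBRPPoolBitmap == 14), the BRP bitmap
  // holds 4 GiB / 16 KiB = 262,144 bits, i.e. 32 KiB of storage, and an
  // address maps to a bit as follows:
  //
  //   size_t bit_index = address >> kBitShiftOfBRPPoolBitmap;
  //   bool in_brp_pool = brp_pool_bits_[bit_index];
  //
  // The guard constants above shift the marked range inward: when a region
  // is added to the pool, roughly bits
  //   [start_bit + kGuardOffsetOfBRPPoolBitmap,
  //    end_bit - (kGuardBitsOfBRPPoolBitmap - kGuardOffsetOfBRPPoolBitmap))
  // are set, so the guard pages at both ends stay unmarked.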

  // The regular pool may include both normal bucket and direct map
  // allocations, so the bitmap granularity has to be at least as fine as
  // DirectMapAllocationGranularity(). There is no need to eliminate guard
  // pages at the ends, as that is a BackupRefPtr-specific concern, hence no
  // need to lower the granularity to partition page size.
  static constexpr size_t kBitShiftOfRegularPoolBitmap =
      DirectMapAllocationGranularityShift();
  static constexpr size_t kBytesPer1BitOfRegularPoolBitmap =
      DirectMapAllocationGranularity();
  static_assert(kBytesPer1BitOfRegularPoolBitmap ==
                    1 << kBitShiftOfRegularPoolBitmap,
                "");
  static constexpr size_t kRegularPoolBits =
      kAddressSpaceSize / kBytesPer1BitOfRegularPoolBitmap;
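
  // For illustration only (the actual values are platform-dependent): with a
  // 4 KiB direct map allocation granularity (common on Linux), this is
  //   4 GiB / 4 KiB = 1,048,576 bits (128 KiB of storage);
  // with a 64 KiB granularity (as on Windows), it shrinks to
  //   4 GiB / 64 KiB = 65,536 bits (8 KiB).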

  // Returns false for nullptr.
  static bool IsManagedByRegularPool(uintptr_t address) {
    static_assert(
        std::numeric_limits<uintptr_t>::max() >> kBitShiftOfRegularPoolBitmap <
            regular_pool_bits_.size(),
        "The bitmap is too small, will result in unchecked out of bounds "
        "accesses.");
    // It is safe to read |regular_pool_bits_| without a lock since the caller
    // is responsible for guaranteeing that the address is inside a valid
    // allocation and the deallocation call won't race with this call.
    return PA_TS_UNCHECKED_READ(
        regular_pool_bits_)[address >> kBitShiftOfRegularPoolBitmap];
  }

  // Returns false for nullptr.
  static bool IsManagedByBRPPool(uintptr_t address) {
    static_assert(std::numeric_limits<uintptr_t>::max() >>
                          kBitShiftOfBRPPoolBitmap < brp_pool_bits_.size(),
                  "The bitmap is too small, will result in unchecked out of "
                  "bounds accesses.");
    // It is safe to read |brp_pool_bits_| without a lock since the caller
    // is responsible for guaranteeing that the address is inside a valid
    // allocation and the deallocation call won't race with this call.
    return PA_TS_UNCHECKED_READ(
        brp_pool_bits_)[address >> kBitShiftOfBRPPoolBitmap];
  }
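
  // A minimal sketch of the writer side (hypothetical; the real code lives
  // in AddressPoolManager): bits are only mutated under GetLock(), which is
  // what makes the lock-free reads above safe, together with the caller
  // guarantees described in the comments:
  //
  //   void MarkUsed(uintptr_t address, size_t length) {
  //     // Assumes |address| and |length| are granularity-aligned.
  //     ScopedGuard guard(GetLock());
  //     for (uintptr_t a = address; a < address + length;
  //          a += kBytesPer1BitOfRegularPoolBitmap) {
  //       regular_pool_bits_[a >> kBitShiftOfRegularPoolBitmap] = true;
  //     }
  //   }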

#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  static void BanSuperPageFromBRPPool(uintptr_t address) {
    brp_forbidden_super_page_map_[address >> kSuperPageShift].store(
        true, std::memory_order_relaxed);
  }

  static bool IsAllowedSuperPageForBRPPool(uintptr_t address) {
    // The only potentially dangerous scenario in which this check is used is
    // when the assignment of the first raw_ptr<T> object for an address
    // allocated outside the BRP pool races with the allocation of a new
    // super page at the same address. We assume that if raw_ptr<T> is being
    // initialized with a raw pointer, the associated allocation is "alive";
    // otherwise, the issue should be fixed by rewriting the raw pointer
    // variable as raw_ptr<T>. In the worst case, when such a fix is
    // impossible, we should just undo the raw pointer -> raw_ptr<T> rewrite
    // of the problematic field. If the above assumption holds, the existing
    // allocation will prevent us from reserving the super-page region and,
    // thus, from hitting the race condition. Since we rely on that external
    // synchronization, relaxed memory ordering is sufficient.
    return !brp_forbidden_super_page_map_[address >> kSuperPageShift].load(
        std::memory_order_relaxed);
  }
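
  // A minimal sketch of the intended interplay (hypothetical call sites; the
  // real callers are raw_ptr<T> support code and the pool reservation path):
  //
  //   // When a raw_ptr<T> is first assigned an address outside the BRP
  //   // pool, ban that super page so the BRP pool never reuses it:
  //   AddressPoolManagerBitmap::BanSuperPageFromBRPPool(address);
  //
  //   // Before handing a newly reserved super page to the BRP pool, skip
  //   // any banned ones:
  //   if (!AddressPoolManagerBitmap::IsAllowedSuperPageForBRPPool(address)) {
  //     AddressPoolManagerBitmap::IncrementBlocklistHitCount();
  //     // ... try a different super page ...
  //   }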

  static void IncrementBlocklistHitCount() { ++blocklist_hit_count_; }
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

 private:
  friend class AddressPoolManager;

  static Lock& GetLock();

  static std::bitset<kRegularPoolBits> regular_pool_bits_
      PA_GUARDED_BY(GetLock());
  static std::bitset<kBRPPoolBits> brp_pool_bits_ PA_GUARDED_BY(GetLock());
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  static std::array<std::atomic_bool, kAddressSpaceSize / kSuperPageSize>
      brp_forbidden_super_page_map_;
  static std::atomic_size_t blocklist_hit_count_;
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
};

}  // namespace internal

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, the BRP pool isn't used.
  // There is no need to check IsManagedByConfigurablePool, because the
  // Configurable Pool doesn't exist on 32-bit.
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  PA_DCHECK(!internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address));
#endif
  return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address)
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
         || internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address)
#endif
      ;
}

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
  return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address);
}

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
  return internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address);
}

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
    uintptr_t address) {
  // The Configurable Pool is only available on 64-bit builds.
  return false;
}

PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
  // The Configurable Pool is only available on 64-bit builds.
  return false;
}
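
// A minimal usage sketch (hypothetical caller): given an arbitrary address,
// e.g. one extracted from a pointer being checked by BackupRefPtr logic,
// these predicates classify it without taking any locks:
//
//   uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
//   if (partition_alloc::IsManagedByPartitionAllocBRPPool(address)) {
//     // Safe to consult BRP metadata for this address.
//   } else if (partition_alloc::IsManagedByPartitionAllocRegularPool(
//                  address)) {
//     // PartitionAlloc memory, but without BackupRefPtr support.
//   }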

}  // namespace partition_alloc

#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)

#endif  // PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_