// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/address_pool_manager.h"

#include <bitset>
#include <cstdint>
#include <cstring>
#include <memory>

#include "build/build_config.h"
#include "partition_alloc/address_space_stats.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace partition_alloc::internal {

19 class AddressSpaceStatsDumperForTesting final : public AddressSpaceStatsDumper {
20 public:
21 AddressSpaceStatsDumperForTesting() = default;
22 ~AddressSpaceStatsDumperForTesting() final = default;
23
DumpStats(const partition_alloc::AddressSpaceStats * address_space_stats)24 void DumpStats(
25 const partition_alloc::AddressSpaceStats* address_space_stats) override {
26 regular_pool_usage_ = address_space_stats->regular_pool_stats.usage;
27 #if BUILDFLAG(HAS_64_BIT_POINTERS)
28 regular_pool_largest_reservation_ =
29 address_space_stats->regular_pool_stats.largest_available_reservation;
30 #endif
31 #if !BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
32 blocklist_size_ = address_space_stats->blocklist_size;
33 #endif
34 }
35
36 size_t regular_pool_usage_ = 0;
37 size_t regular_pool_largest_reservation_ = 0;
38 size_t blocklist_size_ = 0;
39 };

#if BUILDFLAG(HAS_64_BIT_POINTERS)

43 class AddressPoolManagerForTesting : public AddressPoolManager {
44 public:
45 AddressPoolManagerForTesting() = default;
46 ~AddressPoolManagerForTesting() = default;
47 };
49 class PartitionAllocAddressPoolManagerTest : public testing::Test {
50 protected:
51 PartitionAllocAddressPoolManagerTest() = default;
52 ~PartitionAllocAddressPoolManagerTest() override = default;
53
SetUp()54 void SetUp() override {
55 manager_ = std::make_unique<AddressPoolManagerForTesting>();
56 base_address_ =
57 AllocPages(kPoolSize, kSuperPageSize,
58 PageAccessibilityConfiguration(
59 PageAccessibilityConfiguration::kInaccessible),
60 PageTag::kPartitionAlloc);
61 ASSERT_TRUE(base_address_);
62 manager_->Add(kRegularPoolHandle, base_address_, kPoolSize);
63 pool_ = kRegularPoolHandle;
64 }
65
TearDown()66 void TearDown() override {
67 manager_->Remove(pool_);
68 FreePages(base_address_, kPoolSize);
69 manager_.reset();
70 }
71
GetAddressPoolManager()72 AddressPoolManager* GetAddressPoolManager() { return manager_.get(); }
73
74 static constexpr size_t kPoolSize = kPoolMaxSize;
75 static constexpr size_t kPageCnt = kPoolSize / kSuperPageSize;
76
77 std::unique_ptr<AddressPoolManagerForTesting> manager_;
78 uintptr_t base_address_;
79 pool_handle pool_;
80 };
// Adding a pool larger than the address space reserved for it must die.
TEST_F(PartitionAllocAddressPoolManagerTest, TooLargePool) {
  uintptr_t base_addr = 0x4200000;
  const pool_handle extra_pool = static_cast<pool_handle>(2u);
  static_assert(kNumPools >= 2);

  EXPECT_DEATH_IF_SUPPORTED(
      GetAddressPoolManager()->Add(extra_pool, base_addr,
                                   kPoolSize + kSuperPageSize),
      "");
}
// Reserving the entire pool succeeds, a further reservation fails, and the
// whole pool can be reserved again after it is released.
TEST_F(PartitionAllocAddressPoolManagerTest, ManyPages) {
  EXPECT_EQ(
      GetAddressPoolManager()->Reserve(pool_, 0, kPageCnt * kSuperPageSize),
      base_address_);
  // Pool is full: the next reservation must fail.
  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u);
  GetAddressPoolManager()->UnreserveAndDecommit(pool_, base_address_,
                                                kPageCnt * kSuperPageSize);

  EXPECT_EQ(
      GetAddressPoolManager()->Reserve(pool_, 0, kPageCnt * kSuperPageSize),
      base_address_);
  GetAddressPoolManager()->UnreserveAndDecommit(pool_, base_address_,
                                                kPageCnt * kSuperPageSize);
}
// A fragmented pool with plenty of free single-super-page holes must still
// reject a 2-super-page reservation, and the holes must be reusable.
TEST_F(PartitionAllocAddressPoolManagerTest, PagesFragmented) {
  uintptr_t addrs[kPageCnt];
  for (size_t i = 0; i < kPageCnt; ++i) {
    addrs[i] = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
    EXPECT_EQ(addrs[i], base_address_ + i * kSuperPageSize);
  }
  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u);
  // Free every other super page, so that we have plenty of free space, but
  // none of the empty spaces can fit 2 super pages.
  for (size_t i = 1; i < kPageCnt; i += 2) {
    GetAddressPoolManager()->UnreserveAndDecommit(pool_, addrs[i],
                                                  kSuperPageSize);
  }
  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize), 0u);
  // Reserve freed super pages back, so that there are no free ones.
  for (size_t i = 1; i < kPageCnt; i += 2) {
    addrs[i] = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
    EXPECT_EQ(addrs[i], base_address_ + i * kSuperPageSize);
  }
  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u);
  // Lastly, clean up.
  for (uintptr_t addr : addrs) {
    GetAddressPoolManager()->UnreserveAndDecommit(pool_, addr, kSuperPageSize);
  }
}
// GetPoolUsedSuperPages() must report exactly which super pages are reserved
// as the pool goes from full, to every-other-page, to empty.
TEST_F(PartitionAllocAddressPoolManagerTest, GetUsedSuperpages) {
  uintptr_t addrs[kPageCnt];
  for (size_t i = 0; i < kPageCnt; ++i) {
    addrs[i] = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
    EXPECT_EQ(addrs[i], base_address_ + i * kSuperPageSize);
  }
  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u);

  std::bitset<kMaxSuperPagesInPool> used_super_pages;
  GetAddressPoolManager()->GetPoolUsedSuperPages(pool_, used_super_pages);

  // We expect every bit to be set.
  for (size_t i = 0; i < kPageCnt; ++i) {
    ASSERT_TRUE(used_super_pages.test(i));
  }

  // Free every other super page, so that we have plenty of free space, but
  // none of the empty spaces can fit 2 super pages.
  for (size_t i = 1; i < kPageCnt; i += 2) {
    GetAddressPoolManager()->UnreserveAndDecommit(pool_, addrs[i],
                                                  kSuperPageSize);
  }

  EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize), 0u);

  GetAddressPoolManager()->GetPoolUsedSuperPages(pool_, used_super_pages);

  // We expect every other bit to be set.
  for (size_t i = 0; i < kPageCnt; i++) {
    if (i % 2 == 0) {
      ASSERT_TRUE(used_super_pages.test(i));
    } else {
      ASSERT_FALSE(used_super_pages.test(i));
    }
  }

  // Free the even numbered super pages.
  for (size_t i = 0; i < kPageCnt; i += 2) {
    GetAddressPoolManager()->UnreserveAndDecommit(pool_, addrs[i],
                                                  kSuperPageSize);
  }

  // Finally check to make sure all bits are zero in the used superpage bitset.
  GetAddressPoolManager()->GetPoolUsedSuperPages(pool_, used_super_pages);

  for (size_t i = 0; i < kPageCnt; i++) {
    ASSERT_FALSE(used_super_pages.test(i));
  }
}
// Exercises first-fit placement with an irregular mix of reservation sizes,
// frees, and re-reservations; expected offsets encode the first-fit policy.
TEST_F(PartitionAllocAddressPoolManagerTest, IrregularPattern) {
  uintptr_t a1 = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
  EXPECT_EQ(a1, base_address_);
  uintptr_t a2 = GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize);
  EXPECT_EQ(a2, base_address_ + 1 * kSuperPageSize);
  uintptr_t a3 = GetAddressPoolManager()->Reserve(pool_, 0, 3 * kSuperPageSize);
  EXPECT_EQ(a3, base_address_ + 3 * kSuperPageSize);
  uintptr_t a4 = GetAddressPoolManager()->Reserve(pool_, 0, 4 * kSuperPageSize);
  EXPECT_EQ(a4, base_address_ + 6 * kSuperPageSize);
  uintptr_t a5 = GetAddressPoolManager()->Reserve(pool_, 0, 5 * kSuperPageSize);
  EXPECT_EQ(a5, base_address_ + 10 * kSuperPageSize);

  // Freeing a4 leaves a 4-page hole; a 6-page request must skip it.
  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a4, 4 * kSuperPageSize);
  uintptr_t a6 = GetAddressPoolManager()->Reserve(pool_, 0, 6 * kSuperPageSize);
  EXPECT_EQ(a6, base_address_ + 15 * kSuperPageSize);

  // Freeing a5 merges the hole into [6, 15); a 7-page request fits there.
  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a5, 5 * kSuperPageSize);
  uintptr_t a7 = GetAddressPoolManager()->Reserve(pool_, 0, 7 * kSuperPageSize);
  EXPECT_EQ(a7, base_address_ + 6 * kSuperPageSize);
  uintptr_t a8 = GetAddressPoolManager()->Reserve(pool_, 0, 3 * kSuperPageSize);
  EXPECT_EQ(a8, base_address_ + 21 * kSuperPageSize);
  uintptr_t a9 = GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize);
  EXPECT_EQ(a9, base_address_ + 13 * kSuperPageSize);

  // Adjacent frees must coalesce into one 15-page hole starting at page 6.
  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a7, 7 * kSuperPageSize);
  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a9, 2 * kSuperPageSize);
  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a6, 6 * kSuperPageSize);
  uintptr_t a10 =
      GetAddressPoolManager()->Reserve(pool_, 0, 15 * kSuperPageSize);
  EXPECT_EQ(a10, base_address_ + 6 * kSuperPageSize);

  // Clean up.
  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a1, kSuperPageSize);
  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a2, 2 * kSuperPageSize);
  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a3, 3 * kSuperPageSize);
  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a8, 3 * kSuperPageSize);
  GetAddressPoolManager()->UnreserveAndDecommit(pool_, a10,
                                                15 * kSuperPageSize);
}
// Memory handed back via UnreserveAndDecommit() must read as zero when the
// same range is reserved and recommitted again (no data leaks across reuse).
TEST_F(PartitionAllocAddressPoolManagerTest, DecommittedDataIsErased) {
  uintptr_t address =
      GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
  ASSERT_TRUE(address);
  RecommitSystemPages(address, kSuperPageSize,
                      PageAccessibilityConfiguration(
                          PageAccessibilityConfiguration::kReadWrite),
                      PageAccessibilityDisposition::kRequireUpdate);

  // Dirty the whole super page with a non-zero pattern, then decommit.
  memset(reinterpret_cast<void*>(address), 42, kSuperPageSize);
  GetAddressPoolManager()->UnreserveAndDecommit(pool_, address, kSuperPageSize);

  uintptr_t address2 =
      GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize);
  // First-fit: the same range must be handed out again.
  ASSERT_EQ(address, address2);
  RecommitSystemPages(address2, kSuperPageSize,
                      PageAccessibilityConfiguration(
                          PageAccessibilityConfiguration::kReadWrite),
                      PageAccessibilityDisposition::kRequireUpdate);

  uint32_t sum = 0;
  for (size_t i = 0; i < kSuperPageSize; i++) {
    sum += reinterpret_cast<uint8_t*>(address2)[i];
  }
  EXPECT_EQ(0u, sum) << sum / 42 << " bytes were not zeroed";

  GetAddressPoolManager()->UnreserveAndDecommit(pool_, address2,
                                                kSuperPageSize);
}
// DumpStats() must track usage and the largest available reservation as a
// mid-pool reservation bisects the free space and is then released.
TEST_F(PartitionAllocAddressPoolManagerTest, RegularPoolUsageChanges) {
  AddressSpaceStatsDumperForTesting dumper{};

  GetAddressPoolManager()->DumpStats(&dumper);
  ASSERT_EQ(dumper.regular_pool_usage_, 0ull);
  ASSERT_EQ(dumper.regular_pool_largest_reservation_, kPageCnt);

  // Bisect the pool by reserving a super page in the middle.
  const uintptr_t midpoint_address =
      base_address_ + (kPageCnt / 2) * kSuperPageSize;
  ASSERT_EQ(
      GetAddressPoolManager()->Reserve(pool_, midpoint_address, kSuperPageSize),
      midpoint_address);

  GetAddressPoolManager()->DumpStats(&dumper);
  ASSERT_EQ(dumper.regular_pool_usage_, 1ull);
  // The largest contiguous free run is now only half the pool.
  ASSERT_EQ(dumper.regular_pool_largest_reservation_, kPageCnt / 2);

  GetAddressPoolManager()->UnreserveAndDecommit(pool_, midpoint_address,
                                                kSuperPageSize);

  GetAddressPoolManager()->DumpStats(&dumper);
  ASSERT_EQ(dumper.regular_pool_usage_, 0ull);
  ASSERT_EQ(dumper.regular_pool_largest_reservation_, kPageCnt);
}

#else  // BUILDFLAG(HAS_64_BIT_POINTERS)

// 32-bit path: reserved+marked ranges must be reported as managed by the
// regular pool (and never the BRP pool), with bitmap granularity rounding
// handled at the tail of each reservation.
TEST(PartitionAllocAddressPoolManagerTest, IsManagedByRegularPool) {
  constexpr size_t kAllocCount = 8;
  static const size_t kNumPages[kAllocCount] = {1, 4, 7, 8, 13, 16, 31, 60};
  uintptr_t addrs[kAllocCount];
  for (size_t i = 0; i < kAllocCount; ++i) {
    addrs[i] = AddressPoolManager::GetInstance().Reserve(
        kRegularPoolHandle, 0,
        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
            kNumPages[i]);
    EXPECT_TRUE(addrs[i]);
    // Reservations must be super-page aligned.
    EXPECT_TRUE(!(addrs[i] & kSuperPageOffsetMask));
    AddressPoolManager::GetInstance().MarkUsed(
        kRegularPoolHandle, addrs[i],
        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
            kNumPages[i]);
  }
  for (size_t i = 0; i < kAllocCount; ++i) {
    uintptr_t address = addrs[i];
    // Walk the reservation rounded up to a whole super page; bits past the
    // marked range must read as unmanaged.
    size_t num_pages =
        base::bits::AlignUp(
            kNumPages[i] *
                AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap,
            kSuperPageSize) /
        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;
    for (size_t j = 0; j < num_pages; ++j) {
      if (j < kNumPages[i]) {
        EXPECT_TRUE(AddressPoolManager::IsManagedByRegularPool(address));
      } else {
        EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(address));
      }
      EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(address));
      address += AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;
    }
  }
  for (size_t i = 0; i < kAllocCount; ++i) {
    AddressPoolManager::GetInstance().MarkUnused(
        kRegularPoolHandle, addrs[i],
        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
            kNumPages[i]);
    AddressPoolManager::GetInstance().UnreserveAndDecommit(
        kRegularPoolHandle, addrs[i],
        AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
            kNumPages[i]);
    EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(addrs[i]));
    EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(addrs[i]));
  }
}

#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// 32-bit BRP path: interior system pages of a BRP reservation are managed by
// the BRP pool, while the leading/trailing guard regions (and everything in
// the regular pool) are not.
TEST(PartitionAllocAddressPoolManagerTest, IsManagedByBRPPool) {
  constexpr size_t kAllocCount = 4;
  // Totally (1+3+7+11) * 2MB = 44MB allocation
  static const size_t kNumPages[kAllocCount] = {1, 3, 7, 11};
  uintptr_t addrs[kAllocCount];
  for (size_t i = 0; i < kAllocCount; ++i) {
    addrs[i] = AddressPoolManager::GetInstance().Reserve(
        kBRPPoolHandle, 0, kSuperPageSize * kNumPages[i]);
    EXPECT_TRUE(addrs[i]);
    // Reservations must be super-page aligned.
    EXPECT_TRUE(!(addrs[i] & kSuperPageOffsetMask));
    AddressPoolManager::GetInstance().MarkUsed(kBRPPoolHandle, addrs[i],
                                               kSuperPageSize * kNumPages[i]);
  }

  // Sizes of the guard regions the BRP bitmap excludes at both ends of a
  // reservation.
  constexpr size_t first_guard_size =
      AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
      AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap;
  constexpr size_t last_guard_size =
      AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
      (AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap -
       AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap);

  for (size_t i = 0; i < kAllocCount; ++i) {
    uintptr_t address = addrs[i];
    size_t num_allocated_size = kNumPages[i] * kSuperPageSize;
    size_t num_system_pages = num_allocated_size / SystemPageSize();
    for (size_t j = 0; j < num_system_pages; ++j) {
      size_t offset = address - addrs[i];
      if (offset < first_guard_size ||
          offset >= (num_allocated_size - last_guard_size)) {
        EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(address));
      } else {
        EXPECT_TRUE(AddressPoolManager::IsManagedByBRPPool(address));
      }
      EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(address));
      address += SystemPageSize();
    }
  }
  for (size_t i = 0; i < kAllocCount; ++i) {
    AddressPoolManager::GetInstance().MarkUnused(kBRPPoolHandle, addrs[i],
                                                 kSuperPageSize * kNumPages[i]);
    AddressPoolManager::GetInstance().UnreserveAndDecommit(
        kBRPPoolHandle, addrs[i], kSuperPageSize * kNumPages[i]);
    EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(addrs[i]));
    EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(addrs[i]));
  }
}
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

// 32-bit path: regular-pool usage reported via DumpStats() must rise after a
// reservation is marked used and return to its prior value after release.
// Uses relative comparisons because the global singleton may already have
// reservations from other tests.
TEST(PartitionAllocAddressPoolManagerTest, RegularPoolUsageChanges) {
  AddressSpaceStatsDumperForTesting dumper{};
  AddressPoolManager::GetInstance().DumpStats(&dumper);
  const size_t usage_before = dumper.regular_pool_usage_;

  const uintptr_t address = AddressPoolManager::GetInstance().Reserve(
      kRegularPoolHandle, 0, kSuperPageSize);
  ASSERT_TRUE(address);
  AddressPoolManager::GetInstance().MarkUsed(kRegularPoolHandle, address,
                                             kSuperPageSize);

  AddressPoolManager::GetInstance().DumpStats(&dumper);
  EXPECT_GT(dumper.regular_pool_usage_, usage_before);

  AddressPoolManager::GetInstance().MarkUnused(kRegularPoolHandle, address,
                                               kSuperPageSize);
  AddressPoolManager::GetInstance().UnreserveAndDecommit(
      kRegularPoolHandle, address, kSuperPageSize);

  AddressPoolManager::GetInstance().DumpStats(&dumper);
  EXPECT_EQ(dumper.regular_pool_usage_, usage_before);
}

#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

}  // namespace partition_alloc::internal