1 // Copyright 2013 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "partition_alloc/partition_alloc_for_testing.h"
6
7 #include <algorithm>
8 #include <bit>
9 #include <cstddef>
10 #include <cstdint>
11 #include <cstdlib>
12 #include <cstring>
13 #include <iostream>
14 #include <limits>
15 #include <memory>
16 #include <random>
17 #include <set>
18 #include <tuple>
19 #include <vector>
20
21 #include "base/system/sys_info.h"
22 #include "base/test/gtest_util.h"
23 #include "build/build_config.h"
24 #include "partition_alloc/address_space_randomization.h"
25 #include "partition_alloc/chromecast_buildflags.h"
26 #include "partition_alloc/dangling_raw_ptr_checks.h"
27 #include "partition_alloc/freeslot_bitmap.h"
28 #include "partition_alloc/in_slot_metadata.h"
29 #include "partition_alloc/lightweight_quarantine.h"
30 #include "partition_alloc/memory_reclaimer.h"
31 #include "partition_alloc/page_allocator_constants.h"
32 #include "partition_alloc/partition_address_space.h"
33 #include "partition_alloc/partition_alloc_base/bits.h"
34 #include "partition_alloc/partition_alloc_base/compiler_specific.h"
35 #include "partition_alloc/partition_alloc_base/cpu.h"
36 #include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
37 #include "partition_alloc/partition_alloc_base/logging.h"
38 #include "partition_alloc/partition_alloc_base/numerics/checked_math.h"
39 #include "partition_alloc/partition_alloc_base/rand_util.h"
40 #include "partition_alloc/partition_alloc_base/thread_annotations.h"
41 #include "partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
42 #include "partition_alloc/partition_alloc_buildflags.h"
43 #include "partition_alloc/partition_alloc_config.h"
44 #include "partition_alloc/partition_alloc_constants.h"
45 #include "partition_alloc/partition_alloc_forward.h"
46 #include "partition_alloc/partition_bucket.h"
47 #include "partition_alloc/partition_cookie.h"
48 #include "partition_alloc/partition_freelist_entry.h"
49 #include "partition_alloc/partition_page.h"
50 #include "partition_alloc/partition_root.h"
51 #include "partition_alloc/partition_stats.h"
52 #include "partition_alloc/reservation_offset_table.h"
53 #include "partition_alloc/tagging.h"
54 #include "partition_alloc/thread_isolation/thread_isolation.h"
55 #include "testing/gtest/include/gtest/gtest.h"
56
57 #if defined(__ARM_FEATURE_MEMORY_TAGGING)
58 #include <arm_acle.h>
59 #endif
60
61 #if BUILDFLAG(IS_POSIX)
62 #if BUILDFLAG(IS_LINUX)
63 // We need PKEY_DISABLE_WRITE in this file; glibc defines it in sys/mman.h but
64 // it's actually Linux-specific and other Linux libcs define it in linux/mman.h.
65 // We have to include both to be sure we get the definition.
66 #include <linux/mman.h>
67 #endif // BUILDFLAG(IS_LINUX)
68 #include <sys/mman.h>
69 #include <sys/resource.h>
70 #include <sys/time.h>
71 #endif // BUILDFLAG(IS_POSIX)
72
73 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_MAC)
74 #include <OpenCL/opencl.h>
75 #endif
76
77 #if BUILDFLAG(IS_MAC)
78 #include "partition_alloc/partition_alloc_base/mac/mac_util.h"
79 #endif
80
81 #if BUILDFLAG(ENABLE_PKEYS)
82 #include <sys/syscall.h>
83 #endif
84
85 // In the MTE world, the upper bits of a pointer can be decorated with a tag,
86 // thus allowing many versions of the same pointer to exist. These macros take
87 // that into account when comparing.
88 #define PA_EXPECT_PTR_EQ(ptr1, ptr2) \
89 { EXPECT_EQ(UntagPtr(ptr1), UntagPtr(ptr2)); }
90 #define PA_EXPECT_PTR_NE(ptr1, ptr2) \
91 { EXPECT_NE(UntagPtr(ptr1), UntagPtr(ptr2)); }
92
93 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
94
95 namespace {
96
IsLargeMemoryDevice()97 bool IsLargeMemoryDevice() {
98 // Treat any device with 4GiB or more of physical memory as a "large memory
99 // device". We check for slightly less than GiB so that devices with a small
100 // amount of memory not accessible to the OS still count as "large".
101 //
102 // Set to 4GiB, since we have 2GiB Android devices where tests flakily fail
103 // (e.g. Nexus 5X, crbug.com/1191195).
104 return base::SysInfo::AmountOfPhysicalMemory() >= 4000ULL * 1024 * 1024;
105 }
106
SetAddressSpaceLimit()107 bool SetAddressSpaceLimit() {
108 #if !defined(ARCH_CPU_64_BITS) || !BUILDFLAG(IS_POSIX)
109 // 32 bits => address space is limited already.
110 return true;
111 #elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE)
112 // macOS will accept, but not enforce, |RLIMIT_AS| changes. See
113 // https://crbug.com/435269 and rdar://17576114.
114 //
115 // Note: This number must be not less than 6 GB, because with
116 // sanitizer_coverage_flags=edge, it reserves > 5 GB of address space. See
117 // https://crbug.com/674665.
118 const size_t kAddressSpaceLimit = static_cast<size_t>(6144) * 1024 * 1024;
119 struct rlimit limit;
120 if (getrlimit(RLIMIT_DATA, &limit) != 0) {
121 return false;
122 }
123 if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) {
124 limit.rlim_cur = kAddressSpaceLimit;
125 if (setrlimit(RLIMIT_DATA, &limit) != 0) {
126 return false;
127 }
128 }
129 return true;
130 #else
131 return false;
132 #endif
133 }
134
ClearAddressSpaceLimit()135 bool ClearAddressSpaceLimit() {
136 #if !defined(ARCH_CPU_64_BITS) || !BUILDFLAG(IS_POSIX)
137 return true;
138 #elif BUILDFLAG(IS_POSIX)
139 struct rlimit limit;
140 if (getrlimit(RLIMIT_DATA, &limit) != 0) {
141 return false;
142 }
143 limit.rlim_cur = limit.rlim_max;
144 if (setrlimit(RLIMIT_DATA, &limit) != 0) {
145 return false;
146 }
147 return true;
148 #else
149 return false;
150 #endif
151 }
152
153 const size_t kTestSizes[] = {
154 1,
155 17,
156 100,
157 partition_alloc::internal::SystemPageSize(),
158 partition_alloc::internal::SystemPageSize() + 1,
159 partition_alloc::PartitionRoot::GetDirectMapSlotSize(100),
160 1 << 20,
161 1 << 21,
162 };
163 constexpr size_t kTestSizesCount = std::size(kTestSizes);
164
165 template <
166 partition_alloc::AllocFlags alloc_flags,
167 partition_alloc::FreeFlags free_flags = partition_alloc::FreeFlags::kNone>
AllocateRandomly(partition_alloc::PartitionRoot * root,size_t count)168 void AllocateRandomly(partition_alloc::PartitionRoot* root, size_t count) {
169 std::vector<void*> allocations(count, nullptr);
170 for (size_t i = 0; i < count; ++i) {
171 const size_t size =
172 kTestSizes[partition_alloc::internal::base::RandGenerator(
173 kTestSizesCount)];
174 allocations[i] = root->Alloc<alloc_flags>(size);
175 EXPECT_NE(nullptr, allocations[i]) << " size: " << size << " i: " << i;
176 }
177
178 for (size_t i = 0; i < count; ++i) {
179 if (allocations[i]) {
180 root->Free(allocations[i]);
181 }
182 }
183 }
184
HandleOOM(size_t unused_size)185 void HandleOOM(size_t unused_size) {
186 PA_LOG(FATAL) << "Out of memory";
187 }
188
189 int g_dangling_raw_ptr_detected_count = 0;
190 int g_dangling_raw_ptr_released_count = 0;
191
192 class CountDanglingRawPtr {
193 public:
CountDanglingRawPtr()194 CountDanglingRawPtr() {
195 g_dangling_raw_ptr_detected_count = 0;
196 g_dangling_raw_ptr_released_count = 0;
197 old_detected_fn_ = partition_alloc::GetDanglingRawPtrDetectedFn();
198 old_released_fn_ = partition_alloc::GetDanglingRawPtrReleasedFn();
199
200 partition_alloc::SetDanglingRawPtrDetectedFn(
201 CountDanglingRawPtr::DanglingRawPtrDetected);
202 partition_alloc::SetDanglingRawPtrReleasedFn(
203 CountDanglingRawPtr::DanglingRawPtrReleased);
204 }
~CountDanglingRawPtr()205 ~CountDanglingRawPtr() {
206 partition_alloc::SetDanglingRawPtrDetectedFn(old_detected_fn_);
207 partition_alloc::SetDanglingRawPtrReleasedFn(old_released_fn_);
208 }
209
210 private:
DanglingRawPtrDetected(uintptr_t)211 static void DanglingRawPtrDetected(uintptr_t) {
212 g_dangling_raw_ptr_detected_count++;
213 }
DanglingRawPtrReleased(uintptr_t)214 static void DanglingRawPtrReleased(uintptr_t) {
215 g_dangling_raw_ptr_released_count++;
216 }
217
218 partition_alloc::DanglingRawPtrDetectedFn* old_detected_fn_;
219 partition_alloc::DanglingRawPtrReleasedFn* old_released_fn_;
220 };
221
222 } // namespace
223
224 // Note: This test exercises interfaces inside the `partition_alloc`
225 // namespace, but inspects objects inside `partition_alloc::internal`.
226 // For ease of reading, the tests are placed into the latter namespace.
227 namespace partition_alloc::internal {
228
229 using BucketDistribution = PartitionRoot::BucketDistribution;
230 using SlotSpan = SlotSpanMetadata;
231
232 const size_t kTestAllocSize = 16;
233
234 constexpr size_t kPointerOffset = 0;
235 #if !BUILDFLAG(PA_DCHECK_IS_ON)
236 constexpr size_t kExtraAllocSizeWithoutMetadata = 0ull;
237 #else
238 constexpr size_t kExtraAllocSizeWithoutMetadata = kCookieSize;
239 #endif
240
241 const char* type_name = nullptr;
242
SetDistributionForPartitionRoot(PartitionRoot * root,BucketDistribution distribution)243 void SetDistributionForPartitionRoot(PartitionRoot* root,
244 BucketDistribution distribution) {
245 switch (distribution) {
246 case BucketDistribution::kNeutral:
247 root->ResetBucketDistributionForTesting();
248 break;
249 case BucketDistribution::kDenser:
250 root->SwitchToDenserBucketDistribution();
251 break;
252 }
253 }
254
255 struct PartitionAllocTestParam {
256 BucketDistribution bucket_distribution;
257 bool use_pkey_pool;
258 };
259
GetPartitionAllocTestParams()260 const std::vector<PartitionAllocTestParam> GetPartitionAllocTestParams() {
261 std::vector<PartitionAllocTestParam> params;
262 params.emplace_back(
263 PartitionAllocTestParam{BucketDistribution::kNeutral, false});
264 params.emplace_back(
265 PartitionAllocTestParam{BucketDistribution::kDenser, false});
266 #if BUILDFLAG(ENABLE_PKEYS)
267 if (CPUHasPkeySupport()) {
268 params.emplace_back(
269 PartitionAllocTestParam{BucketDistribution::kNeutral, true});
270 params.emplace_back(
271 PartitionAllocTestParam{BucketDistribution::kDenser, true});
272 }
273 #endif
274 return params;
275 }
276
277 class PartitionAllocTest
278 : public testing::TestWithParam<PartitionAllocTestParam> {
279 protected:
280 class ScopedPageAllocation {
281 public:
ScopedPageAllocation(PartitionAllocator & allocator,base::CheckedNumeric<size_t> npages)282 ScopedPageAllocation(PartitionAllocator& allocator,
283 base::CheckedNumeric<size_t> npages)
284 : allocator_(allocator),
285 npages_(npages),
286 ptr_(static_cast<char*>(allocator_.root()->Alloc(
287 (npages * SystemPageSize() - ExtraAllocSize(allocator_))
288 .ValueOrDie(),
289 type_name))) {}
290
~ScopedPageAllocation()291 ~ScopedPageAllocation() { allocator_.root()->Free(ptr_); }
292
TouchAllPages()293 void TouchAllPages() {
294 memset(ptr_, 'A',
295 ((npages_ * SystemPageSize()) - ExtraAllocSize(allocator_))
296 .ValueOrDie());
297 }
298
PageAtIndex(size_t index)299 void* PageAtIndex(size_t index) {
300 return ptr_ - kPointerOffset + (SystemPageSize() * index);
301 }
302
303 private:
304 PartitionAllocator& allocator_;
305 const base::CheckedNumeric<size_t> npages_;
306 char* ptr_;
307 };
308
309 PartitionAllocTest() = default;
310
311 ~PartitionAllocTest() override = default;
312
313 struct PartitionTestOptions {
314 bool use_memory_reclaimer = false;
315 bool uncap_empty_slot_span_memory = false;
316 bool set_bucket_distribution = false;
317 };
318
InitializeTestRoot(PartitionRoot * root,PartitionOptions opts,PartitionTestOptions test_opts)319 void InitializeTestRoot(PartitionRoot* root,
320 PartitionOptions opts,
321 PartitionTestOptions test_opts) {
322 root->Init(opts);
323 if (test_opts.use_memory_reclaimer) {
324 MemoryReclaimer::Instance()->RegisterPartition(root);
325 }
326 if (test_opts.uncap_empty_slot_span_memory) {
327 root->UncapEmptySlotSpanMemoryForTesting();
328 }
329 if (test_opts.set_bucket_distribution) {
330 SetDistributionForPartitionRoot(root, GetBucketDistribution());
331 }
332 }
333
CreateCustomTestRoot(PartitionOptions opts,PartitionTestOptions test_opts)334 std::unique_ptr<PartitionRoot> CreateCustomTestRoot(
335 PartitionOptions opts,
336 PartitionTestOptions test_opts) {
337 auto root = std::make_unique<PartitionRoot>();
338 InitializeTestRoot(root.get(), opts, test_opts);
339 return root;
340 }
341
GetCommonPartitionOptions()342 PartitionOptions GetCommonPartitionOptions() {
343 PartitionOptions opts;
344 // Requires explicit `FreeFlag` to activate, no effect otherwise.
345 opts.zapping_by_free_flags = PartitionOptions::kEnabled;
346 opts.scheduler_loop_quarantine = PartitionOptions::kEnabled;
347 opts.scheduler_loop_quarantine_capacity_in_bytes =
348 std::numeric_limits<size_t>::max();
349 return opts;
350 }
351
InitializeMainTestAllocators()352 void InitializeMainTestAllocators() {
353 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
354 PartitionOptions::EnableToggle enable_backup_ref_ptr =
355 PartitionOptions::kEnabled;
356 #endif
357 #if BUILDFLAG(ENABLE_PKEYS)
358 int pkey = PkeyAlloc(UseThreadIsolatedPool() ? 0 : PKEY_DISABLE_WRITE);
359 if (pkey != -1) {
360 pkey_ = pkey;
361 }
362
363 PartitionOptions pkey_opts = GetCommonPartitionOptions();
364 pkey_opts.thread_isolation = ThreadIsolationOption(pkey_);
365 // We always want to have a pkey allocator initialized to make sure that the
366 // other pools still work. As part of the initializition, we tag some memory
367 // with the new pkey, effectively making it read-only. So there's some
368 // potential for breakage that this should catch.
369 InitializeTestRoot(pkey_allocator.root(), pkey_opts,
370 PartitionTestOptions{.use_memory_reclaimer = true});
371
372 ThreadIsolationOption thread_isolation_opt;
373 if (UseThreadIsolatedPool() && pkey_ != kInvalidPkey) {
374 thread_isolation_opt = ThreadIsolationOption(pkey_);
375 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
376 // BRP and thread isolated mode use different pools, so they can't be
377 // enabled at the same time.
378 enable_backup_ref_ptr = PartitionOptions::kDisabled;
379 #endif
380 }
381 #endif // BUILDFLAG(ENABLE_PKEYS)
382
383 PartitionOptions opts = GetCommonPartitionOptions();
384 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
385 opts.backup_ref_ptr = enable_backup_ref_ptr;
386 #endif
387 #if BUILDFLAG(ENABLE_PKEYS)
388 opts.thread_isolation = thread_isolation_opt;
389 #endif
390 #if BUILDFLAG(HAS_MEMORY_TAGGING)
391 opts.memory_tagging = {
392 .enabled =
393 partition_alloc::internal::base::CPU::GetInstanceNoAllocation()
394 .has_mte()
395 ? PartitionOptions::kEnabled
396 : PartitionOptions::kDisabled,
397 };
398 #endif // BUILDFLAG(HAS_MEMORY_TAGGING)
399 InitializeTestRoot(
400 allocator.root(), opts,
401 PartitionTestOptions{.use_memory_reclaimer = true,
402 .uncap_empty_slot_span_memory = true,
403 .set_bucket_distribution = true});
404 }
405
406 // Actual slot size used for requests of size kTestAllocSize.
ActualTestAllocSize() const407 size_t ActualTestAllocSize() const {
408 return SizeToBucketSize(kTestAllocSize + ExtraAllocSize(allocator));
409 }
410
SetUp()411 void SetUp() override {
412 PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
413 StraightenLargerSlotSpanFreeListsMode::kOnlyWhenUnprovisioning);
414 PartitionRoot::SetSortSmallerSlotSpanFreeListsEnabled(true);
415 PartitionRoot::SetSortActiveSlotSpansEnabled(true);
416 PartitionAllocGlobalInit(HandleOOM);
417 InitializeMainTestAllocators();
418
419 test_bucket_index_ = SizeToIndex(ActualTestAllocSize());
420 }
421
TearDown()422 void TearDown() override {
423 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
424 PurgeFlags::kDiscardUnusedSystemPages);
425 PartitionAllocGlobalUninitForTesting();
426 #if BUILDFLAG(ENABLE_PKEYS)
427 if (pkey_ != kInvalidPkey) {
428 PkeyFree(pkey_);
429 }
430 #endif
431 }
432
SizeToIndex(size_t size) const433 size_t SizeToIndex(size_t size) const {
434 const auto distribution_to_use = GetBucketDistribution();
435 return PartitionRoot::SizeToBucketIndex(size, distribution_to_use);
436 }
437
SizeToBucketSize(size_t size) const438 size_t SizeToBucketSize(size_t size) const {
439 const auto index = SizeToIndex(size);
440 return allocator.root()->buckets[index].slot_size;
441 }
442
ExtraAllocSize(const PartitionAllocator & allocator)443 static size_t ExtraAllocSize(const PartitionAllocator& allocator) {
444 size_t metadata_size = 0;
445 // Duplicate the logic from PartitionRoot::Init().
446 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
447 if (allocator.root()->brp_enabled()) {
448 metadata_size =
449 AlignUpInSlotMetadataSizeForApple(kInSlotMetadataSizeAdjustment);
450 }
451 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
452 return kExtraAllocSizeWithoutMetadata + metadata_size;
453 }
454
GetNumPagesPerSlotSpan(size_t size)455 size_t GetNumPagesPerSlotSpan(size_t size) {
456 size_t real_size = size + ExtraAllocSize(allocator);
457 size_t bucket_index = SizeToIndex(real_size);
458 PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
459 // TODO(tasak): make get_pages_per_slot_span() available at
460 // partition_alloc_unittest.cc. Is it allowable to make the code from
461 // partition_bucet.cc to partition_bucket.h?
462 return (bucket->num_system_pages_per_slot_span +
463 (NumSystemPagesPerPartitionPage() - 1)) /
464 NumSystemPagesPerPartitionPage();
465 }
466
GetFullSlotSpan(size_t size)467 SlotSpanMetadata* GetFullSlotSpan(size_t size) {
468 size_t real_size = size + ExtraAllocSize(allocator);
469 size_t bucket_index = SizeToIndex(real_size);
470 PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
471 size_t num_slots =
472 (bucket->num_system_pages_per_slot_span * SystemPageSize()) /
473 bucket->slot_size;
474 uintptr_t first = 0;
475 uintptr_t last = 0;
476 size_t i;
477 for (i = 0; i < num_slots; ++i) {
478 void* ptr = allocator.root()->Alloc(size, type_name);
479 EXPECT_TRUE(ptr);
480 if (!i) {
481 first = allocator.root()->ObjectToSlotStart(ptr);
482 } else if (i == num_slots - 1) {
483 last = allocator.root()->ObjectToSlotStart(ptr);
484 }
485 }
486 EXPECT_EQ(SlotSpanMetadata::FromSlotStart(first),
487 SlotSpanMetadata::FromSlotStart(last));
488 if (bucket->num_system_pages_per_slot_span ==
489 NumSystemPagesPerPartitionPage()) {
490 EXPECT_EQ(first & PartitionPageBaseMask(),
491 last & PartitionPageBaseMask());
492 }
493 EXPECT_EQ(num_slots, bucket->active_slot_spans_head->num_allocated_slots);
494 EXPECT_EQ(nullptr, bucket->active_slot_spans_head->get_freelist_head());
495 EXPECT_TRUE(bucket->is_valid());
496 EXPECT_TRUE(bucket->active_slot_spans_head !=
497 SlotSpanMetadata::get_sentinel_slot_span());
498 EXPECT_TRUE(bucket->active_slot_spans_head->is_full());
499 return bucket->active_slot_spans_head;
500 }
501
ClearEmptySlotSpanCache()502 void ClearEmptySlotSpanCache() {
503 allocator.root()->DecommitEmptySlotSpansForTesting();
504 }
505
506 enum ReturnNullTestMode {
507 kPartitionAlloc,
508 kPartitionRealloc,
509 };
510
DoReturnNullTest(size_t alloc_size,ReturnNullTestMode mode)511 void DoReturnNullTest(size_t alloc_size, ReturnNullTestMode mode) {
512 // TODO(crbug.com/678782): Where necessary and possible, disable the
513 // platform's OOM-killing behavior. OOM-killing makes this test flaky on
514 // low-memory devices.
515 if (!IsLargeMemoryDevice()) {
516 PA_LOG(WARNING)
517 << "Skipping test on this device because of crbug.com/678782";
518 PA_LOG(FATAL) << "Passed DoReturnNullTest";
519 }
520
521 ASSERT_TRUE(SetAddressSpaceLimit());
522
523 // Work out the number of allocations for 6 GB of memory.
524 const int num_allocations = (6 * 1024 * 1024) / (alloc_size / 1024);
525
526 void** ptrs = static_cast<void**>(
527 allocator.root()->Alloc(num_allocations * sizeof(void*), type_name));
528 int i;
529
530 for (i = 0; i < num_allocations; ++i) {
531 switch (mode) {
532 case kPartitionAlloc: {
533 ptrs[i] = allocator.root()->Alloc<AllocFlags::kReturnNull>(alloc_size,
534 type_name);
535 break;
536 }
537 case kPartitionRealloc: {
538 ptrs[i] =
539 allocator.root()->Alloc<AllocFlags::kReturnNull>(1, type_name);
540 ptrs[i] = allocator.root()->Realloc<AllocFlags::kReturnNull>(
541 ptrs[i], alloc_size, type_name);
542 break;
543 }
544 }
545
546 if (!i) {
547 EXPECT_TRUE(ptrs[0]);
548 }
549 if (!ptrs[i]) {
550 ptrs[i] = allocator.root()->Alloc<AllocFlags::kReturnNull>(alloc_size,
551 type_name);
552 EXPECT_FALSE(ptrs[i]);
553 break;
554 }
555 }
556
557 // We shouldn't succeed in allocating all 6 GB of memory. If we do, then
558 // we're not actually testing anything here.
559 EXPECT_LT(i, num_allocations);
560
561 // Free, reallocate and free again each block we allocated. We do this to
562 // check that freeing memory also works correctly after a failed allocation.
563 for (--i; i >= 0; --i) {
564 allocator.root()->Free(ptrs[i]);
565 ptrs[i] = allocator.root()->Alloc<AllocFlags::kReturnNull>(alloc_size,
566 type_name);
567 EXPECT_TRUE(ptrs[i]);
568 allocator.root()->Free(ptrs[i]);
569 }
570
571 allocator.root()->Free(ptrs);
572
573 EXPECT_TRUE(ClearAddressSpaceLimit());
574 PA_LOG(FATAL) << "Passed DoReturnNullTest";
575 }
576
577 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
578 void RunRefCountReallocSubtest(size_t orig_size, size_t new_size);
579 #endif
580
Alloc(size_t size)581 PA_NOINLINE PA_MALLOC_FN void* Alloc(size_t size) {
582 return allocator.root()->Alloc(size);
583 }
584
Free(void * ptr)585 PA_NOINLINE void Free(void* ptr) { allocator.root()->Free(ptr); }
586
GetBucketDistribution() const587 BucketDistribution GetBucketDistribution() const {
588 return GetParam().bucket_distribution;
589 }
590
UseThreadIsolatedPool() const591 bool UseThreadIsolatedPool() const { return GetParam().use_pkey_pool; }
UseBRPPool() const592 bool UseBRPPool() const {
593 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
594 return allocator.root()->brp_enabled();
595 #else
596 return false;
597 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
598 }
599
600 partition_alloc::PartitionAllocatorForTesting allocator;
601 #if BUILDFLAG(ENABLE_PKEYS)
602 partition_alloc::PartitionAllocatorForTesting pkey_allocator;
603 #endif
604 size_t test_bucket_index_;
605
606 #if BUILDFLAG(ENABLE_PKEYS)
607 int pkey_ = kInvalidPkey;
608 #endif
609 };
610
611 // Death tests misbehave on Android, http://crbug.com/643760.
612 #if defined(GTEST_HAS_DEATH_TEST) && !BUILDFLAG(IS_ANDROID)
613 #define PA_HAS_DEATH_TESTS
614
615 class PartitionAllocDeathTest : public PartitionAllocTest {};
616
617 INSTANTIATE_TEST_SUITE_P(AlternateTestParams,
618 PartitionAllocDeathTest,
619 testing::ValuesIn(GetPartitionAllocTestParams()));
620
621 #endif
622
623 namespace {
624
FreeFullSlotSpan(PartitionRoot * root,SlotSpanMetadata * slot_span)625 void FreeFullSlotSpan(PartitionRoot* root, SlotSpanMetadata* slot_span) {
626 EXPECT_TRUE(slot_span->is_full());
627 size_t size = slot_span->bucket->slot_size;
628 size_t num_slots =
629 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
630 size;
631 EXPECT_EQ(num_slots, slot_span->num_allocated_slots);
632 uintptr_t address = SlotSpanMetadata::ToSlotSpanStart(slot_span);
633 size_t i;
634 for (i = 0; i < num_slots; ++i) {
635 root->Free(root->SlotStartToObject(address));
636 address += size;
637 }
638 EXPECT_TRUE(slot_span->is_empty());
639 }
640
641 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
CheckPageInCore(void * ptr,bool in_core)642 bool CheckPageInCore(void* ptr, bool in_core) {
643 unsigned char ret = 0;
644 EXPECT_EQ(0, mincore(ptr, SystemPageSize(), &ret));
645 return in_core == (ret & 1);
646 }
647
648 #define CHECK_PAGE_IN_CORE(ptr, in_core) \
649 EXPECT_TRUE(CheckPageInCore(ptr, in_core))
650 #else
651 #define CHECK_PAGE_IN_CORE(ptr, in_core) (void)(0)
652 #endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
653
654 class MockPartitionStatsDumper : public PartitionStatsDumper {
655 public:
656 MockPartitionStatsDumper() = default;
657
PartitionDumpTotals(const char * partition_name,const PartitionMemoryStats * stats)658 void PartitionDumpTotals(const char* partition_name,
659 const PartitionMemoryStats* stats) override {
660 EXPECT_GE(stats->total_mmapped_bytes, stats->total_resident_bytes);
661 EXPECT_EQ(total_resident_bytes, stats->total_resident_bytes);
662 EXPECT_EQ(total_active_bytes, stats->total_active_bytes);
663 EXPECT_EQ(total_decommittable_bytes, stats->total_decommittable_bytes);
664 EXPECT_EQ(total_discardable_bytes, stats->total_discardable_bytes);
665 }
666
PartitionsDumpBucketStats(const char * partition_name,const PartitionBucketMemoryStats * stats)667 void PartitionsDumpBucketStats(
668 [[maybe_unused]] const char* partition_name,
669 const PartitionBucketMemoryStats* stats) override {
670 EXPECT_TRUE(stats->is_valid);
671 EXPECT_EQ(0u, stats->bucket_slot_size & sizeof(void*));
672 bucket_stats.push_back(*stats);
673 total_resident_bytes += stats->resident_bytes;
674 total_active_bytes += stats->active_bytes;
675 total_decommittable_bytes += stats->decommittable_bytes;
676 total_discardable_bytes += stats->discardable_bytes;
677 }
678
IsMemoryAllocationRecorded()679 bool IsMemoryAllocationRecorded() {
680 return total_resident_bytes != 0 && total_active_bytes != 0;
681 }
682
GetBucketStats(size_t bucket_size)683 const PartitionBucketMemoryStats* GetBucketStats(size_t bucket_size) {
684 for (auto& stat : bucket_stats) {
685 if (stat.bucket_slot_size == bucket_size) {
686 return &stat;
687 }
688 }
689 return nullptr;
690 }
691
692 private:
693 size_t total_resident_bytes = 0;
694 size_t total_active_bytes = 0;
695 size_t total_decommittable_bytes = 0;
696 size_t total_discardable_bytes = 0;
697
698 std::vector<PartitionBucketMemoryStats> bucket_stats;
699 };
700
701 } // namespace
702
703 INSTANTIATE_TEST_SUITE_P(AlternateTestParams,
704 PartitionAllocTest,
705 testing::ValuesIn(GetPartitionAllocTestParams()));
706
707 // Check that the most basic of allocate / free pairs work.
TEST_P(PartitionAllocTest,Basic)708 TEST_P(PartitionAllocTest, Basic) {
709 PartitionRoot::Bucket* bucket =
710 &allocator.root()->buckets[test_bucket_index_];
711 auto* seed_slot_span = SlotSpanMetadata::get_sentinel_slot_span();
712
713 EXPECT_FALSE(bucket->empty_slot_spans_head);
714 EXPECT_FALSE(bucket->decommitted_slot_spans_head);
715 EXPECT_EQ(seed_slot_span, bucket->active_slot_spans_head);
716 EXPECT_EQ(nullptr, bucket->active_slot_spans_head->next_slot_span);
717
718 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
719 EXPECT_TRUE(ptr);
720 EXPECT_EQ(kPointerOffset, UntagPtr(ptr) & PartitionPageOffsetMask());
721 // Check that the offset appears to include a guard page.
722 EXPECT_EQ(PartitionPageSize() +
723 partition_alloc::internal::ReservedFreeSlotBitmapSize() +
724 kPointerOffset,
725 UntagPtr(ptr) & kSuperPageOffsetMask);
726
727 allocator.root()->Free(ptr);
728 // Expect that the last active slot span gets noticed as empty but doesn't get
729 // decommitted.
730 EXPECT_TRUE(bucket->empty_slot_spans_head);
731 EXPECT_FALSE(bucket->decommitted_slot_spans_head);
732 }
733
734 // Test multiple allocations, and freelist handling.
TEST_P(PartitionAllocTest,MultiAlloc)735 TEST_P(PartitionAllocTest, MultiAlloc) {
736 void* ptr1 = allocator.root()->Alloc(kTestAllocSize, type_name);
737 void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
738 EXPECT_TRUE(ptr1);
739 EXPECT_TRUE(ptr2);
740 ptrdiff_t diff = UntagPtr(ptr2) - UntagPtr(ptr1);
741 EXPECT_EQ(static_cast<ptrdiff_t>(ActualTestAllocSize()), diff);
742
743 // Check that we re-use the just-freed slot.
744 allocator.root()->Free(ptr2);
745 ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
746 EXPECT_TRUE(ptr2);
747 diff = UntagPtr(ptr2) - UntagPtr(ptr1);
748 EXPECT_EQ(static_cast<ptrdiff_t>(ActualTestAllocSize()), diff);
749 allocator.root()->Free(ptr1);
750 ptr1 = allocator.root()->Alloc(kTestAllocSize, type_name);
751 EXPECT_TRUE(ptr1);
752 diff = UntagPtr(ptr2) - UntagPtr(ptr1);
753 EXPECT_EQ(static_cast<ptrdiff_t>(ActualTestAllocSize()), diff);
754
755 void* ptr3 = allocator.root()->Alloc(kTestAllocSize, type_name);
756 EXPECT_TRUE(ptr3);
757 diff = UntagPtr(ptr3) - UntagPtr(ptr1);
758 EXPECT_EQ(static_cast<ptrdiff_t>(ActualTestAllocSize() * 2), diff);
759
760 allocator.root()->Free(ptr1);
761 allocator.root()->Free(ptr2);
762 allocator.root()->Free(ptr3);
763 }
764
765 // Test a bucket with multiple slot spans.
TEST_P(PartitionAllocTest,MultiSlotSpans)766 TEST_P(PartitionAllocTest, MultiSlotSpans) {
767 PartitionRoot::Bucket* bucket =
768 &allocator.root()->buckets[test_bucket_index_];
769
770 auto* slot_span = GetFullSlotSpan(kTestAllocSize);
771 FreeFullSlotSpan(allocator.root(), slot_span);
772 EXPECT_TRUE(bucket->empty_slot_spans_head);
773 EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
774 bucket->active_slot_spans_head);
775 EXPECT_EQ(nullptr, slot_span->next_slot_span);
776 EXPECT_EQ(0u, slot_span->num_allocated_slots);
777
778 slot_span = GetFullSlotSpan(kTestAllocSize);
779 auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
780
781 EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
782 EXPECT_EQ(nullptr, slot_span2->next_slot_span);
783 EXPECT_EQ(SlotSpanMetadata::ToSlotSpanStart(slot_span) & kSuperPageBaseMask,
784 SlotSpanMetadata::ToSlotSpanStart(slot_span2) & kSuperPageBaseMask);
785
786 // Fully free the non-current slot span. This will leave us with no current
787 // active slot span because one is empty and the other is full.
788 FreeFullSlotSpan(allocator.root(), slot_span);
789 EXPECT_EQ(0u, slot_span->num_allocated_slots);
790 EXPECT_TRUE(bucket->empty_slot_spans_head);
791 EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
792 bucket->active_slot_spans_head);
793
794 // Allocate a new slot span, it should pull from the freelist.
795 slot_span = GetFullSlotSpan(kTestAllocSize);
796 EXPECT_FALSE(bucket->empty_slot_spans_head);
797 EXPECT_EQ(slot_span, bucket->active_slot_spans_head);
798
799 FreeFullSlotSpan(allocator.root(), slot_span);
800 FreeFullSlotSpan(allocator.root(), slot_span2);
801 EXPECT_EQ(0u, slot_span->num_allocated_slots);
802 EXPECT_EQ(0u, slot_span2->num_allocated_slots);
803 EXPECT_EQ(0u, slot_span2->num_unprovisioned_slots);
804 EXPECT_TRUE(slot_span2->in_empty_cache());
805 }
806
807 // Test some finer aspects of internal slot span transitions.
TEST_P(PartitionAllocTest,SlotSpanTransitions)808 TEST_P(PartitionAllocTest, SlotSpanTransitions) {
809 PartitionRoot::Bucket* bucket =
810 &allocator.root()->buckets[test_bucket_index_];
811
812 auto* slot_span1 = GetFullSlotSpan(kTestAllocSize);
813 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
814 EXPECT_EQ(nullptr, slot_span1->next_slot_span);
815 auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
816 EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
817 EXPECT_EQ(nullptr, slot_span2->next_slot_span);
818
819 // Bounce slot_span1 back into the non-full list then fill it up again.
820 void* ptr = allocator.root()->SlotStartToObject(
821 SlotSpanMetadata::ToSlotSpanStart(slot_span1));
822 allocator.root()->Free(ptr);
823 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
824 std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
825 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
826 EXPECT_EQ(slot_span2, bucket->active_slot_spans_head->next_slot_span);
827
828 // Allocating another slot span at this point should cause us to scan over
829 // slot_span1 (which is both full and NOT our current slot span), and evict it
830 // from the freelist. Older code had a O(n^2) condition due to failure to do
831 // this.
832 auto* slot_span3 = GetFullSlotSpan(kTestAllocSize);
833 EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
834 EXPECT_EQ(nullptr, slot_span3->next_slot_span);
835
836 // Work out a pointer into slot_span2 and free it.
837 ptr = allocator.root()->SlotStartToObject(
838 SlotSpanMetadata::ToSlotSpanStart(slot_span2));
839 allocator.root()->Free(ptr);
840 // Trying to allocate at this time should cause us to cycle around to
841 // slot_span2 and find the recently freed slot.
842 void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
843 PA_EXPECT_PTR_EQ(ptr, ptr2);
844 EXPECT_EQ(slot_span2, bucket->active_slot_spans_head);
845 EXPECT_EQ(slot_span3, slot_span2->next_slot_span);
846
847 // Work out a pointer into slot_span1 and free it. This should pull the slot
848 // span back into the list of available slot spans.
849 ptr = allocator.root()->SlotStartToObject(
850 SlotSpanMetadata::ToSlotSpanStart(slot_span1));
851 allocator.root()->Free(ptr);
852 // This allocation should be satisfied by slot_span1.
853 ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
854 PA_EXPECT_PTR_EQ(ptr, ptr2);
855 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
856 EXPECT_EQ(slot_span2, slot_span1->next_slot_span);
857
858 FreeFullSlotSpan(allocator.root(), slot_span3);
859 FreeFullSlotSpan(allocator.root(), slot_span2);
860 FreeFullSlotSpan(allocator.root(), slot_span1);
861
862 // Allocating whilst in this state exposed a bug, so keep the test.
863 ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
864 allocator.root()->Free(ptr);
865 }
866
867 // Test that ExtraAllocSize() is exactly what PA takes away from the slot for
868 // extras.
TEST_P(PartitionAllocTest,ExtraAllocSize)869 TEST_P(PartitionAllocTest, ExtraAllocSize) {
870 // There is a bucket with a slot size exactly that (asserted below).
871 size_t slot_size = 64;
872 size_t bucket_index =
873 allocator.root()->SizeToBucketIndex(slot_size, GetBucketDistribution());
874 PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
875 ASSERT_EQ(bucket->slot_size, slot_size);
876
877 // The first allocation is expected to span exactly the capcity of the slot.
878 // The second one should overflow into a higher-size slot, and not fill its
879 // capacity.
880 size_t requested_size1 = slot_size - ExtraAllocSize(allocator);
881 size_t requested_size2 = requested_size1 + 1;
882 void* ptr1 = allocator.root()->Alloc(requested_size1);
883 void* ptr2 = allocator.root()->Alloc(requested_size2);
884 size_t capacity1 = allocator.root()->AllocationCapacityFromSlotStart(
885 allocator.root()->ObjectToSlotStart(ptr1));
886 size_t capacity2 = allocator.root()->AllocationCapacityFromSlotStart(
887 allocator.root()->ObjectToSlotStart(ptr2));
888 EXPECT_EQ(capacity1, requested_size1);
889 EXPECT_LT(capacity1, capacity2);
890 EXPECT_LT(requested_size2, capacity2);
891 allocator.root()->Free(ptr1);
892 allocator.root()->Free(ptr2);
893 }
894
TEST_P(PartitionAllocTest,PreferSlotSpansWithProvisionedEntries)895 TEST_P(PartitionAllocTest, PreferSlotSpansWithProvisionedEntries) {
896 size_t size = SystemPageSize() - ExtraAllocSize(allocator);
897 size_t real_size = size + ExtraAllocSize(allocator);
898 size_t bucket_index =
899 allocator.root()->SizeToBucketIndex(real_size, GetBucketDistribution());
900 PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
901 ASSERT_EQ(bucket->slot_size, real_size);
902 size_t slots_per_span = bucket->num_system_pages_per_slot_span;
903
904 // Make 10 full slot spans.
905 constexpr int kSpans = 10;
906 std::vector<std::vector<void*>> allocated_memory_spans(kSpans);
907 for (int span_index = 0; span_index < kSpans; span_index++) {
908 for (size_t i = 0; i < slots_per_span; i++) {
909 allocated_memory_spans[span_index].push_back(
910 allocator.root()->Alloc(size));
911 }
912 }
913
914 // Reverse ordering, since a newly non-full span is placed at the head of the
915 // active list.
916 for (int span_index = kSpans - 1; span_index >= 0; span_index--) {
917 allocator.root()->Free(allocated_memory_spans[span_index].back());
918 allocated_memory_spans[span_index].pop_back();
919 }
920
921 // Since slot spans are large enough and we freed memory from the end, the
922 // slot spans become partially provisioned after PurgeMemory().
923 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
924 PurgeFlags::kDiscardUnusedSystemPages);
925 std::vector<SlotSpanMetadata*> active_slot_spans;
926 for (auto* span = bucket->active_slot_spans_head; span;
927 span = span->next_slot_span) {
928 active_slot_spans.push_back(span);
929 ASSERT_EQ(span->num_unprovisioned_slots, 1u);
930 // But no freelist entries.
931 ASSERT_FALSE(span->get_freelist_head());
932 }
933
934 // Free one entry in the middle span, creating a freelist entry.
935 constexpr size_t kSpanIndex = 5;
936 allocator.root()->Free(allocated_memory_spans[kSpanIndex].back());
937 allocated_memory_spans[kSpanIndex].pop_back();
938
939 ASSERT_TRUE(active_slot_spans[kSpanIndex]->get_freelist_head());
940 ASSERT_FALSE(bucket->active_slot_spans_head->get_freelist_head());
941
942 // It must come from the middle slot span even though the first one has
943 // unprovisioned space.
944 void* new_ptr = allocator.root()->Alloc(size);
945
946 // Comes from the middle slot span, since it has a freelist entry.
947 auto* new_active_slot_span = active_slot_spans[kSpanIndex];
948 ASSERT_FALSE(new_active_slot_span->get_freelist_head());
949
950 // The middle slot span was moved to the front.
951 active_slot_spans.erase(active_slot_spans.begin() + kSpanIndex);
952 active_slot_spans.insert(active_slot_spans.begin(), new_active_slot_span);
953
954 // Check slot span ordering.
955 int index = 0;
956 for (auto* span = bucket->active_slot_spans_head; span;
957 span = span->next_slot_span) {
958 EXPECT_EQ(span, active_slot_spans[index]);
959 index++;
960 }
961 EXPECT_EQ(index, kSpans);
962
963 allocator.root()->Free(new_ptr);
964 for (int span_index = 0; span_index < kSpans; span_index++) {
965 for (void* ptr : allocated_memory_spans[span_index]) {
966 allocator.root()->Free(ptr);
967 }
968 }
969 }
970
971 // Test some corner cases relating to slot span transitions in the internal
972 // free slot span list metadata bucket.
TEST_P(PartitionAllocTest,FreeSlotSpanListSlotSpanTransitions)973 TEST_P(PartitionAllocTest, FreeSlotSpanListSlotSpanTransitions) {
974 PartitionRoot::Bucket* bucket =
975 &allocator.root()->buckets[test_bucket_index_];
976
977 size_t num_to_fill_free_list_slot_span =
978 PartitionPageSize() / (sizeof(SlotSpan) + ExtraAllocSize(allocator));
979 // The +1 is because we need to account for the fact that the current slot
980 // span never gets thrown on the freelist.
981 ++num_to_fill_free_list_slot_span;
982 auto slot_spans =
983 std::make_unique<SlotSpanMetadata*[]>(num_to_fill_free_list_slot_span);
984
985 size_t i;
986 for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
987 slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
988 }
989 EXPECT_EQ(slot_spans[num_to_fill_free_list_slot_span - 1],
990 bucket->active_slot_spans_head);
991 for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
992 FreeFullSlotSpan(allocator.root(), slot_spans[i]);
993 }
994 EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
995 bucket->active_slot_spans_head);
996 EXPECT_TRUE(bucket->empty_slot_spans_head);
997
998 // Allocate / free in a different bucket size so we get control of a
999 // different free slot span list. We need two slot spans because one will be
1000 // the last active slot span and not get freed.
1001 auto* slot_span1 = GetFullSlotSpan(kTestAllocSize * 2);
1002 auto* slot_span2 = GetFullSlotSpan(kTestAllocSize * 2);
1003 FreeFullSlotSpan(allocator.root(), slot_span1);
1004 FreeFullSlotSpan(allocator.root(), slot_span2);
1005
1006 for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
1007 slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
1008 }
1009 EXPECT_EQ(slot_spans[num_to_fill_free_list_slot_span - 1],
1010 bucket->active_slot_spans_head);
1011
1012 for (i = 0; i < num_to_fill_free_list_slot_span; ++i) {
1013 FreeFullSlotSpan(allocator.root(), slot_spans[i]);
1014 }
1015 EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
1016 bucket->active_slot_spans_head);
1017 EXPECT_TRUE(bucket->empty_slot_spans_head);
1018 }
1019
1020 // Test a large series of allocations that cross more than one underlying
1021 // super page.
TEST_P(PartitionAllocTest,MultiPageAllocs)1022 TEST_P(PartitionAllocTest, MultiPageAllocs) {
1023 size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
1024 // 1 super page has 2 guard partition pages and a tag bitmap.
1025 size_t num_slot_spans_needed =
1026 (NumPartitionPagesPerSuperPage() - 2 -
1027 partition_alloc::internal::NumPartitionPagesPerFreeSlotBitmap()) /
1028 num_pages_per_slot_span;
1029
1030 // We need one more slot span in order to cross super page boundary.
1031 ++num_slot_spans_needed;
1032
1033 EXPECT_GT(num_slot_spans_needed, 1u);
1034 auto slot_spans =
1035 std::make_unique<SlotSpanMetadata*[]>(num_slot_spans_needed);
1036 uintptr_t first_super_page_base = 0;
1037 size_t i;
1038 for (i = 0; i < num_slot_spans_needed; ++i) {
1039 slot_spans[i] = GetFullSlotSpan(kTestAllocSize);
1040 uintptr_t slot_span_start =
1041 SlotSpanMetadata::ToSlotSpanStart(slot_spans[i]);
1042 if (!i) {
1043 first_super_page_base = slot_span_start & kSuperPageBaseMask;
1044 }
1045 if (i == num_slot_spans_needed - 1) {
1046 uintptr_t second_super_page_base = slot_span_start & kSuperPageBaseMask;
1047 uintptr_t second_super_page_offset =
1048 slot_span_start & kSuperPageOffsetMask;
1049 EXPECT_FALSE(second_super_page_base == first_super_page_base);
1050 // Check that we allocated a guard page and the reserved tag bitmap for
1051 // the second page.
1052 EXPECT_EQ(PartitionPageSize() +
1053 partition_alloc::internal::ReservedFreeSlotBitmapSize(),
1054 second_super_page_offset);
1055 }
1056 }
1057 for (i = 0; i < num_slot_spans_needed; ++i) {
1058 FreeFullSlotSpan(allocator.root(), slot_spans[i]);
1059 }
1060 }
1061
1062 // Test the generic allocation functions that can handle arbitrary sizes and
1063 // reallocing etc.
TEST_P(PartitionAllocTest,Alloc)1064 TEST_P(PartitionAllocTest, Alloc) {
1065 void* ptr = allocator.root()->Alloc(1, type_name);
1066 EXPECT_TRUE(ptr);
1067 allocator.root()->Free(ptr);
1068 ptr = allocator.root()->Alloc(kMaxBucketed + 1, type_name);
1069 EXPECT_TRUE(ptr);
1070 allocator.root()->Free(ptr);
1071
1072 // To make both alloc(x + 1) and alloc(x + kSmallestBucket) to allocate from
1073 // the same bucket, partition_alloc::internal::base::bits::AlignUp(1 + x +
1074 // ExtraAllocSize(allocator), kAlignment)
1075 // == partition_alloc::internal::base::bits::AlignUp(kSmallestBucket + x +
1076 // ExtraAllocSize(allocator), kAlignment), because slot_size is multiples of
1077 // kAlignment. So (x + ExtraAllocSize(allocator)) must be multiples of
1078 // kAlignment. x =
1079 // partition_alloc::internal::base::bits::AlignUp(ExtraAllocSize(allocator),
1080 // kAlignment) - ExtraAllocSize(allocator);
1081 size_t base_size = partition_alloc::internal::base::bits::AlignUp(
1082 ExtraAllocSize(allocator), kAlignment) -
1083 ExtraAllocSize(allocator);
1084 ptr = allocator.root()->Alloc(base_size + 1, type_name);
1085 EXPECT_TRUE(ptr);
1086 void* orig_ptr = ptr;
1087 char* char_ptr = static_cast<char*>(ptr);
1088 *char_ptr = 'A';
1089
1090 // Change the size of the realloc, remaining inside the same bucket.
1091 void* new_ptr = allocator.root()->Realloc(ptr, base_size + 2, type_name);
1092 PA_EXPECT_PTR_EQ(ptr, new_ptr);
1093 new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
1094 PA_EXPECT_PTR_EQ(ptr, new_ptr);
1095 new_ptr =
1096 allocator.root()->Realloc(ptr, base_size + kSmallestBucket, type_name);
1097 PA_EXPECT_PTR_EQ(ptr, new_ptr);
1098
1099 // Change the size of the realloc, switching buckets.
1100 new_ptr = allocator.root()->Realloc(ptr, base_size + kSmallestBucket + 1,
1101 type_name);
1102 PA_EXPECT_PTR_NE(new_ptr, ptr);
1103 // Check that the realloc copied correctly.
1104 char* new_char_ptr = static_cast<char*>(new_ptr);
1105 EXPECT_EQ(*new_char_ptr, 'A');
1106 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1107 // Subtle: this checks for an old bug where we copied too much from the
1108 // source of the realloc. The condition can be detected by a trashing of
1109 // the uninitialized value in the space of the upsized allocation.
1110 EXPECT_EQ(kUninitializedByte,
1111 static_cast<unsigned char>(*(new_char_ptr + kSmallestBucket)));
1112 #endif
1113 *new_char_ptr = 'B';
1114 // The realloc moved. To check that the old allocation was freed, we can
1115 // do an alloc of the old allocation size and check that the old allocation
1116 // address is at the head of the freelist and reused.
1117 void* reused_ptr = allocator.root()->Alloc(base_size + 1, type_name);
1118 PA_EXPECT_PTR_EQ(reused_ptr, orig_ptr);
1119 allocator.root()->Free(reused_ptr);
1120
1121 // Downsize the realloc.
1122 ptr = new_ptr;
1123 new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
1124 PA_EXPECT_PTR_EQ(new_ptr, orig_ptr);
1125 new_char_ptr = static_cast<char*>(new_ptr);
1126 EXPECT_EQ(*new_char_ptr, 'B');
1127 *new_char_ptr = 'C';
1128
1129 // Upsize the realloc to outside the partition.
1130 ptr = new_ptr;
1131 new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed + 1, type_name);
1132 PA_EXPECT_PTR_NE(new_ptr, ptr);
1133 new_char_ptr = static_cast<char*>(new_ptr);
1134 EXPECT_EQ(*new_char_ptr, 'C');
1135 *new_char_ptr = 'D';
1136
1137 // Upsize and downsize the realloc, remaining outside the partition.
1138 ptr = new_ptr;
1139 new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 10, type_name);
1140 new_char_ptr = static_cast<char*>(new_ptr);
1141 EXPECT_EQ(*new_char_ptr, 'D');
1142 *new_char_ptr = 'E';
1143 ptr = new_ptr;
1144 new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 2, type_name);
1145 new_char_ptr = static_cast<char*>(new_ptr);
1146 EXPECT_EQ(*new_char_ptr, 'E');
1147 *new_char_ptr = 'F';
1148
1149 // Downsize the realloc to inside the partition.
1150 ptr = new_ptr;
1151 new_ptr = allocator.root()->Realloc(ptr, base_size + 1, type_name);
1152 PA_EXPECT_PTR_NE(new_ptr, ptr);
1153 PA_EXPECT_PTR_EQ(new_ptr, orig_ptr);
1154 new_char_ptr = static_cast<char*>(new_ptr);
1155 EXPECT_EQ(*new_char_ptr, 'F');
1156
1157 allocator.root()->Free(new_ptr);
1158 }
1159
1160 // Test the generic allocation functions can handle some specific sizes of
1161 // interest.
TEST_P(PartitionAllocTest,AllocSizes)1162 TEST_P(PartitionAllocTest, AllocSizes) {
1163 {
1164 void* ptr = allocator.root()->Alloc(0, type_name);
1165 EXPECT_TRUE(ptr);
1166 allocator.root()->Free(ptr);
1167 }
1168
1169 {
1170 // PartitionPageSize() is interesting because it results in just one
1171 // allocation per page, which tripped up some corner cases.
1172 const size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
1173 void* ptr = allocator.root()->Alloc(size, type_name);
1174 EXPECT_TRUE(ptr);
1175 void* ptr2 = allocator.root()->Alloc(size, type_name);
1176 EXPECT_TRUE(ptr2);
1177 allocator.root()->Free(ptr);
1178 // Should be freeable at this point.
1179 auto* slot_span = SlotSpanMetadata::FromSlotStart(
1180 allocator.root()->ObjectToSlotStart(ptr));
1181 EXPECT_TRUE(slot_span->in_empty_cache());
1182 allocator.root()->Free(ptr2);
1183 }
1184
1185 {
1186 // Single-slot slot span size.
1187 const size_t size =
1188 PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan + 1;
1189
1190 void* ptr = allocator.root()->Alloc(size, type_name);
1191 EXPECT_TRUE(ptr);
1192 memset(ptr, 'A', size);
1193 void* ptr2 = allocator.root()->Alloc(size, type_name);
1194 EXPECT_TRUE(ptr2);
1195 void* ptr3 = allocator.root()->Alloc(size, type_name);
1196 EXPECT_TRUE(ptr3);
1197 void* ptr4 = allocator.root()->Alloc(size, type_name);
1198 EXPECT_TRUE(ptr4);
1199
1200 auto* slot_span = SlotSpanMetadata::FromSlotStart(
1201 allocator.root()->ObjectToSlotStart(ptr));
1202 auto* slot_span2 = SlotSpanMetadata::FromSlotStart(
1203 allocator.root()->ObjectToSlotStart(ptr3));
1204 EXPECT_NE(slot_span, slot_span2);
1205
1206 allocator.root()->Free(ptr);
1207 allocator.root()->Free(ptr3);
1208 allocator.root()->Free(ptr2);
1209 // Should be freeable at this point.
1210 EXPECT_TRUE(slot_span->in_empty_cache());
1211 EXPECT_EQ(0u, slot_span->num_allocated_slots);
1212 EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
1213 void* new_ptr_1 = allocator.root()->Alloc(size, type_name);
1214 PA_EXPECT_PTR_EQ(ptr2, new_ptr_1);
1215 void* new_ptr_2 = allocator.root()->Alloc(size, type_name);
1216 PA_EXPECT_PTR_EQ(ptr3, new_ptr_2);
1217
1218 allocator.root()->Free(new_ptr_1);
1219 allocator.root()->Free(new_ptr_2);
1220 allocator.root()->Free(ptr4);
1221
1222 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1223 // |SlotSpanMetadata::Free| must poison the slot's contents with
1224 // |kFreedByte|.
1225 EXPECT_EQ(kFreedByte,
1226 *(static_cast<unsigned char*>(new_ptr_1) + (size - 1)));
1227 #endif
1228 }
1229
1230 // Can we allocate a massive (128MB) size?
1231 // Add +1, to test for cookie writing alignment issues.
1232 // Test this only if the device has enough memory or it might fail due
1233 // to OOM.
1234 if (IsLargeMemoryDevice()) {
1235 void* ptr = allocator.root()->Alloc(128 * 1024 * 1024 + 1, type_name);
1236 allocator.root()->Free(ptr);
1237 }
1238
1239 {
1240 // Check a more reasonable, but still direct mapped, size.
1241 // Chop a system page and a byte off to test for rounding errors.
1242 size_t size = 20 * 1024 * 1024;
1243 ASSERT_GT(size, kMaxBucketed);
1244 size -= SystemPageSize();
1245 size -= 1;
1246 void* ptr = allocator.root()->Alloc(size, type_name);
1247 char* char_ptr = static_cast<char*>(ptr);
1248 *(char_ptr + (size - 1)) = 'A';
1249 allocator.root()->Free(ptr);
1250
1251 // Can we free null?
1252 allocator.root()->Free(nullptr);
1253
1254 // Do we correctly get a null for a failed allocation?
1255 EXPECT_EQ(nullptr, allocator.root()->Alloc<AllocFlags::kReturnNull>(
1256 3u * 1024 * 1024 * 1024, type_name));
1257 }
1258 }
1259
1260 // Test that we can fetch the real allocated size after an allocation.
TEST_P(PartitionAllocTest,AllocGetSizeAndStart)1261 TEST_P(PartitionAllocTest, AllocGetSizeAndStart) {
1262 void* ptr;
1263 size_t requested_size, actual_capacity, predicted_capacity;
1264
1265 // Allocate something small.
1266 requested_size = 511 - ExtraAllocSize(allocator);
1267 predicted_capacity =
1268 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1269 ptr = allocator.root()->Alloc(requested_size, type_name);
1270 EXPECT_TRUE(ptr);
1271 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1272 actual_capacity =
1273 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1274 EXPECT_EQ(predicted_capacity, actual_capacity);
1275 EXPECT_LT(requested_size, actual_capacity);
1276 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1277 if (UseBRPPool()) {
1278 uintptr_t address = UntagPtr(ptr);
1279 for (size_t offset = 0; offset < requested_size; ++offset) {
1280 EXPECT_EQ(PartitionAllocGetSlotStartAndSizeInBRPPool(address + offset)
1281 .slot_start,
1282 slot_start);
1283 }
1284 }
1285 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1286 allocator.root()->Free(ptr);
1287
1288 // Allocate a size that should be a perfect match for a bucket, because it
1289 // is an exact power of 2.
1290 requested_size = (256 * 1024) - ExtraAllocSize(allocator);
1291 predicted_capacity =
1292 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1293 ptr = allocator.root()->Alloc(requested_size, type_name);
1294 EXPECT_TRUE(ptr);
1295 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1296 actual_capacity =
1297 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1298 EXPECT_EQ(predicted_capacity, actual_capacity);
1299 EXPECT_EQ(requested_size, actual_capacity);
1300 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1301 if (UseBRPPool()) {
1302 uintptr_t address = UntagPtr(ptr);
1303 for (size_t offset = 0; offset < requested_size; offset += 877) {
1304 EXPECT_EQ(PartitionAllocGetSlotStartAndSizeInBRPPool(address + offset)
1305 .slot_start,
1306 slot_start);
1307 }
1308 }
1309 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1310 allocator.root()->Free(ptr);
1311
1312 // Allocate a size that is a system page smaller than a bucket.
1313 // AllocationCapacityFromSlotStart() should return a larger size than we asked
1314 // for now.
1315 size_t num = 64;
1316 while (num * SystemPageSize() >= 1024 * 1024) {
1317 num /= 2;
1318 }
1319 requested_size =
1320 num * SystemPageSize() - SystemPageSize() - ExtraAllocSize(allocator);
1321 predicted_capacity =
1322 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1323 ptr = allocator.root()->Alloc(requested_size, type_name);
1324 EXPECT_TRUE(ptr);
1325 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1326 actual_capacity =
1327 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1328 EXPECT_EQ(predicted_capacity, actual_capacity);
1329 EXPECT_EQ(requested_size + SystemPageSize(), actual_capacity);
1330 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1331 if (UseBRPPool()) {
1332 uintptr_t address = UntagPtr(ptr);
1333 for (size_t offset = 0; offset < requested_size; offset += 4999) {
1334 EXPECT_EQ(PartitionAllocGetSlotStartAndSizeInBRPPool(address + offset)
1335 .slot_start,
1336 slot_start);
1337 }
1338 }
1339 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1340 allocator.root()->Free(ptr);
1341
1342 // Allocate the maximum allowed bucketed size.
1343 requested_size = kMaxBucketed - ExtraAllocSize(allocator);
1344 predicted_capacity =
1345 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1346 ptr = allocator.root()->Alloc(requested_size, type_name);
1347 EXPECT_TRUE(ptr);
1348 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1349 actual_capacity =
1350 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1351 EXPECT_EQ(predicted_capacity, actual_capacity);
1352 EXPECT_EQ(requested_size, actual_capacity);
1353 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1354 if (UseBRPPool()) {
1355 uintptr_t address = UntagPtr(ptr);
1356 for (size_t offset = 0; offset < requested_size; offset += 4999) {
1357 EXPECT_EQ(PartitionAllocGetSlotStartAndSizeInBRPPool(address + offset)
1358 .slot_start,
1359 slot_start);
1360 }
1361 }
1362 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1363
1364 // Check that we can write at the end of the reported size too.
1365 char* char_ptr = static_cast<char*>(ptr);
1366 *(char_ptr + (actual_capacity - 1)) = 'A';
1367 allocator.root()->Free(ptr);
1368
1369 // Allocate something very large, and uneven.
1370 if (IsLargeMemoryDevice()) {
1371 requested_size = 128 * 1024 * 1024 - 33;
1372 predicted_capacity =
1373 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1374 ptr = allocator.root()->Alloc(requested_size, type_name);
1375 EXPECT_TRUE(ptr);
1376 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1377 actual_capacity =
1378 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1379 EXPECT_EQ(predicted_capacity, actual_capacity);
1380
1381 EXPECT_LT(requested_size, actual_capacity);
1382
1383 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1384 if (UseBRPPool()) {
1385 uintptr_t address = UntagPtr(ptr);
1386 for (size_t offset = 0; offset < requested_size; offset += 16111) {
1387 EXPECT_EQ(PartitionAllocGetSlotStartAndSizeInBRPPool(address + offset)
1388 .slot_start,
1389 slot_start);
1390 }
1391 }
1392 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1393 allocator.root()->Free(ptr);
1394 }
1395
1396 // Too large allocation.
1397 requested_size = MaxDirectMapped() + 1;
1398 predicted_capacity =
1399 allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
1400 EXPECT_EQ(requested_size, predicted_capacity);
1401 }
1402
1403 #if BUILDFLAG(HAS_MEMORY_TAGGING)
TEST_P(PartitionAllocTest,MTEProtectsFreedPtr)1404 TEST_P(PartitionAllocTest, MTEProtectsFreedPtr) {
1405 // This test checks that Arm's memory tagging extension (MTE) is correctly
1406 // protecting freed pointers.
1407 base::CPU cpu;
1408 if (!cpu.has_mte()) {
1409 // This test won't pass without MTE support.
1410 GTEST_SKIP();
1411 }
1412
1413 // Create an arbitrarily-sized small allocation.
1414 size_t alloc_size = 64 - ExtraAllocSize(allocator);
1415 uint64_t* ptr1 =
1416 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
1417 EXPECT_TRUE(ptr1);
1418
1419 // Invalidate the pointer by freeing it.
1420 allocator.root()->Free(ptr1);
1421
1422 // When we immediately reallocate a pointer, we should see the same allocation
1423 // slot but with a different tag (PA_EXPECT_PTR_EQ ignores the MTE tag).
1424 uint64_t* ptr2 =
1425 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
1426 PA_EXPECT_PTR_EQ(ptr1, ptr2);
1427 // The different tag bits mean that ptr1 is not the same as ptr2.
1428 EXPECT_NE(ptr1, ptr2);
1429
1430 // When we free again, we expect a new tag for that area that's different from
1431 // ptr1 and ptr2.
1432 allocator.root()->Free(ptr2);
1433 uint64_t* ptr3 =
1434 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
1435 PA_EXPECT_PTR_EQ(ptr2, ptr3);
1436 EXPECT_NE(ptr1, ptr3);
1437 EXPECT_NE(ptr2, ptr3);
1438
1439 // We don't check anything about ptr3, but we do clean it up to avoid DCHECKs.
1440 allocator.root()->Free(ptr3);
1441 }
1442 #endif // BUILDFLAG(HAS_MEMORY_TAGGING)
1443
1444 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1445 TEST_P(PartitionAllocTest, IsPtrWithinSameAlloc) {
1446 if (!UseBRPPool()) {
1447 return;
1448 }
1449
1450 const size_t kMinReasonableTestSize =
1451 partition_alloc::internal::base::bits::AlignUp(
1452 ExtraAllocSize(allocator) + 1, kAlignment);
1453 ASSERT_GT(kMinReasonableTestSize, ExtraAllocSize(allocator));
1454 const size_t kSizes[] = {kMinReasonableTestSize,
1455 256,
1456 SystemPageSize(),
1457 PartitionPageSize(),
1458 MaxRegularSlotSpanSize(),
1459 MaxRegularSlotSpanSize() + 1,
1460 MaxRegularSlotSpanSize() + SystemPageSize(),
1461 MaxRegularSlotSpanSize() + PartitionPageSize(),
1462 kMaxBucketed,
1463 kMaxBucketed + 1,
1464 kMaxBucketed + SystemPageSize(),
1465 kMaxBucketed + PartitionPageSize(),
1466 kSuperPageSize};
1467 #if BUILDFLAG(HAS_64_BIT_POINTERS)
1468 constexpr size_t kFarFarAwayDelta = 512 * kGiB;
1469 #else
1470 constexpr size_t kFarFarAwayDelta = kGiB;
1471 #endif
1472 for (size_t size : kSizes) {
1473 size_t requested_size = size - ExtraAllocSize(allocator);
1474 // For regular slot-span allocations, confirm the size fills the entire
1475 // slot. Otherwise the test would be ineffective, as PartitionAlloc has no
1476 // ability to check against the actual allocated size.
1477 // Single-slot slot-spans and direct map don't have that problem.
1478 if (size <= MaxRegularSlotSpanSize()) {
1479 ASSERT_EQ(requested_size,
1480 allocator.root()->AllocationCapacityFromRequestedSize(
1481 requested_size));
1482 }
1483
1484 constexpr size_t kNumRepeats = 3;
1485 void* ptrs[kNumRepeats];
1486 for (void*& ptr : ptrs) {
1487 ptr = allocator.root()->Alloc(requested_size, type_name);
1488 // Double check.
1489 if (size <= MaxRegularSlotSpanSize()) {
1490 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1491 EXPECT_EQ(
1492 requested_size,
1493 allocator.root()->AllocationCapacityFromSlotStart(slot_start));
1494 }
1495
1496 uintptr_t address = UntagPtr(ptr);
1497 EXPECT_EQ(IsPtrWithinSameAlloc(address, address - kFarFarAwayDelta, 0u),
1498 PtrPosWithinAlloc::kFarOOB);
1499 EXPECT_EQ(IsPtrWithinSameAlloc(address, address - kSuperPageSize, 0u),
1500 PtrPosWithinAlloc::kFarOOB);
1501 EXPECT_EQ(IsPtrWithinSameAlloc(address, address - 1, 0u),
1502 PtrPosWithinAlloc::kFarOOB);
1503 EXPECT_EQ(IsPtrWithinSameAlloc(address, address, 0u),
1504 PtrPosWithinAlloc::kInBounds);
1505 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size / 2, 0u),
1506 PtrPosWithinAlloc::kInBounds);
1507 #if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
1508 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size - 1, 1u),
1509 PtrPosWithinAlloc::kInBounds);
1510 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size, 1u),
1511 PtrPosWithinAlloc::kAllocEnd);
1512 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size - 4, 4u),
1513 PtrPosWithinAlloc::kInBounds);
1514 for (size_t subtrahend = 0; subtrahend < 4; subtrahend++) {
1515 EXPECT_EQ(IsPtrWithinSameAlloc(
1516 address, address + requested_size - subtrahend, 4u),
1517 PtrPosWithinAlloc::kAllocEnd);
1518 }
1519 #else // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
1520 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size, 0u),
1521 PtrPosWithinAlloc::kInBounds);
1522 #endif
1523 EXPECT_EQ(IsPtrWithinSameAlloc(address, address + requested_size + 1, 0u),
1524 PtrPosWithinAlloc::kFarOOB);
1525 EXPECT_EQ(IsPtrWithinSameAlloc(
1526 address, address + requested_size + kSuperPageSize, 0u),
1527 PtrPosWithinAlloc::kFarOOB);
1528 EXPECT_EQ(IsPtrWithinSameAlloc(
1529 address, address + requested_size + kFarFarAwayDelta, 0u),
1530 PtrPosWithinAlloc::kFarOOB);
1531 EXPECT_EQ(
1532 IsPtrWithinSameAlloc(address + requested_size,
1533 address + requested_size + kFarFarAwayDelta, 0u),
1534 PtrPosWithinAlloc::kFarOOB);
1535 EXPECT_EQ(
1536 IsPtrWithinSameAlloc(address + requested_size,
1537 address + requested_size + kSuperPageSize, 0u),
1538 PtrPosWithinAlloc::kFarOOB);
1539 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1540 address + requested_size + 1, 0u),
1541 PtrPosWithinAlloc::kFarOOB);
1542 #if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
1543 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 1,
1544 address + requested_size - 1, 1u),
1545 PtrPosWithinAlloc::kInBounds);
1546 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 1,
1547 address + requested_size, 1u),
1548 PtrPosWithinAlloc::kAllocEnd);
1549 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1550 address + requested_size, 1u),
1551 PtrPosWithinAlloc::kAllocEnd);
1552 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size - 4,
1553 address + requested_size - 4, 4u),
1554 PtrPosWithinAlloc::kInBounds);
1555 for (size_t addend = 1; addend < 4; addend++) {
1556 EXPECT_EQ(
1557 IsPtrWithinSameAlloc(address + requested_size - 4,
1558 address + requested_size - 4 + addend, 4u),
1559 PtrPosWithinAlloc::kAllocEnd);
1560 }
1561 #else // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
1562 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1563 address + requested_size, 0u),
1564 PtrPosWithinAlloc::kInBounds);
1565 #endif
1566 EXPECT_EQ(IsPtrWithinSameAlloc(
1567 address + requested_size,
1568 address + requested_size - (requested_size / 2), 0u),
1569 PtrPosWithinAlloc::kInBounds);
1570 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size, address, 0u),
1571 PtrPosWithinAlloc::kInBounds);
1572 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size, address - 1, 0u),
1573 PtrPosWithinAlloc::kFarOOB);
1574 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1575 address - kSuperPageSize, 0u),
1576 PtrPosWithinAlloc::kFarOOB);
1577 EXPECT_EQ(IsPtrWithinSameAlloc(address + requested_size,
1578 address - kFarFarAwayDelta, 0u),
1579 PtrPosWithinAlloc::kFarOOB);
1580 }
1581
1582 for (void* ptr : ptrs) {
1583 allocator.root()->Free(ptr);
1584 }
1585 }
1586 }
1587
1588 TEST_P(PartitionAllocTest, GetSlotStartMultiplePages) {
1589 if (!UseBRPPool()) {
1590 return;
1591 }
1592
1593 auto* root = allocator.root();
1594 // Find the smallest bucket with multiple PartitionPages. When searching for
1595 // a bucket here, we need to check two conditions:
1596 // (1) The bucket is used in our current bucket distribution.
1597 // (2) The bucket is large enough that our requested size (see below) will be
1598 // non-zero.
1599 size_t real_size = 0;
1600 for (const auto& bucket : root->buckets) {
1601 if ((root->buckets + SizeToIndex(bucket.slot_size))->slot_size !=
1602 bucket.slot_size) {
1603 continue;
1604 }
1605 if (bucket.slot_size <= ExtraAllocSize(allocator)) {
1606 continue;
1607 }
1608 if (bucket.num_system_pages_per_slot_span >
1609 NumSystemPagesPerPartitionPage()) {
1610 real_size = bucket.slot_size;
1611 break;
1612 }
1613 }
1614
1615 // Make sure that we've managed to find an appropriate bucket.
1616 ASSERT_GT(real_size, 0u);
1617
1618 const size_t requested_size = real_size - ExtraAllocSize(allocator);
1619 // Double check we don't end up with 0 or negative size.
1620 EXPECT_GT(requested_size, 0u);
1621 EXPECT_LE(requested_size, real_size);
1622 const auto* bucket = allocator.root()->buckets + SizeToIndex(real_size);
1623 EXPECT_EQ(bucket->slot_size, real_size);
1624 // Make sure the test is testing multiple partition pages case.
1625 EXPECT_GT(bucket->num_system_pages_per_slot_span,
1626 PartitionPageSize() / SystemPageSize());
1627 size_t num_slots =
1628 (bucket->num_system_pages_per_slot_span * SystemPageSize()) / real_size;
1629 std::vector<void*> ptrs;
1630 for (size_t i = 0; i < num_slots; ++i) {
1631 ptrs.push_back(allocator.root()->Alloc(requested_size, type_name));
1632 }
1633 for (void* ptr : ptrs) {
1634 uintptr_t address = UntagPtr(ptr);
1635 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1636 EXPECT_EQ(allocator.root()->AllocationCapacityFromSlotStart(slot_start),
1637 requested_size);
1638 for (size_t offset = 0; offset < requested_size; offset += 13) {
1639 EXPECT_EQ(PartitionAllocGetSlotStartAndSizeInBRPPool(address + offset)
1640 .slot_start,
1641 slot_start);
1642 }
1643 allocator.root()->Free(ptr);
1644 }
1645 }
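
// Illustration only: a condensed, hedged sketch of the interior-pointer lookup
// exercised above. Any address inside a live allocation maps back to its slot
// start via PartitionAllocGetSlotStartAndSizeInBRPPool(). The test name is our
// own addition.
TEST_P(PartitionAllocTest, InteriorPointerLookupSketch) {
  if (!UseBRPPool()) {
    return;
  }
  void* ptr = allocator.root()->Alloc(128, type_name);
  EXPECT_TRUE(ptr);
  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
  uintptr_t address = UntagPtr(ptr);
  // An address in the middle of the object still resolves to the slot start.
  EXPECT_EQ(
      PartitionAllocGetSlotStartAndSizeInBRPPool(address + 64).slot_start,
      slot_start);
  allocator.root()->Free(ptr);
}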
1646 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1647
1648 // Test the realloc() contract.
1649 TEST_P(PartitionAllocTest, Realloc) {
1650 // realloc(0, size) should be equivalent to malloc().
1651 void* ptr = allocator.root()->Realloc(nullptr, kTestAllocSize, type_name);
1652 memset(ptr, 'A', kTestAllocSize);
1653 auto* slot_span =
1654 SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1655 // realloc(ptr, 0) should be equivalent to free().
1656 void* ptr2 = allocator.root()->Realloc(ptr, 0, type_name);
1657 EXPECT_EQ(nullptr, ptr2);
1658 EXPECT_EQ(allocator.root()->ObjectToSlotStart(ptr),
1659 UntagPtr(slot_span->get_freelist_head()));
1660
1661 // Test that growing an allocation with realloc() copies everything from the
1662 // old allocation.
1663 size_t size = SystemPageSize() - ExtraAllocSize(allocator);
1664 // Confirm size fills the entire slot.
1665 ASSERT_EQ(size, allocator.root()->AllocationCapacityFromRequestedSize(size));
1666 ptr = allocator.root()->Alloc(size, type_name);
1667 memset(ptr, 'A', size);
1668 ptr2 = allocator.root()->Realloc(ptr, size + 1, type_name);
1669 PA_EXPECT_PTR_NE(ptr, ptr2);
1670 char* char_ptr2 = static_cast<char*>(ptr2);
1671 EXPECT_EQ('A', char_ptr2[0]);
1672 EXPECT_EQ('A', char_ptr2[size - 1]);
1673 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1674 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr2[size]));
1675 #endif
1676
1677 // Test that shrinking an allocation with realloc() also copies everything
1678 // from the old allocation. Use |size - 1| to test what happens to the extra
1679 // space before the cookie.
1680 ptr = allocator.root()->Realloc(ptr2, size - 1, type_name);
1681 PA_EXPECT_PTR_NE(ptr2, ptr);
1682 char* char_ptr = static_cast<char*>(ptr);
1683 EXPECT_EQ('A', char_ptr[0]);
1684 EXPECT_EQ('A', char_ptr[size - 2]);
1685 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1686 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr[size - 1]));
1687 #endif
1688
1689 allocator.root()->Free(ptr);
1690
1691 // Single-slot slot spans...
1692 // Test that growing an allocation with realloc() copies everything from the
1693 // old allocation.
1694 size = MaxRegularSlotSpanSize() + 1;
1695 ASSERT_LE(2 * size, kMaxBucketed); // should be in single-slot span range
1696 // Confirm size doesn't fill the entire slot.
1697 ASSERT_LT(size, allocator.root()->AllocationCapacityFromRequestedSize(size));
1698 ptr = allocator.root()->Alloc(size, type_name);
1699 memset(ptr, 'A', size);
1700 ptr2 = allocator.root()->Realloc(ptr, size * 2, type_name);
1701 PA_EXPECT_PTR_NE(ptr, ptr2);
1702 char_ptr2 = static_cast<char*>(ptr2);
1703 EXPECT_EQ('A', char_ptr2[0]);
1704 EXPECT_EQ('A', char_ptr2[size - 1]);
1705 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1706 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr2[size]));
1707 #endif
1708 allocator.root()->Free(ptr2);
1709
1710 // Test that shrinking an allocation with realloc() also copies everything
1711 // from the old allocation.
1712 size = 2 * (MaxRegularSlotSpanSize() + 1);
1713 ASSERT_GT(size / 2, MaxRegularSlotSpanSize()); // in single-slot span range
1714 ptr = allocator.root()->Alloc(size, type_name);
1715 memset(ptr, 'A', size);
1716 ptr2 = allocator.root()->Realloc(ptr2, size / 2, type_name);
1717 PA_EXPECT_PTR_NE(ptr, ptr2);
1718 char_ptr2 = static_cast<char*>(ptr2);
1719 EXPECT_EQ('A', char_ptr2[0]);
1720 EXPECT_EQ('A', char_ptr2[size / 2 - 1]);
1721 #if BUILDFLAG(PA_DCHECK_IS_ON)
1722 // For single-slot slot spans, the cookie is always placed immediately after
1723 // the allocation.
1724 EXPECT_EQ(kCookieValue[0], static_cast<unsigned char>(char_ptr2[size / 2]));
1725 #endif
1726 allocator.root()->Free(ptr2);
1727
1728 // Test that shrinking a direct mapped allocation happens in-place.
1729 // Pick a large size so that Realloc doesn't think it's worthwhile to
1730 // downsize even if one less super page is used (due to high granularity on
1731 // 64-bit systems).
1732 size = 10 * kSuperPageSize + SystemPageSize() - 42;
1733 ASSERT_GT(size - 32 * SystemPageSize(), kMaxBucketed);
1734 ptr = allocator.root()->Alloc(size, type_name);
1735 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1736 size_t actual_capacity =
1737 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1738 ptr2 = allocator.root()->Realloc(ptr, size - SystemPageSize(), type_name);
1739 uintptr_t slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
1740 EXPECT_EQ(slot_start, slot_start2);
1741 EXPECT_EQ(actual_capacity - SystemPageSize(),
1742 allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
1743 void* ptr3 =
1744 allocator.root()->Realloc(ptr2, size - 32 * SystemPageSize(), type_name);
1745 uintptr_t slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
1746 EXPECT_EQ(slot_start2, slot_start3);
1747 EXPECT_EQ(actual_capacity - 32 * SystemPageSize(),
1748 allocator.root()->AllocationCapacityFromSlotStart(slot_start3));
1749
1750 // Test that a previously in-place shrunk direct mapped allocation can be
1751 // expanded up again up to its original size.
1752 ptr = allocator.root()->Realloc(ptr3, size, type_name);
1753 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1754 EXPECT_EQ(slot_start3, slot_start);
1755 EXPECT_EQ(actual_capacity,
1756 allocator.root()->AllocationCapacityFromSlotStart(slot_start));
1757
1758 // Test that the allocation can be expanded in place up to its capacity.
1759 ptr2 = allocator.root()->Realloc(ptr, actual_capacity, type_name);
1760 slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
1761 EXPECT_EQ(slot_start, slot_start2);
1762 EXPECT_EQ(actual_capacity,
1763 allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
1764
1765 // Test that a direct mapped allocation is performed not in-place when the
1766 // new size is small enough.
1767 ptr3 = allocator.root()->Realloc(ptr2, SystemPageSize(), type_name);
1768 slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
1769 EXPECT_NE(slot_start, slot_start3);
1770
1771 allocator.root()->Free(ptr3);
1772 }
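
// Illustration only: a compact, hedged sketch of the realloc() contract
// exercised above. Realloc(nullptr, n) behaves like Alloc(), growing preserves
// the old bytes, and Realloc(ptr, 0) behaves like Free(). The test name is our
// own addition.
TEST_P(PartitionAllocTest, ReallocContractSketch) {
  char* ptr = static_cast<char*>(
      allocator.root()->Realloc(nullptr, kTestAllocSize, type_name));
  EXPECT_TRUE(ptr);
  memset(ptr, 'X', kTestAllocSize);

  // Growing copies the existing contents, whether or not the object moves.
  char* ptr2 = static_cast<char*>(
      allocator.root()->Realloc(ptr, 4 * kTestAllocSize, type_name));
  EXPECT_TRUE(ptr2);
  EXPECT_EQ('X', ptr2[0]);
  EXPECT_EQ('X', ptr2[kTestAllocSize - 1]);

  // Shrinking to zero frees the allocation and returns nullptr.
  EXPECT_EQ(nullptr, allocator.root()->Realloc(ptr2, 0, type_name));
}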
1773
1774 TEST_P(PartitionAllocTest, ReallocDirectMapAligned) {
1775 size_t alignments[] = {
1776 PartitionPageSize(),
1777 2 * PartitionPageSize(),
1778 kMaxSupportedAlignment / 2,
1779 kMaxSupportedAlignment,
1780 };
1781
1782 for (size_t alignment : alignments) {
1783 // Test that shrinking a direct mapped allocation happens in-place.
1784 // Pick a large size so that Realloc doesn't think it's worthwhile to
1785 // downsize even if one less super page is used (due to high granularity on
1786 // 64-bit systems), even if the alignment padding is taken out.
1787 size_t size = 10 * kSuperPageSize + SystemPageSize() - 42;
1788 ASSERT_GT(size, kMaxBucketed);
1789 void* ptr =
1790 allocator.root()->AllocInternalForTesting(size, alignment, type_name);
1791 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
1792 size_t actual_capacity =
1793 allocator.root()->AllocationCapacityFromSlotStart(slot_start);
1794 void* ptr2 =
1795 allocator.root()->Realloc(ptr, size - SystemPageSize(), type_name);
1796 uintptr_t slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
1797 EXPECT_EQ(slot_start, slot_start2);
1798 EXPECT_EQ(actual_capacity - SystemPageSize(),
1799 allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
1800 void* ptr3 = allocator.root()->Realloc(ptr2, size - 32 * SystemPageSize(),
1801 type_name);
1802 uintptr_t slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
1803 EXPECT_EQ(slot_start2, slot_start3);
1804 EXPECT_EQ(actual_capacity - 32 * SystemPageSize(),
1805 allocator.root()->AllocationCapacityFromSlotStart(slot_start3));
1806
1807 // Test that a previously in-place shrunk direct mapped allocation can be
1808 // expanded up again up to its original size.
1809 ptr = allocator.root()->Realloc(ptr3, size, type_name);
1810 slot_start = allocator.root()->ObjectToSlotStart(ptr);
1811 EXPECT_EQ(slot_start3, slot_start);
1812 EXPECT_EQ(actual_capacity,
1813 allocator.root()->AllocationCapacityFromSlotStart(slot_start));
1814
1815 // Test that the allocation can be expanded in place up to its capacity.
1816 ptr2 = allocator.root()->Realloc(ptr, actual_capacity, type_name);
1817 slot_start2 = allocator.root()->ObjectToSlotStart(ptr2);
1818 EXPECT_EQ(slot_start, slot_start2);
1819 EXPECT_EQ(actual_capacity,
1820 allocator.root()->AllocationCapacityFromSlotStart(slot_start2));
1821
1822 // Test that a direct mapped allocation is performed not in-place when the
1823 // new size is small enough.
1824 ptr3 = allocator.root()->Realloc(ptr2, SystemPageSize(), type_name);
1825 slot_start3 = allocator.root()->ObjectToSlotStart(ptr3);
1826 EXPECT_NE(slot_start2, slot_start3);
1827
1828 allocator.root()->Free(ptr3);
1829 }
1830 }
1831
1832 TEST_P(PartitionAllocTest, ReallocDirectMapAlignedRelocate) {
1833 // Pick a size such that the alignment will put it across the super page
1834 // boundary.
1835 size_t size = 2 * kSuperPageSize - kMaxSupportedAlignment + SystemPageSize();
1836 ASSERT_GT(size, kMaxBucketed);
1837 void* ptr = allocator.root()->AllocInternalForTesting(
1838 size, kMaxSupportedAlignment, type_name);
1839 // Reallocating with the same size will actually relocate, because without a
1840 // need for alignment we can downsize the reservation significantly.
1841 void* ptr2 = allocator.root()->Realloc(ptr, size, type_name);
1842 PA_EXPECT_PTR_NE(ptr, ptr2);
1843 allocator.root()->Free(ptr2);
1844
1845 // Again pick a size such that the alignment will put it across the super
1846 // page boundary, but this time make it so large that Realloc doesn't find
1847 // it worth shrinking.
1848 size = 10 * kSuperPageSize - kMaxSupportedAlignment + SystemPageSize();
1849 ASSERT_GT(size, kMaxBucketed);
1850 ptr = allocator.root()->AllocInternalForTesting(size, kMaxSupportedAlignment,
1851 type_name);
1852 ptr2 = allocator.root()->Realloc(ptr, size, type_name);
1853 EXPECT_EQ(ptr, ptr2);
1854 allocator.root()->Free(ptr2);
1855 }
1856
1857 // Tests the handing out of freelists for partial slot spans.
1858 TEST_P(PartitionAllocTest, PartialPageFreelists) {
1859 size_t big_size = SystemPageSize() - ExtraAllocSize(allocator);
1860 size_t bucket_index = SizeToIndex(big_size + ExtraAllocSize(allocator));
1861 PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
1862 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1863
1864 void* ptr = allocator.root()->Alloc(big_size, type_name);
1865 EXPECT_TRUE(ptr);
1866
1867 auto* slot_span =
1868 SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1869 size_t total_slots =
1870 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
1871 (big_size + ExtraAllocSize(allocator));
1872 EXPECT_EQ(4u, total_slots);
1873 // The freelist should be empty: only one slot fits in the first system
1874 // page, and it was handed out directly, so the head is null and the
1875 // remaining slots stay unprovisioned.
1876 EXPECT_FALSE(slot_span->get_freelist_head());
1877 EXPECT_EQ(1u, slot_span->num_allocated_slots);
1878 EXPECT_EQ(3u, slot_span->num_unprovisioned_slots);
1879
1880 void* ptr2 = allocator.root()->Alloc(big_size, type_name);
1881 EXPECT_TRUE(ptr2);
1882 EXPECT_FALSE(slot_span->get_freelist_head());
1883 EXPECT_EQ(2u, slot_span->num_allocated_slots);
1884 EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
1885
1886 void* ptr3 = allocator.root()->Alloc(big_size, type_name);
1887 EXPECT_TRUE(ptr3);
1888 EXPECT_FALSE(slot_span->get_freelist_head());
1889 EXPECT_EQ(3u, slot_span->num_allocated_slots);
1890 EXPECT_EQ(1u, slot_span->num_unprovisioned_slots);
1891
1892 void* ptr4 = allocator.root()->Alloc(big_size, type_name);
1893 EXPECT_TRUE(ptr4);
1894 EXPECT_FALSE(slot_span->get_freelist_head());
1895 EXPECT_EQ(4u, slot_span->num_allocated_slots);
1896 EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
1897
1898 void* ptr5 = allocator.root()->Alloc(big_size, type_name);
1899 EXPECT_TRUE(ptr5);
1900
1901 auto* slot_span2 = SlotSpanMetadata::FromSlotStart(
1902 allocator.root()->ObjectToSlotStart(ptr5));
1903 EXPECT_EQ(1u, slot_span2->num_allocated_slots);
1904
1905 // Churn things a little whilst there's a partial slot span freelist.
1906 allocator.root()->Free(ptr);
1907 ptr = allocator.root()->Alloc(big_size, type_name);
1908 void* ptr6 = allocator.root()->Alloc(big_size, type_name);
1909
1910 allocator.root()->Free(ptr);
1911 allocator.root()->Free(ptr2);
1912 allocator.root()->Free(ptr3);
1913 allocator.root()->Free(ptr4);
1914 allocator.root()->Free(ptr5);
1915 allocator.root()->Free(ptr6);
1916 EXPECT_TRUE(slot_span->in_empty_cache());
1917 EXPECT_TRUE(slot_span2->in_empty_cache());
1918 EXPECT_TRUE(slot_span2->get_freelist_head());
1919 EXPECT_EQ(0u, slot_span2->num_allocated_slots);
1920
1921 // Size that's just above half a page.
1922 size_t non_dividing_size =
1923 SystemPageSize() / 2 + 1 - ExtraAllocSize(allocator);
1924 bucket_index = SizeToIndex(non_dividing_size + ExtraAllocSize(allocator));
1925 bucket = &allocator.root()->buckets[bucket_index];
1926 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1927
1928 ptr = allocator.root()->Alloc(non_dividing_size, type_name);
1929 EXPECT_TRUE(ptr);
1930
1931 slot_span =
1932 SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1933 total_slots =
1934 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
1935 bucket->slot_size;
1936
1937 EXPECT_FALSE(slot_span->get_freelist_head());
1938 EXPECT_EQ(1u, slot_span->num_allocated_slots);
1939 EXPECT_EQ(total_slots - 1, slot_span->num_unprovisioned_slots);
1940
1941 ptr2 = allocator.root()->Alloc(non_dividing_size, type_name);
1942 EXPECT_TRUE(ptr2);
1943 EXPECT_TRUE(slot_span->get_freelist_head());
1944 EXPECT_EQ(2u, slot_span->num_allocated_slots);
1945 // 2 slots got provisioned: the first one fills the rest of the first
1946 // (already provisioned) page and exceeds it by just a tad, thus leading to
1947 // provisioning a new page, and the second one fully fits within that new page.
1948 EXPECT_EQ(total_slots - 3, slot_span->num_unprovisioned_slots);
1949
1950 ptr3 = allocator.root()->Alloc(non_dividing_size, type_name);
1951 EXPECT_TRUE(ptr3);
1952 EXPECT_FALSE(slot_span->get_freelist_head());
1953 EXPECT_EQ(3u, slot_span->num_allocated_slots);
1954 EXPECT_EQ(total_slots - 3, slot_span->num_unprovisioned_slots);
1955
1956 allocator.root()->Free(ptr);
1957 allocator.root()->Free(ptr2);
1958 allocator.root()->Free(ptr3);
1959 EXPECT_TRUE(slot_span->in_empty_cache());
1960 EXPECT_TRUE(slot_span2->get_freelist_head());
1961 EXPECT_EQ(0u, slot_span2->num_allocated_slots);
1962
1963 // And test a couple of sizes that do not cross SystemPageSize() with a
1964 // single allocation.
1965 size_t medium_size = (SystemPageSize() / 2) - ExtraAllocSize(allocator);
1966 bucket_index = SizeToIndex(medium_size + ExtraAllocSize(allocator));
1967 bucket = &allocator.root()->buckets[bucket_index];
1968 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1969
1970 ptr = allocator.root()->Alloc(medium_size, type_name);
1971 EXPECT_TRUE(ptr);
1972 slot_span =
1973 SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1974 EXPECT_EQ(1u, slot_span->num_allocated_slots);
1975 total_slots =
1976 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
1977 (medium_size + ExtraAllocSize(allocator));
1978 size_t first_slot_span_slots =
1979 SystemPageSize() / (medium_size + ExtraAllocSize(allocator));
1980 EXPECT_EQ(2u, first_slot_span_slots);
1981 EXPECT_EQ(total_slots - first_slot_span_slots,
1982 slot_span->num_unprovisioned_slots);
1983
1984 allocator.root()->Free(ptr);
1985
1986 size_t small_size = (SystemPageSize() / 4) - ExtraAllocSize(allocator);
1987 bucket_index = SizeToIndex(small_size + ExtraAllocSize(allocator));
1988 bucket = &allocator.root()->buckets[bucket_index];
1989 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
1990
1991 ptr = allocator.root()->Alloc(small_size, type_name);
1992 EXPECT_TRUE(ptr);
1993 slot_span =
1994 SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
1995 EXPECT_EQ(1u, slot_span->num_allocated_slots);
1996 total_slots =
1997 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
1998 (small_size + ExtraAllocSize(allocator));
1999 first_slot_span_slots =
2000 SystemPageSize() / (small_size + ExtraAllocSize(allocator));
2001 EXPECT_EQ(total_slots - first_slot_span_slots,
2002 slot_span->num_unprovisioned_slots);
2003
2004 allocator.root()->Free(ptr);
2005 EXPECT_TRUE(slot_span->get_freelist_head());
2006 EXPECT_EQ(0u, slot_span->num_allocated_slots);
2007
2008 ASSERT_LT(ExtraAllocSize(allocator), 64u);
2009 size_t very_small_size = (ExtraAllocSize(allocator) <= 32)
2010 ? (32 - ExtraAllocSize(allocator))
2011 : (64 - ExtraAllocSize(allocator));
2012 size_t very_small_adjusted_size =
2013 allocator.root()->AdjustSize0IfNeeded(very_small_size);
2014 bucket_index =
2015 SizeToIndex(very_small_adjusted_size + ExtraAllocSize(allocator));
2016 bucket = &allocator.root()->buckets[bucket_index];
2017 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
2018
2019 ptr = allocator.root()->Alloc(very_small_size, type_name);
2020 EXPECT_TRUE(ptr);
2021 slot_span =
2022 SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2023 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2024 size_t very_small_actual_size = allocator.root()->GetUsableSize(ptr);
2025 total_slots =
2026 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
2027 (very_small_actual_size + ExtraAllocSize(allocator));
2028 first_slot_span_slots =
2029 SystemPageSize() / (very_small_actual_size + ExtraAllocSize(allocator));
2030 EXPECT_EQ(total_slots - first_slot_span_slots,
2031 slot_span->num_unprovisioned_slots);
2032
2033 allocator.root()->Free(ptr);
2034 EXPECT_TRUE(slot_span->get_freelist_head());
2035 EXPECT_EQ(0u, slot_span->num_allocated_slots);
2036
2037 // And try an allocation size (against the generic allocator) that is
2038 // larger than a system page.
2039 size_t page_and_a_half_size =
2040 (SystemPageSize() + (SystemPageSize() / 2)) - ExtraAllocSize(allocator);
2041 ptr = allocator.root()->Alloc(page_and_a_half_size, type_name);
2042 EXPECT_TRUE(ptr);
2043 slot_span =
2044 SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2045 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2046 // Only the first slot was provisioned, and that's the one that was just
2047 // allocated so the free list is empty.
2048 EXPECT_TRUE(!slot_span->get_freelist_head());
2049 total_slots =
2050 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
2051 (page_and_a_half_size + ExtraAllocSize(allocator));
2052 EXPECT_EQ(total_slots - 1, slot_span->num_unprovisioned_slots);
2053 ptr2 = allocator.root()->Alloc(page_and_a_half_size, type_name);
2054 EXPECT_TRUE(ptr2);
2055 slot_span =
2056 SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2057 EXPECT_EQ(2u, slot_span->num_allocated_slots);
2058 // As above, only one slot was provisioned.
2059 EXPECT_TRUE(!slot_span->get_freelist_head());
2060 EXPECT_EQ(total_slots - 2, slot_span->num_unprovisioned_slots);
2061 allocator.root()->Free(ptr);
2062 allocator.root()->Free(ptr2);
2063
2064 // And then make sure that exactly the page size only faults one page.
2065 size_t page_size = SystemPageSize() - ExtraAllocSize(allocator);
2066 ptr = allocator.root()->Alloc(page_size, type_name);
2067 EXPECT_TRUE(ptr);
2068 slot_span =
2069 SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2070 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2071 EXPECT_TRUE(slot_span->get_freelist_head());
2072 total_slots =
2073 (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
2074 (page_size + ExtraAllocSize(allocator));
2075 EXPECT_EQ(total_slots - 2, slot_span->num_unprovisioned_slots);
2076 allocator.root()->Free(ptr);
2077 }
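
// Illustration only: a hedged arithmetic sketch of the slot-count computations
// used throughout the test above. A slot span spanning N system pages holds
// (N * SystemPageSize()) / slot_size slots, and the first Alloc() only
// provisions the first system page's worth of slots. The test name is our own
// addition.
TEST_P(PartitionAllocTest, SlotsPerSpanArithmeticSketch) {
  const size_t requested = SystemPageSize() / 2 - ExtraAllocSize(allocator);
  void* ptr = allocator.root()->Alloc(requested, type_name);
  EXPECT_TRUE(ptr);
  auto* slot_span = SlotSpanMetadata::FromSlotStart(
      allocator.root()->ObjectToSlotStart(ptr));
  // The requested size plus per-slot overhead is exactly the bucket's slot
  // size here (half a system page).
  const size_t slot_size = requested + ExtraAllocSize(allocator);
  const size_t total_slots =
      (slot_span->bucket->num_system_pages_per_slot_span * SystemPageSize()) /
      slot_size;
  const size_t first_page_slots = SystemPageSize() / slot_size;
  EXPECT_EQ(total_slots - first_page_slots,
            slot_span->num_unprovisioned_slots);
  allocator.root()->Free(ptr);
}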
2078
2079 // Test some of the fragmentation-resistant properties of the allocator.
2080 TEST_P(PartitionAllocTest, SlotSpanRefilling) {
2081 PartitionRoot::Bucket* bucket =
2082 &allocator.root()->buckets[test_bucket_index_];
2083
2084 // Grab two full slot spans and a non-full slot span.
2085 auto* slot_span1 = GetFullSlotSpan(kTestAllocSize);
2086 auto* slot_span2 = GetFullSlotSpan(kTestAllocSize);
2087 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2088 EXPECT_TRUE(ptr);
2089 EXPECT_NE(slot_span1, bucket->active_slot_spans_head);
2090 EXPECT_NE(slot_span2, bucket->active_slot_spans_head);
2091 auto* slot_span =
2092 SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2093 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2094
2095 // Work out a pointer into slot_span2 and free it; and then slot_span1 and
2096 // free it.
2097 void* ptr2 = allocator.root()->SlotStartToObject(
2098 SlotSpanMetadata::ToSlotSpanStart(slot_span1));
2099 allocator.root()->Free(ptr2);
2100 ptr2 = allocator.root()->SlotStartToObject(
2101 SlotSpanMetadata::ToSlotSpanStart(slot_span2));
2102 allocator.root()->Free(ptr2);
2103
2104 // If we perform two allocations from the same bucket now, we expect to
2105 // refill both the nearly full slot spans.
2106 std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
2107 std::ignore = allocator.root()->Alloc(kTestAllocSize, type_name);
2108 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2109
2110 FreeFullSlotSpan(allocator.root(), slot_span2);
2111 FreeFullSlotSpan(allocator.root(), slot_span1);
2112 allocator.root()->Free(ptr);
2113 }
2114
2115 // Basic tests to ensure that allocations work for partial page buckets.
2116 TEST_P(PartitionAllocTest, PartialPages) {
2117 // Find a size that is backed by a partial partition page.
2118 size_t size = sizeof(void*);
2119 size_t bucket_index;
2120
2121 PartitionRoot::Bucket* bucket = nullptr;
2122 constexpr size_t kMaxSize = 4000u;
2123 while (size < kMaxSize) {
2124 bucket_index = SizeToIndex(size + ExtraAllocSize(allocator));
2125 bucket = &allocator.root()->buckets[bucket_index];
2126 if (bucket->num_system_pages_per_slot_span %
2127 NumSystemPagesPerPartitionPage()) {
2128 break;
2129 }
2130 size += sizeof(void*);
2131 }
2132 EXPECT_LT(size, kMaxSize);
2133
2134 auto* slot_span1 = GetFullSlotSpan(size);
2135 auto* slot_span2 = GetFullSlotSpan(size);
2136 FreeFullSlotSpan(allocator.root(), slot_span2);
2137 FreeFullSlotSpan(allocator.root(), slot_span1);
2138 }
2139
2140 // Test correct handling if our mapping collides with another.
2141 TEST_P(PartitionAllocTest, MappingCollision) {
2142 size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
2143 // The -2 is because the first and last partition pages in a super page are
2144 // guard pages. We also discount the partition pages used for the free slot bitmap.
2145 size_t num_slot_span_needed =
2146 (NumPartitionPagesPerSuperPage() - 2 -
2147 partition_alloc::internal::NumPartitionPagesPerFreeSlotBitmap()) /
2148 num_pages_per_slot_span;
2149 size_t num_partition_pages_needed =
2150 num_slot_span_needed * num_pages_per_slot_span;
2151
2152 auto first_super_page_pages =
2153 std::make_unique<SlotSpanMetadata*[]>(num_partition_pages_needed);
2154 auto second_super_page_pages =
2155 std::make_unique<SlotSpanMetadata*[]>(num_partition_pages_needed);
2156
2157 size_t i;
2158 for (i = 0; i < num_partition_pages_needed; ++i) {
2159 first_super_page_pages[i] = GetFullSlotSpan(kTestAllocSize);
2160 }
2161
2162 uintptr_t slot_span_start =
2163 SlotSpanMetadata::ToSlotSpanStart(first_super_page_pages[0]);
2164 EXPECT_EQ(PartitionPageSize() +
2165 partition_alloc::internal::ReservedFreeSlotBitmapSize(),
2166 slot_span_start & kSuperPageOffsetMask);
2167 uintptr_t super_page =
2168 slot_span_start - PartitionPageSize() -
2169 partition_alloc::internal::ReservedFreeSlotBitmapSize();
2170 // Map a single system page either side of the mapping for our allocations,
2171 // with the goal of tripping up alignment of the next mapping.
2172 uintptr_t map1 =
2173 AllocPages(super_page - PageAllocationGranularity(),
2174 PageAllocationGranularity(), PageAllocationGranularity(),
2175 PageAccessibilityConfiguration(
2176 PageAccessibilityConfiguration::kInaccessible),
2177 PageTag::kPartitionAlloc);
2178 EXPECT_TRUE(map1);
2179 uintptr_t map2 =
2180 AllocPages(super_page + kSuperPageSize, PageAllocationGranularity(),
2181 PageAllocationGranularity(),
2182 PageAccessibilityConfiguration(
2183 PageAccessibilityConfiguration::kInaccessible),
2184 PageTag::kPartitionAlloc);
2185 EXPECT_TRUE(map2);
2186
2187 for (i = 0; i < num_partition_pages_needed; ++i) {
2188 second_super_page_pages[i] = GetFullSlotSpan(kTestAllocSize);
2189 }
2190
2191 FreePages(map1, PageAllocationGranularity());
2192 FreePages(map2, PageAllocationGranularity());
2193
2194 super_page = SlotSpanMetadata::ToSlotSpanStart(second_super_page_pages[0]);
2195 EXPECT_EQ(PartitionPageSize() +
2196 partition_alloc::internal::ReservedFreeSlotBitmapSize(),
2197 super_page & kSuperPageOffsetMask);
2198 super_page -= PartitionPageSize() +
2199 partition_alloc::internal::ReservedFreeSlotBitmapSize();
2200 // Map a single system page either side of the mapping for our allocations,
2201 // with the goal of tripping up alignment of the next mapping.
2202 map1 = AllocPages(super_page - PageAllocationGranularity(),
2203 PageAllocationGranularity(), PageAllocationGranularity(),
2204 PageAccessibilityConfiguration(
2205 PageAccessibilityConfiguration::kReadWriteTagged),
2206 PageTag::kPartitionAlloc);
2207 EXPECT_TRUE(map1);
2208 map2 = AllocPages(super_page + kSuperPageSize, PageAllocationGranularity(),
2209 PageAllocationGranularity(),
2210 PageAccessibilityConfiguration(
2211 PageAccessibilityConfiguration::kReadWriteTagged),
2212 PageTag::kPartitionAlloc);
2213 EXPECT_TRUE(map2);
2214 EXPECT_TRUE(TrySetSystemPagesAccess(
2215 map1, PageAllocationGranularity(),
2216 PageAccessibilityConfiguration(
2217 PageAccessibilityConfiguration::kInaccessible)));
2218 EXPECT_TRUE(TrySetSystemPagesAccess(
2219 map2, PageAllocationGranularity(),
2220 PageAccessibilityConfiguration(
2221 PageAccessibilityConfiguration::kInaccessible)));
2222
2223 auto* slot_span_in_third_super_page = GetFullSlotSpan(kTestAllocSize);
2224 FreePages(map1, PageAllocationGranularity());
2225 FreePages(map2, PageAllocationGranularity());
2226
2227 EXPECT_EQ(0u,
2228 SlotSpanMetadata::ToSlotSpanStart(slot_span_in_third_super_page) &
2229 PartitionPageOffsetMask());
2230
2231 // And make sure we really did get a page in a new superpage.
2232 EXPECT_NE(SlotSpanMetadata::ToSlotSpanStart(first_super_page_pages[0]) &
2233 kSuperPageBaseMask,
2234 SlotSpanMetadata::ToSlotSpanStart(slot_span_in_third_super_page) &
2235 kSuperPageBaseMask);
2236 EXPECT_NE(SlotSpanMetadata::ToSlotSpanStart(second_super_page_pages[0]) &
2237 kSuperPageBaseMask,
2238 SlotSpanMetadata::ToSlotSpanStart(slot_span_in_third_super_page) &
2239 kSuperPageBaseMask);
2240
2241 FreeFullSlotSpan(allocator.root(), slot_span_in_third_super_page);
2242 for (i = 0; i < num_partition_pages_needed; ++i) {
2243 FreeFullSlotSpan(allocator.root(), first_super_page_pages[i]);
2244 FreeFullSlotSpan(allocator.root(), second_super_page_pages[i]);
2245 }
2246 }
2247
2248 // Tests that slot spans in the free slot span cache do get freed as
2249 // appropriate.
2250 TEST_P(PartitionAllocTest, FreeCache) {
2251 EXPECT_EQ(0U, allocator.root()->get_total_size_of_committed_pages());
2252
2253 size_t big_size = 1000 - ExtraAllocSize(allocator);
2254 size_t bucket_index = SizeToIndex(big_size + ExtraAllocSize(allocator));
2255 PartitionBucket* bucket = &allocator.root()->buckets[bucket_index];
2256
2257 void* ptr = allocator.root()->Alloc(big_size, type_name);
2258 EXPECT_TRUE(ptr);
2259 auto* slot_span =
2260 SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2261 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
2262 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2263 // Lazy commit commits only needed pages.
2264 size_t expected_committed_size =
2265 kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
2266 EXPECT_EQ(expected_committed_size,
2267 allocator.root()->get_total_size_of_committed_pages());
2268 allocator.root()->Free(ptr);
2269 EXPECT_EQ(0u, slot_span->num_allocated_slots);
2270 EXPECT_TRUE(slot_span->in_empty_cache());
2271 EXPECT_TRUE(slot_span->get_freelist_head());
2272
2273 ClearEmptySlotSpanCache();
2274
2275 // Flushing the cache should have really freed the unused slot spans.
2276 EXPECT_FALSE(slot_span->get_freelist_head());
2277 EXPECT_FALSE(slot_span->in_empty_cache());
2278 EXPECT_EQ(0u, slot_span->num_allocated_slots);
2279 EXPECT_EQ(0u, allocator.root()->get_total_size_of_committed_pages());
2280
2281 // Check that an allocation works OK whilst in this state (a freed slot span
2282 // as the active slot spans head).
2283 ptr = allocator.root()->Alloc(big_size, type_name);
2284 EXPECT_FALSE(bucket->empty_slot_spans_head);
2285 allocator.root()->Free(ptr);
2286
2287 // Also check that a slot span that is bouncing immediately between empty and
2288 // used does not get freed.
2289 for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) {
2290 ptr = allocator.root()->Alloc(big_size, type_name);
2291 EXPECT_TRUE(slot_span->get_freelist_head());
2292 allocator.root()->Free(ptr);
2293 EXPECT_TRUE(slot_span->get_freelist_head());
2294 }
2295 EXPECT_EQ(expected_committed_size,
2296 allocator.root()->get_total_size_of_committed_pages());
2297 }
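
// Illustration only: a hedged sketch of the committed-size accounting relied
// on above. Committed pages drop back to the starting level once the empty
// slot span cache is flushed. The test name is our own addition.
TEST_P(PartitionAllocTest, CommittedPagesAccountingSketch) {
  const size_t committed_before =
      allocator.root()->get_total_size_of_committed_pages();
  const size_t size = 1000 - ExtraAllocSize(allocator);
  void* ptr = allocator.root()->Alloc(size, type_name);
  EXPECT_TRUE(ptr);
  EXPECT_GT(allocator.root()->get_total_size_of_committed_pages(),
            committed_before);
  allocator.root()->Free(ptr);
  // Flushing the empty slot span cache decommits the now-unused pages.
  ClearEmptySlotSpanCache();
  EXPECT_EQ(committed_before,
            allocator.root()->get_total_size_of_committed_pages());
}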
2298
2299 // Tests for a bug we had with losing references to free slot spans.
2300 TEST_P(PartitionAllocTest, LostFreeSlotSpansBug) {
2301 size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
2302
2303 void* ptr = allocator.root()->Alloc(size, type_name);
2304 EXPECT_TRUE(ptr);
2305 void* ptr2 = allocator.root()->Alloc(size, type_name);
2306 EXPECT_TRUE(ptr2);
2307
2308 SlotSpanMetadata* slot_span =
2309 SlotSpanMetadata::FromSlotStart(allocator.root()->ObjectToSlotStart(ptr));
2310 SlotSpanMetadata* slot_span2 = SlotSpanMetadata::FromSlotStart(
2311 allocator.root()->ObjectToSlotStart(ptr2));
2312 PartitionBucket* bucket = slot_span->bucket;
2313
2314 EXPECT_EQ(nullptr, bucket->empty_slot_spans_head);
2315 EXPECT_EQ(1u, slot_span->num_allocated_slots);
2316 EXPECT_EQ(1u, slot_span2->num_allocated_slots);
2317 EXPECT_TRUE(slot_span->is_full());
2318 EXPECT_TRUE(slot_span2->is_full());
2319 // The first span was kicked out from the active list, but the second one
2320 // wasn't.
2321 EXPECT_TRUE(slot_span->marked_full);
2322 EXPECT_FALSE(slot_span2->marked_full);
2323
2324 allocator.root()->Free(ptr);
2325 allocator.root()->Free(ptr2);
2326
2327 EXPECT_TRUE(bucket->empty_slot_spans_head);
2328 EXPECT_TRUE(bucket->empty_slot_spans_head->next_slot_span);
2329 EXPECT_EQ(0u, slot_span->num_allocated_slots);
2330 EXPECT_EQ(0u, slot_span2->num_allocated_slots);
2331 EXPECT_FALSE(slot_span->is_full());
2332 EXPECT_FALSE(slot_span2->is_full());
2333 EXPECT_FALSE(slot_span->marked_full);
2334 EXPECT_FALSE(slot_span2->marked_full);
2335 EXPECT_TRUE(slot_span->get_freelist_head());
2336 EXPECT_TRUE(slot_span2->get_freelist_head());
2337
2338 ClearEmptySlotSpanCache();
2339
2340 EXPECT_FALSE(slot_span->get_freelist_head());
2341 EXPECT_FALSE(slot_span2->get_freelist_head());
2342
2343 EXPECT_TRUE(bucket->empty_slot_spans_head);
2344 EXPECT_TRUE(bucket->empty_slot_spans_head->next_slot_span);
2345 EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
2346 bucket->active_slot_spans_head);
2347
2348 // At this moment, we have two decommitted slot spans, on the empty list.
2349 ptr = allocator.root()->Alloc(size, type_name);
2350 EXPECT_TRUE(ptr);
2351 allocator.root()->Free(ptr);
2352
2353 EXPECT_EQ(SlotSpanMetadata::get_sentinel_slot_span(),
2354 bucket->active_slot_spans_head);
2355 EXPECT_TRUE(bucket->empty_slot_spans_head);
2356 EXPECT_TRUE(bucket->decommitted_slot_spans_head);
2357
2358 ClearEmptySlotSpanCache();
2359
2360 // We're now set up to trigger a historical bug by scanning over the active
2361 // slot spans list. The current code gets into a different state, but we'll
2362 // keep the test as being an interesting corner case.
2363 ptr = allocator.root()->Alloc(size, type_name);
2364 EXPECT_TRUE(ptr);
2365 allocator.root()->Free(ptr);
2366
2367 EXPECT_TRUE(bucket->is_valid());
2368 EXPECT_TRUE(bucket->empty_slot_spans_head);
2369 EXPECT_TRUE(bucket->decommitted_slot_spans_head);
2370 }
2371
2372 #if defined(PA_HAS_DEATH_TESTS)
2373
2374 // Unit tests that check if an allocation fails in "return null" mode,
2375 // repeating it doesn't crash, and still returns null. The tests need to
2376 // stress memory subsystem limits to do so, hence they try to allocate
2377 // 6 GB of memory, each with a different per-allocation block sizes.
2378 //
2379 // On 64-bit systems we need to restrict the address space to force allocation
2380 // failure, so these tests run only on POSIX systems that provide setrlimit(),
2381 // and use it to limit address space to 6GB.
2382 //
2383 // Disable these tests on Android because, due to the allocation-heavy behavior,
2384 // they tend to get OOM-killed rather than pass.
2385 //
2386 // Disable these tests on Windows, since they run slower, so they tend to
2387 // time out and cause flakes.
2388 #if !BUILDFLAG(IS_WIN) && \
2389 (!defined(ARCH_CPU_64_BITS) || \
2390 (BUILDFLAG(IS_POSIX) && \
2391 !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)))) || \
2392 BUILDFLAG(IS_FUCHSIA)
2393 #define MAYBE_RepeatedAllocReturnNullDirect RepeatedAllocReturnNullDirect
2394 #define MAYBE_RepeatedReallocReturnNullDirect RepeatedReallocReturnNullDirect
2395 #else
2396 #define MAYBE_RepeatedAllocReturnNullDirect \
2397 DISABLED_RepeatedAllocReturnNullDirect
2398 #define MAYBE_RepeatedReallocReturnNullDirect \
2399 DISABLED_RepeatedReallocReturnNullDirect
2400 #endif
2401
2402 // The following four tests wrap a called function in an expect death statement
2403 // to perform their test, because they are non-hermetic. Specifically they are
2404 // going to attempt to exhaust the allocatable memory, which leaves the
2405 // allocator in a bad global state.
2406 // Performing them as death tests causes them to be forked into their own
2407 // process, so they won't pollute other tests.
2408 //
2409 // These tests are *very* slow when BUILDFLAG(PA_DCHECK_IS_ON), because they
2410 // memset() many GiB of data (see crbug.com/1168168).
2411 // TODO(lizeb): make these tests faster.
2412 TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedAllocReturnNullDirect) {
2413 // A direct-mapped allocation size.
2414 size_t direct_map_size = 32 * 1024 * 1024;
2415 ASSERT_GT(direct_map_size, kMaxBucketed);
2416 EXPECT_DEATH(DoReturnNullTest(direct_map_size, kPartitionAlloc),
2417 "Passed DoReturnNullTest");
2418 }
2419
2420 // Repeating above test with Realloc
2421 TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedReallocReturnNullDirect) {
2422 size_t direct_map_size = 32 * 1024 * 1024;
2423 ASSERT_GT(direct_map_size, kMaxBucketed);
2424 EXPECT_DEATH(DoReturnNullTest(direct_map_size, kPartitionRealloc),
2425 "Passed DoReturnNullTest");
2426 }
2427
2428 // TODO(crbug.com/1348221): Re-enable the tests below, once the allocator
2429 // actually returns nullptr for non-direct-mapped allocations.
2430 // When doing so, they will need to be made MAYBE_ like those above.
2431 //
2432 // Tests "return null" with a 512 kB block size.
2433 TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedAllocReturnNull) {
2434 // A single-slot but non-direct-mapped allocation size.
2435 size_t single_slot_size = 512 * 1024;
2436 ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
2437 ASSERT_LE(single_slot_size, kMaxBucketed);
2438 EXPECT_DEATH(DoReturnNullTest(single_slot_size, kPartitionAlloc),
2439 "Passed DoReturnNullTest");
2440 }
2441
2442 // Repeating above test with Realloc.
2443 TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedReallocReturnNull) {
2444 size_t single_slot_size = 512 * 1024;
2445 ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
2446 ASSERT_LE(single_slot_size, kMaxBucketed);
2447 EXPECT_DEATH(DoReturnNullTest(single_slot_size, kPartitionRealloc),
2448 "Passed DoReturnNullTest");
2449 }
2450
2451 #if BUILDFLAG(HAS_MEMORY_TAGGING)
2452 // Check that Arm's memory tagging extension (MTE) is correctly protecting
2453 // freed pointers. Writes to a free pointer should result in a crash.
2454 TEST_P(PartitionAllocDeathTest, MTEProtectsFreedPtr) {
2455 base::CPU cpu;
2456 if (!cpu.has_mte()) {
2457 // This test won't pass on systems without MTE.
2458 GTEST_SKIP();
2459 }
2460
2461 constexpr uint64_t kCookie = 0x1234567890ABCDEF;
2462 constexpr uint64_t kQuarantined = 0xEFEFEFEFEFEFEFEF;
2463
2464 // Make an arbitrary-sized small allocation.
2465 size_t alloc_size = 64 - ExtraAllocSize(allocator);
2466 uint64_t* ptr =
2467 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
2468 EXPECT_TRUE(ptr);
2469
2470 // Check that the allocation's writable.
2471 *ptr = kCookie;
2472
2473 // Invalidate ptr by freeing it.
2474 allocator.root()->Free(ptr);
2475
2476 // Writing to ptr after free() should crash
2477 EXPECT_EXIT(
2478 {
2479 // Should be in synchronous MTE mode for running this test.
2480 *ptr = kQuarantined;
2481 },
2482 testing::KilledBySignal(SIGSEGV), "");
2483 }
2484 #endif // BUILDFLAG(HAS_MEMORY_TAGGING)
2485
2486 // Make sure that malloc(-1) dies.
2487 // In the past, we had an integer overflow that would alias malloc(-1) to
2488 // malloc(0), which is not good.
2489 TEST_P(PartitionAllocDeathTest, LargeAllocs) {
2490 // Largest alloc.
2491 EXPECT_DEATH(allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
2492 // And the smallest allocation we expect to die.
2493 // TODO(bartekn): Separate into its own test, as it wouldn't run (same below).
2494 EXPECT_DEATH(allocator.root()->Alloc(MaxDirectMapped() + 1, type_name), "");
2495 }
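
// Illustration only: a hedged arithmetic sketch of the historical overflow
// mentioned above. Adding per-allocation overhead to SIZE_MAX wraps around to
// a tiny value, which is why the raw requested size must be range-checked
// before any adjustment. The overhead constant is an illustrative value, not
// the allocator's real extras; the test name is our own addition.
TEST_P(PartitionAllocDeathTest, SizeOverflowArithmeticSketch) {
  constexpr size_t kIllustrativeOverhead = 16;
  constexpr size_t kHuge = std::numeric_limits<size_t>::max();
  // Unsigned arithmetic wraps: SIZE_MAX + 16 == 15, aliasing a huge request
  // to a tiny one if the size isn't checked first.
  static_assert(kHuge + kIllustrativeOverhead == kIllustrativeOverhead - 1,
                "unsigned wrap-around aliases a huge request to a tiny one");
}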
2496
2497 // These tests don't work deterministically when BRP is enabled on certain
2498 // architectures. On Free(), BRP's ref-count inside in-slot metadata gets
2499 // overwritten by an encoded freelist pointer. On little-endian 64-bit
2500 // architectures, this happens to be always an even number, which will trigger
2501 // BRP's own CHECK (sic!). On other architectures, it's likely to be an odd
2502 // number >1, which will fool BRP into thinking the memory isn't freed and still
2503 // referenced, thus making it quarantine it and return early, before
2504 // PA_CHECK(slot_start != freelist_head) is reached.
2505 #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
2506 (BUILDFLAG(HAS_64_BIT_POINTERS) && defined(ARCH_CPU_LITTLE_ENDIAN))
2507
2508 // Check that our immediate double-free detection works.
2509 TEST_P(PartitionAllocDeathTest, ImmediateDoubleFree) {
2510 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2511 EXPECT_TRUE(ptr);
2512 allocator.root()->Free(ptr);
2513 EXPECT_DEATH(allocator.root()->Free(ptr), "");
2514 }
2515
2516 // As above, but when this isn't the only slot in the span.
2517 TEST_P(PartitionAllocDeathTest, ImmediateDoubleFree2ndSlot) {
2518 void* ptr0 = allocator.root()->Alloc(kTestAllocSize, type_name);
2519 EXPECT_TRUE(ptr0);
2520 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2521 EXPECT_TRUE(ptr);
2522 allocator.root()->Free(ptr);
2523 EXPECT_DEATH(allocator.root()->Free(ptr), "");
2524 allocator.root()->Free(ptr0);
2525 }
2526
2527 // Check that our double-free detection based on |num_allocated_slots| not going
2528 // below 0 works.
2529 //
2530 // Unlike in the ImmediateDoubleFree test, we can't have a 2ndSlot version, as
2531 // this protection wouldn't work when there is another allocated slot in the
2532 // span: that slot would keep |num_allocated_slots| from going below 0.
2533 TEST_P(PartitionAllocDeathTest, NumAllocatedSlotsDoubleFree) {
2534 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2535 EXPECT_TRUE(ptr);
2536 void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
2537 EXPECT_TRUE(ptr2);
2538 allocator.root()->Free(ptr);
2539 allocator.root()->Free(ptr2);
2540 // This is not an immediate double-free so our immediate detection won't
2541 // fire. However, it does take |num_allocated_slots| to -1, which is illegal
2542 // and should be trapped.
2543 EXPECT_DEATH(allocator.root()->Free(ptr), "");
2544 }
2545
2546 #endif // !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
2547 // (BUILDFLAG(HAS_64_BIT_POINTERS) && defined(ARCH_CPU_LITTLE_ENDIAN))
2548
2549 // Check that guard pages are present where expected.
2550 TEST_P(PartitionAllocDeathTest, DirectMapGuardPages) {
2551 const size_t kSizes[] = {
2552 kMaxBucketed + ExtraAllocSize(allocator) + 1,
2553 kMaxBucketed + SystemPageSize(), kMaxBucketed + PartitionPageSize(),
2554 partition_alloc::internal::base::bits::AlignUp(
2555 kMaxBucketed + kSuperPageSize, kSuperPageSize) -
2556 PartitionRoot::GetDirectMapMetadataAndGuardPagesSize()};
2557 for (size_t size : kSizes) {
2558 ASSERT_GT(size, kMaxBucketed);
2559 size -= ExtraAllocSize(allocator);
2560 EXPECT_GT(size, kMaxBucketed)
2561 << "allocation not large enough for direct allocation";
2562 void* ptr = allocator.root()->Alloc(size, type_name);
2563
2564 EXPECT_TRUE(ptr);
2565 char* char_ptr = static_cast<char*>(ptr) - kPointerOffset;
2566
2567 EXPECT_DEATH(*(char_ptr - 1) = 'A', "");
2568 EXPECT_DEATH(*(char_ptr + partition_alloc::internal::base::bits::AlignUp(
2569 size, SystemPageSize())) = 'A',
2570 "");
2571
2572 allocator.root()->Free(ptr);
2573 }
2574 }
2575
2576 // These tests rely on precise layout. They handle cookie, not in-slot metadata.
2577 #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
2578 PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
2579
2580 TEST_P(PartitionAllocDeathTest, UseAfterFreeDetection) {
2581 base::CPU cpu;
2582 void* data = allocator.root()->Alloc(100);
2583 allocator.root()->Free(data);
2584
2585 // Use-after-free write. It doesn't crash here, but the next allocation
2586 // should crash, since we corrupted the freelist.
2587 memset(data, 0x42, 100);
2588 EXPECT_DEATH(allocator.root()->Alloc(100), "");
2589 }
2590
2591 TEST_P(PartitionAllocDeathTest, FreelistCorruption) {
2592 base::CPU cpu;
2593 const size_t alloc_size = 2 * sizeof(void*);
2594 void** fake_freelist_entry =
2595 static_cast<void**>(allocator.root()->Alloc(alloc_size));
2596 fake_freelist_entry[0] = nullptr;
2597 fake_freelist_entry[1] = nullptr;
2598
2599 void** uaf_data = static_cast<void**>(allocator.root()->Alloc(alloc_size));
2600 allocator.root()->Free(uaf_data);
2601 // Try to confuse the allocator. This protection is still easy to circumvent
2602 // deliberately; one would "just" need to set uaf_data[1] to ~uaf_data[0].
2603 void* previous_uaf_data = uaf_data[0];
2604 uaf_data[0] = fake_freelist_entry;
2605 EXPECT_DEATH(allocator.root()->Alloc(alloc_size), "");
2606
2607 // Restore the freelist entry value, otherwise freelist corruption is detected
2608 // in TearDown(), crashing this process.
2609 uaf_data[0] = previous_uaf_data;
2610
2611 allocator.root()->Free(fake_freelist_entry);
2612 }
2613
2614 // With BUILDFLAG(PA_DCHECK_IS_ON), cookie already handles off-by-one detection.
2615 #if !BUILDFLAG(PA_DCHECK_IS_ON)
2616 TEST_P(PartitionAllocDeathTest, OffByOneDetection) {
2617 base::CPU cpu;
2618 const size_t alloc_size = 2 * sizeof(void*);
2619 char* array = static_cast<char*>(allocator.root()->Alloc(alloc_size));
2620 if (cpu.has_mte()) {
2621 EXPECT_DEATH(array[alloc_size] = 'A', "");
2622 } else {
2623 char previous_value = array[alloc_size];
2624 // volatile is required to prevent the compiler from getting too clever and
2625 // eliding the out-of-bounds write. The root cause is that the PA_MALLOC_FN
2626 // annotation tells the compiler (among other things) that the returned
2627 // value cannot alias anything.
2628 *const_cast<volatile char*>(&array[alloc_size]) = 'A';
2629 // Crash at the next allocation. This assumes that we are touching a new,
2630 // non-randomized slot span, where the next slot to be handed over to the
2631 // application directly follows the current one.
2632 EXPECT_DEATH(allocator.root()->Alloc(alloc_size), "");
2633
2634 // Restore integrity, otherwise the process will crash in TearDown().
2635 array[alloc_size] = previous_value;
2636 }
2637 }
2638
2639 TEST_P(PartitionAllocDeathTest, OffByOneDetectionWithRealisticData) {
2640 base::CPU cpu;
2641 const size_t alloc_size = 2 * sizeof(void*);
2642 void** array = static_cast<void**>(allocator.root()->Alloc(alloc_size));
2643 char valid;
2644 if (cpu.has_mte()) {
2645 EXPECT_DEATH(array[2] = &valid, "");
2646 } else {
2647 void* previous_value = array[2];
2648 // As above, needs volatile to convince the compiler to perform the write.
2649 *const_cast<void* volatile*>(&array[2]) = &valid;
2650 // Crash at the next allocation. This assumes that we are touching a new,
2651 // non-randomized slot span, where the next slot to be handed over to the
2652 // application directly follows the current one.
2653 EXPECT_DEATH(allocator.root()->Alloc(alloc_size), "");
2654 array[2] = previous_value;
2655 }
2656 }
2657 #endif // !BUILDFLAG(PA_DCHECK_IS_ON)
2658
2659 #endif // !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
2660 // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
2661
2662 #endif // defined(PA_HAS_DEATH_TESTS)
2663
2664 // Tests that |PartitionDumpStats| runs without crashing and returns non-zero
2665 // values when memory is allocated.
2666 TEST_P(PartitionAllocTest, DumpMemoryStats) {
2667 {
2668 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
2669 MockPartitionStatsDumper mock_stats_dumper;
2670 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2671 &mock_stats_dumper);
2672 EXPECT_TRUE(mock_stats_dumper.IsMemoryAllocationRecorded());
2673 allocator.root()->Free(ptr);
2674 }
2675
2676 // This series of tests checks the active -> empty -> decommitted states.
2677 {
2678 {
2679 void* ptr =
2680 allocator.root()->Alloc(2048 - ExtraAllocSize(allocator), type_name);
2681 MockPartitionStatsDumper dumper;
2682 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2683 &dumper);
2684 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2685
2686 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2687 EXPECT_TRUE(stats);
2688 EXPECT_TRUE(stats->is_valid);
2689 EXPECT_EQ(2048u, stats->bucket_slot_size);
2690 EXPECT_EQ(2048u, stats->active_bytes);
2691 EXPECT_EQ(1u, stats->active_count);
2692 EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
2693 EXPECT_EQ(0u, stats->decommittable_bytes);
2694 EXPECT_EQ(0u, stats->discardable_bytes);
2695 EXPECT_EQ(0u, stats->num_full_slot_spans);
2696 EXPECT_EQ(1u, stats->num_active_slot_spans);
2697 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2698 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2699 allocator.root()->Free(ptr);
2700 }
2701
2702 {
2703 MockPartitionStatsDumper dumper;
2704 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2705 &dumper);
2706 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2707
2708 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2709 EXPECT_TRUE(stats);
2710 EXPECT_TRUE(stats->is_valid);
2711 EXPECT_EQ(2048u, stats->bucket_slot_size);
2712 EXPECT_EQ(0u, stats->active_bytes);
2713 EXPECT_EQ(0u, stats->active_count);
2714 EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
2715 EXPECT_EQ(SystemPageSize(), stats->decommittable_bytes);
2716 EXPECT_EQ(0u, stats->discardable_bytes);
2717 EXPECT_EQ(0u, stats->num_full_slot_spans);
2718 EXPECT_EQ(0u, stats->num_active_slot_spans);
2719 EXPECT_EQ(1u, stats->num_empty_slot_spans);
2720 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2721 }
2722
2723 // TODO(crbug.com/722911): Commenting this out causes this test to fail when
2724 // run singly (--gtest_filter=PartitionAllocTest.DumpMemoryStats), but not
2725 // when run with the others (--gtest_filter=PartitionAllocTest.*).
2726 ClearEmptySlotSpanCache();
2727
2728 {
2729 MockPartitionStatsDumper dumper;
2730 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2731 &dumper);
2732 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2733
2734 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2735 EXPECT_TRUE(stats);
2736 EXPECT_TRUE(stats->is_valid);
2737 EXPECT_EQ(2048u, stats->bucket_slot_size);
2738 EXPECT_EQ(0u, stats->active_bytes);
2739 EXPECT_EQ(0u, stats->active_count);
2740 EXPECT_EQ(0u, stats->resident_bytes);
2741 EXPECT_EQ(0u, stats->decommittable_bytes);
2742 EXPECT_EQ(0u, stats->discardable_bytes);
2743 EXPECT_EQ(0u, stats->num_full_slot_spans);
2744 EXPECT_EQ(0u, stats->num_active_slot_spans);
2745 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2746 EXPECT_EQ(1u, stats->num_decommitted_slot_spans);
2747 }
2748 }
2749
2750 // This test checks for correct empty slot span list accounting.
2751 {
2752 size_t size = PartitionPageSize() - ExtraAllocSize(allocator);
2753 void* ptr1 = allocator.root()->Alloc(size, type_name);
2754 void* ptr2 = allocator.root()->Alloc(size, type_name);
2755 allocator.root()->Free(ptr1);
2756 allocator.root()->Free(ptr2);
2757
2758 ClearEmptySlotSpanCache();
2759
2760 ptr1 = allocator.root()->Alloc(size, type_name);
2761
2762 {
2763 MockPartitionStatsDumper dumper;
2764 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2765 &dumper);
2766 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2767
2768 const PartitionBucketMemoryStats* stats =
2769 dumper.GetBucketStats(PartitionPageSize());
2770 EXPECT_TRUE(stats);
2771 EXPECT_TRUE(stats->is_valid);
2772 EXPECT_EQ(PartitionPageSize(), stats->bucket_slot_size);
2773 EXPECT_EQ(PartitionPageSize(), stats->active_bytes);
2774 EXPECT_EQ(1u, stats->active_count);
2775 EXPECT_EQ(PartitionPageSize(), stats->resident_bytes);
2776 EXPECT_EQ(0u, stats->decommittable_bytes);
2777 EXPECT_EQ(0u, stats->discardable_bytes);
2778 EXPECT_EQ(1u, stats->num_full_slot_spans);
2779 EXPECT_EQ(0u, stats->num_active_slot_spans);
2780 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2781 EXPECT_EQ(1u, stats->num_decommitted_slot_spans);
2782 }
2783 allocator.root()->Free(ptr1);
2784 }
2785
2786 // This test checks for correct direct mapped accounting.
2787 {
2788 size_t size_smaller = kMaxBucketed + 1;
2789 size_t size_bigger = (kMaxBucketed * 2) + 1;
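    // Direct-mapped allocations are rounded up to the system page size, so
    // compute the rounded sizes that the bucket stats are expected to report.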
2790 size_t real_size_smaller =
2791 (size_smaller + SystemPageOffsetMask()) & SystemPageBaseMask();
2792 size_t real_size_bigger =
2793 (size_bigger + SystemPageOffsetMask()) & SystemPageBaseMask();
2794 void* ptr = allocator.root()->Alloc(size_smaller, type_name);
2795 void* ptr2 = allocator.root()->Alloc(size_bigger, type_name);
2796
2797 {
2798 MockPartitionStatsDumper dumper;
2799 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2800 &dumper);
2801 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2802
2803 const PartitionBucketMemoryStats* stats =
2804 dumper.GetBucketStats(real_size_smaller);
2805 EXPECT_TRUE(stats);
2806 EXPECT_TRUE(stats->is_valid);
2807 EXPECT_TRUE(stats->is_direct_map);
2808 EXPECT_EQ(real_size_smaller, stats->bucket_slot_size);
2809 EXPECT_EQ(real_size_smaller, stats->active_bytes);
2810 EXPECT_EQ(1u, stats->active_count);
2811 EXPECT_EQ(real_size_smaller, stats->resident_bytes);
2812 EXPECT_EQ(0u, stats->decommittable_bytes);
2813 EXPECT_EQ(0u, stats->discardable_bytes);
2814 EXPECT_EQ(1u, stats->num_full_slot_spans);
2815 EXPECT_EQ(0u, stats->num_active_slot_spans);
2816 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2817 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2818
2819 stats = dumper.GetBucketStats(real_size_bigger);
2820 EXPECT_TRUE(stats);
2821 EXPECT_TRUE(stats->is_valid);
2822 EXPECT_TRUE(stats->is_direct_map);
2823 EXPECT_EQ(real_size_bigger, stats->bucket_slot_size);
2824 EXPECT_EQ(real_size_bigger, stats->active_bytes);
2825 EXPECT_EQ(1u, stats->active_count);
2826 EXPECT_EQ(real_size_bigger, stats->resident_bytes);
2827 EXPECT_EQ(0u, stats->decommittable_bytes);
2828 EXPECT_EQ(0u, stats->discardable_bytes);
2829 EXPECT_EQ(1u, stats->num_full_slot_spans);
2830 EXPECT_EQ(0u, stats->num_active_slot_spans);
2831 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2832 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2833 }
2834
2835 allocator.root()->Free(ptr2);
2836 allocator.root()->Free(ptr);
2837
2838 // Whilst we're here, allocate again and free with different ordering to
2839 // give a workout to our linked list code.
2840 ptr = allocator.root()->Alloc(size_smaller, type_name);
2841 ptr2 = allocator.root()->Alloc(size_bigger, type_name);
2842 allocator.root()->Free(ptr);
2843 allocator.root()->Free(ptr2);
2844 }
2845
2846 // This test checks large-but-not-quite-direct allocations.
2847 {
2848 size_t requested_size = 16 * SystemPageSize() + 1;
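    // Just above 16 system pages: still bucketed rather than direct-mapped,
    // so the unused tail of the slot beyond the requested size is expected to
    // show up as discardable.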
2849 void* ptr = allocator.root()->Alloc(requested_size, type_name);
2850
2851 {
2852 MockPartitionStatsDumper dumper;
2853 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2854 &dumper);
2855 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2856
2857 size_t slot_size = SizeToBucketSize(requested_size);
2858 const PartitionBucketMemoryStats* stats =
2859 dumper.GetBucketStats(slot_size);
2860 ASSERT_TRUE(stats);
2861 EXPECT_TRUE(stats->is_valid);
2862 EXPECT_FALSE(stats->is_direct_map);
2863 EXPECT_EQ(slot_size, stats->bucket_slot_size);
2864 EXPECT_EQ(requested_size + ExtraAllocSize(allocator),
2865 stats->active_bytes);
2866 EXPECT_EQ(1u, stats->active_count);
2867 EXPECT_EQ(slot_size, stats->resident_bytes);
2868 EXPECT_EQ(0u, stats->decommittable_bytes);
2869 EXPECT_EQ(
2870 base::bits::AlignDown(slot_size - requested_size, SystemPageSize()),
2871 stats->discardable_bytes);
2872 EXPECT_EQ(1u, stats->num_full_slot_spans);
2873 EXPECT_EQ(0u, stats->num_active_slot_spans);
2874 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2875 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2876 }
2877
2878 allocator.root()->Free(ptr);
2879
2880 {
2881 MockPartitionStatsDumper dumper;
2882 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2883 &dumper);
2884 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2885
2886 size_t slot_size = SizeToBucketSize(requested_size);
2887 const PartitionBucketMemoryStats* stats =
2888 dumper.GetBucketStats(slot_size);
2889 EXPECT_TRUE(stats);
2890 EXPECT_TRUE(stats->is_valid);
2891 EXPECT_FALSE(stats->is_direct_map);
2892 EXPECT_EQ(slot_size, stats->bucket_slot_size);
2893 EXPECT_EQ(0u, stats->active_bytes);
2894 EXPECT_EQ(0u, stats->active_count);
2895 EXPECT_EQ(slot_size, stats->resident_bytes);
2896 EXPECT_EQ(slot_size, stats->decommittable_bytes);
2897 EXPECT_EQ(0u, stats->num_full_slot_spans);
2898 EXPECT_EQ(0u, stats->num_active_slot_spans);
2899 EXPECT_EQ(1u, stats->num_empty_slot_spans);
2900 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2901 }
2902
2903 requested_size = 17 * SystemPageSize() + 1;
2904 void* ptr2 = allocator.root()->Alloc(requested_size, type_name);
2905 EXPECT_EQ(ptr, ptr2);
2906
2907 {
2908 MockPartitionStatsDumper dumper;
2909 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2910 &dumper);
2911 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
2912
2913 size_t slot_size = SizeToBucketSize(requested_size);
2914 const PartitionBucketMemoryStats* stats =
2915 dumper.GetBucketStats(slot_size);
2916 EXPECT_TRUE(stats);
2917 EXPECT_TRUE(stats->is_valid);
2918 EXPECT_FALSE(stats->is_direct_map);
2919 EXPECT_EQ(slot_size, stats->bucket_slot_size);
2920 EXPECT_EQ(requested_size + ExtraAllocSize(allocator),
2921 stats->active_bytes);
2922 EXPECT_EQ(1u, stats->active_count);
2923 EXPECT_EQ(slot_size, stats->resident_bytes);
2924 EXPECT_EQ(0u, stats->decommittable_bytes);
2925 EXPECT_EQ(
2926 base::bits::AlignDown(slot_size - requested_size, SystemPageSize()),
2927 stats->discardable_bytes);
2928 EXPECT_EQ(1u, stats->num_full_slot_spans);
2929 EXPECT_EQ(0u, stats->num_active_slot_spans);
2930 EXPECT_EQ(0u, stats->num_empty_slot_spans);
2931 EXPECT_EQ(0u, stats->num_decommitted_slot_spans);
2932 }
2933
2934 allocator.root()->Free(ptr2);
2935 }
2936 }
2937
2938 // Tests the API to purge freeable memory.
2939 TEST_P(PartitionAllocTest, Purge) {
2940 char* ptr = static_cast<char*>(
2941 allocator.root()->Alloc(2048 - ExtraAllocSize(allocator), type_name));
2942 allocator.root()->Free(ptr);
2943 {
2944 MockPartitionStatsDumper dumper;
2945 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2946 &dumper);
2947 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2948
2949 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2950 EXPECT_TRUE(stats);
2951 EXPECT_TRUE(stats->is_valid);
2952 EXPECT_EQ(SystemPageSize(), stats->decommittable_bytes);
2953 EXPECT_EQ(SystemPageSize(), stats->resident_bytes);
2954 }
2955 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
2956 {
2957 MockPartitionStatsDumper dumper;
2958 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
2959 &dumper);
2960 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
2961
2962 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
2963 EXPECT_TRUE(stats);
2964 EXPECT_TRUE(stats->is_valid);
2965 EXPECT_EQ(0u, stats->decommittable_bytes);
2966 EXPECT_EQ(0u, stats->resident_bytes);
2967 }
2968 // Calling purge again here is a good way of testing we didn't mess up the
2969 // state of the free cache ring.
2970 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
2971
2972 // A single-slot but non-direct-mapped allocation size.
2973 size_t single_slot_size = 512 * 1024;
2974 ASSERT_GT(single_slot_size, MaxRegularSlotSpanSize());
2975 ASSERT_LE(single_slot_size, kMaxBucketed);
2976 char* big_ptr =
2977 static_cast<char*>(allocator.root()->Alloc(single_slot_size, type_name));
2978 allocator.root()->Free(big_ptr);
2979 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
2980
2981 CHECK_PAGE_IN_CORE(ptr - kPointerOffset, false);
2982 CHECK_PAGE_IN_CORE(big_ptr - kPointerOffset, false);
2983 }
2984
2985 // Tests that we prefer to allocate into a non-empty partition page over an
2986 // empty one. This is an important aspect of minimizing memory usage for some
2987 // allocation sizes, particularly larger ones.
2988 TEST_P(PartitionAllocTest, PreferActiveOverEmpty) {
2989 size_t size = (SystemPageSize() * 2) - ExtraAllocSize(allocator);
2990 // Allocate 3 full slot spans worth of 8192-byte allocations.
2991 // Each slot span for this size is 16384 bytes, or 1 partition page and 2
2992 // slots.
2993 void* ptr1 = allocator.root()->Alloc(size, type_name);
2994 void* ptr2 = allocator.root()->Alloc(size, type_name);
2995 void* ptr3 = allocator.root()->Alloc(size, type_name);
2996 void* ptr4 = allocator.root()->Alloc(size, type_name);
2997 void* ptr5 = allocator.root()->Alloc(size, type_name);
2998 void* ptr6 = allocator.root()->Alloc(size, type_name);
2999
3000 SlotSpanMetadata* slot_span1 = SlotSpanMetadata::FromSlotStart(
3001 allocator.root()->ObjectToSlotStart(ptr1));
3002 SlotSpanMetadata* slot_span2 = SlotSpanMetadata::FromSlotStart(
3003 allocator.root()->ObjectToSlotStart(ptr3));
3004 SlotSpanMetadata* slot_span3 = SlotSpanMetadata::FromSlotStart(
3005 allocator.root()->ObjectToSlotStart(ptr6));
3006 EXPECT_NE(slot_span1, slot_span2);
3007 EXPECT_NE(slot_span2, slot_span3);
3008 PartitionBucket* bucket = slot_span1->bucket;
3009 EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
3010
3011 // Free up the 2nd slot in each slot span.
3012 // This leaves the active list containing 3 slot spans, each with 1 used and 1
3013 // free slot. The active slot span will be the one containing ptr1.
3014 allocator.root()->Free(ptr6);
3015 allocator.root()->Free(ptr4);
3016 allocator.root()->Free(ptr2);
3017 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
3018
3019 // Empty the middle slot span in the active list.
3020 allocator.root()->Free(ptr3);
3021 EXPECT_EQ(slot_span1, bucket->active_slot_spans_head);
3022
3023 // Empty the first slot span in the active list -- also the current slot span.
3024 allocator.root()->Free(ptr1);
3025
3026 // A good choice here is to re-fill the third slot span since the first two
3027 // are empty. We used to fail that.
3028 void* ptr7 = allocator.root()->Alloc(size, type_name);
3029 PA_EXPECT_PTR_EQ(ptr6, ptr7);
3030 EXPECT_EQ(slot_span3, bucket->active_slot_spans_head);
3031
3032 allocator.root()->Free(ptr5);
3033 allocator.root()->Free(ptr7);
3034 }
3035
3036 // Tests the API to purge discardable memory.
3037 TEST_P(PartitionAllocTest, PurgeDiscardableSecondPage) {
3038 // Free the second of two 4096 byte allocations and then purge.
3039 void* ptr1 = allocator.root()->Alloc(
3040 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3041 char* ptr2 = static_cast<char*>(allocator.root()->Alloc(
3042 SystemPageSize() - ExtraAllocSize(allocator), type_name));
3043 allocator.root()->Free(ptr2);
3044 SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
3045 allocator.root()->ObjectToSlotStart(ptr1));
3046 EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
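  // The span provisioned two slots for ptr1 and ptr2, leaving two
  // unprovisioned; the purge below also unprovisions the freed second slot,
  // bringing the count to three.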
3047 {
3048 MockPartitionStatsDumper dumper;
3049 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3050 &dumper);
3051 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3052
3053 const PartitionBucketMemoryStats* stats =
3054 dumper.GetBucketStats(SystemPageSize());
3055 EXPECT_TRUE(stats);
3056 EXPECT_TRUE(stats->is_valid);
3057 EXPECT_EQ(0u, stats->decommittable_bytes);
3058 EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
3059 EXPECT_EQ(SystemPageSize(), stats->active_bytes);
3060 EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
3061 }
3062 CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
3063 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3064 CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
3065 EXPECT_EQ(3u, slot_span->num_unprovisioned_slots);
3066
3067 allocator.root()->Free(ptr1);
3068 }
3069
3070 TEST_P(PartitionAllocTest, PurgeDiscardableFirstPage) {
3071 // Free the first of two 4096 byte allocations and then purge.
3072 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3073 SystemPageSize() - ExtraAllocSize(allocator), type_name));
3074 void* ptr2 = allocator.root()->Alloc(
3075 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3076 allocator.root()->Free(ptr1);
3077 {
3078 MockPartitionStatsDumper dumper;
3079 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3080 &dumper);
3081 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3082
3083 const PartitionBucketMemoryStats* stats =
3084 dumper.GetBucketStats(SystemPageSize());
3085 EXPECT_TRUE(stats);
3086 EXPECT_TRUE(stats->is_valid);
3087 EXPECT_EQ(0u, stats->decommittable_bytes);
3088 #if BUILDFLAG(IS_WIN)
3089 EXPECT_EQ(0u, stats->discardable_bytes);
3090 #else
3091 EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
3092 #endif
3093 EXPECT_EQ(SystemPageSize(), stats->active_bytes);
3094 EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
3095 }
3096 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3097 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3098 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
3099
3100 allocator.root()->Free(ptr2);
3101 }
3102
3103 TEST_P(PartitionAllocTest, PurgeDiscardableNonPageSizedAlloc) {
3104 const size_t requested_size = 2.5 * SystemPageSize();
3105 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3106 requested_size - ExtraAllocSize(allocator), type_name));
3107 void* ptr2 = allocator.root()->Alloc(
3108 requested_size - ExtraAllocSize(allocator), type_name);
3109 void* ptr3 = allocator.root()->Alloc(
3110 requested_size - ExtraAllocSize(allocator), type_name);
3111 void* ptr4 = allocator.root()->Alloc(
3112 requested_size - ExtraAllocSize(allocator), type_name);
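  // Four 2.5-page slots cover 10 system pages. After freeing the first two
  // slots, the pages they span become candidates for discarding, except where
  // a page still holds live data or a needed freelist pointer (see the
  // platform-specific expectations below).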
3113 memset(ptr1, 'A', requested_size - ExtraAllocSize(allocator));
3114 memset(ptr2, 'A', requested_size - ExtraAllocSize(allocator));
3115 allocator.root()->Free(ptr1);
3116 allocator.root()->Free(ptr2);
3117 {
3118 MockPartitionStatsDumper dumper;
3119 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3120 &dumper);
3121 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3122
3123 const PartitionBucketMemoryStats* stats =
3124 dumper.GetBucketStats(requested_size);
3125 EXPECT_TRUE(stats);
3126 EXPECT_TRUE(stats->is_valid);
3127 EXPECT_EQ(0u, stats->decommittable_bytes);
3128 #if BUILDFLAG(IS_WIN)
3129 EXPECT_EQ(3 * SystemPageSize(), stats->discardable_bytes);
3130 #else
3131 EXPECT_EQ(4 * SystemPageSize(), stats->discardable_bytes);
3132 #endif
3133 EXPECT_EQ(requested_size * 2, stats->active_bytes);
3134 EXPECT_EQ(10 * SystemPageSize(), stats->resident_bytes);
3135 }
3136 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3137 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3138 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3139 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
3140 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), true);
3141 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3142 // Except for Windows, the first page is discardable because the freelist
3143 // pointer on this page is nullptr. Note that CHECK_PAGE_IN_CORE only executes
3144 // checks for Linux and ChromeOS, not for Windows.
3145 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
3146 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
3147 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3148 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
3149 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), false);
3150
3151 allocator.root()->Free(ptr3);
3152 allocator.root()->Free(ptr4);
3153 }
3154
3155 TEST_P(PartitionAllocTest, PurgeDiscardableNonPageSizedAllocOnSlotBoundary) {
3156 const size_t requested_size = 2.5 * SystemPageSize();
3157 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3158 requested_size - ExtraAllocSize(allocator), type_name));
3159 void* ptr2 = allocator.root()->Alloc(
3160 requested_size - ExtraAllocSize(allocator), type_name);
3161 void* ptr3 = allocator.root()->Alloc(
3162 requested_size - ExtraAllocSize(allocator), type_name);
3163 void* ptr4 = allocator.root()->Alloc(
3164 requested_size - ExtraAllocSize(allocator), type_name);
3165 memset(ptr1, 'A', requested_size - ExtraAllocSize(allocator));
3166 memset(ptr2, 'A', requested_size - ExtraAllocSize(allocator));
3167 allocator.root()->Free(ptr2);
3168 allocator.root()->Free(ptr1);
3169 {
3170 MockPartitionStatsDumper dumper;
3171 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3172 &dumper);
3173 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3174
3175 const PartitionBucketMemoryStats* stats =
3176 dumper.GetBucketStats(requested_size);
3177 EXPECT_TRUE(stats);
3178 EXPECT_TRUE(stats->is_valid);
3179 EXPECT_EQ(0u, stats->decommittable_bytes);
3180 #if BUILDFLAG(IS_WIN)
3181 EXPECT_EQ(3 * SystemPageSize(), stats->discardable_bytes);
3182 #else
3183 EXPECT_EQ(4 * SystemPageSize(), stats->discardable_bytes);
3184 #endif
3185 EXPECT_EQ(requested_size * 2, stats->active_bytes);
3186 EXPECT_EQ(10 * SystemPageSize(), stats->resident_bytes);
3187 }
3188 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3189 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3190 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3191 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
3192 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), true);
3193 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3194 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3195 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
3196 // Except for Windows, the third page is discardable because the freelist
3197 // pointer on this page is nullptr. Note that CHECK_PAGE_IN_CORE only executes
3198 // checks for Linux and ChromeOS, not for Windows.
3199 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), false);
3200 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
3201 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 4), false);
3202
3203 allocator.root()->Free(ptr3);
3204 allocator.root()->Free(ptr4);
3205 }
3206
3207 TEST_P(PartitionAllocTest, PurgeDiscardableManyPages) {
3208   // On systems with large pages, use fewer pages because:
3209   // 1) There must be a bucket for kFirstAllocPages * SystemPageSize(), and
3210   // 2) On low-end systems, using too many large pages can OOM during the test.
3211 const bool kHasLargePages = SystemPageSize() > 4096;
3212 const size_t kFirstAllocPages = kHasLargePages ? 32 : 64;
3213 const size_t kSecondAllocPages = kHasLargePages ? 31 : 61;
3214
3215 // Detect case (1) from above.
3216 PA_DCHECK(kFirstAllocPages * SystemPageSize() < (1UL << kMaxBucketedOrder));
3217
3218 const size_t kDeltaPages = kFirstAllocPages - kSecondAllocPages;
3219
3220 {
3221 ScopedPageAllocation p(allocator, kFirstAllocPages);
3222 p.TouchAllPages();
3223 }
3224
3225 ScopedPageAllocation p(allocator, kSecondAllocPages);
3226
3227 MockPartitionStatsDumper dumper;
3228 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3229 &dumper);
3230 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3231
3232 const PartitionBucketMemoryStats* stats =
3233 dumper.GetBucketStats(kFirstAllocPages * SystemPageSize());
3234 EXPECT_TRUE(stats);
3235 EXPECT_TRUE(stats->is_valid);
3236 EXPECT_EQ(0u, stats->decommittable_bytes);
3237 EXPECT_EQ(kDeltaPages * SystemPageSize(), stats->discardable_bytes);
3238 EXPECT_EQ(kSecondAllocPages * SystemPageSize(), stats->active_bytes);
3239 EXPECT_EQ(kFirstAllocPages * SystemPageSize(), stats->resident_bytes);
3240
3241 for (size_t i = 0; i < kFirstAllocPages; i++) {
3242 CHECK_PAGE_IN_CORE(p.PageAtIndex(i), true);
3243 }
3244
3245 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3246
3247 for (size_t i = 0; i < kSecondAllocPages; i++) {
3248 CHECK_PAGE_IN_CORE(p.PageAtIndex(i), true);
3249 }
3250 for (size_t i = kSecondAllocPages; i < kFirstAllocPages; i++) {
3251 CHECK_PAGE_IN_CORE(p.PageAtIndex(i), false);
3252 }
3253 }
3254
3255 TEST_P(PartitionAllocTest, PurgeDiscardableWithFreeListStraightening) {
3256 // This sub-test tests truncation of the provisioned slots in a trickier
3257 // case where the freelist is rewritten.
3258 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
3259 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3260 SystemPageSize() - ExtraAllocSize(allocator), type_name));
3261 void* ptr2 = allocator.root()->Alloc(
3262 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3263 void* ptr3 = allocator.root()->Alloc(
3264 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3265 void* ptr4 = allocator.root()->Alloc(
3266 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3267 ptr1[0] = 'A';
3268 ptr1[SystemPageSize()] = 'A';
3269 ptr1[SystemPageSize() * 2] = 'A';
3270 ptr1[SystemPageSize() * 3] = 'A';
3271 SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
3272 allocator.root()->ObjectToSlotStart(ptr1));
3273 allocator.root()->Free(ptr2);
3274 allocator.root()->Free(ptr4);
3275 allocator.root()->Free(ptr1);
3276 EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
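  // ptr1..ptr4 occupy four consecutive one-page slots; only ptr3 is still
  // live, so the pages of the freed slots are candidates for discarding, and
  // the trailing freed slot (ptr4) can additionally be unprovisioned.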
3277
3278 {
3279 MockPartitionStatsDumper dumper;
3280 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3281 &dumper);
3282 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3283
3284 const PartitionBucketMemoryStats* stats =
3285 dumper.GetBucketStats(SystemPageSize());
3286 EXPECT_TRUE(stats);
3287 EXPECT_TRUE(stats->is_valid);
3288 EXPECT_EQ(0u, stats->decommittable_bytes);
3289 #if BUILDFLAG(IS_WIN)
3290 EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
3291 #else
3292 EXPECT_EQ(2 * SystemPageSize(), stats->discardable_bytes);
3293 #endif
3294 EXPECT_EQ(SystemPageSize(), stats->active_bytes);
3295 EXPECT_EQ(4 * SystemPageSize(), stats->resident_bytes);
3296 }
3297 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3298 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3299 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3300 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
3301 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3302 EXPECT_EQ(1u, slot_span->num_unprovisioned_slots);
3303 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3304 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
3305 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3306 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
3307
3308 // Let's check we didn't brick the freelist.
3309 void* ptr1b = allocator.root()->Alloc(
3310 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3311 PA_EXPECT_PTR_EQ(ptr1, ptr1b);
3312 void* ptr2b = allocator.root()->Alloc(
3313 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3314 PA_EXPECT_PTR_EQ(ptr2, ptr2b);
3315 EXPECT_FALSE(slot_span->get_freelist_head()); // ptr4 was unprovisioned
3316 void* ptr4b = allocator.root()->Alloc(
3317 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3318 PA_EXPECT_PTR_EQ(ptr4, ptr4b);
3319 EXPECT_FALSE(slot_span->get_freelist_head());
3320
3321 // Free objects such that they're in this order on the list:
3322 // head -> ptr2 -> ptr3 -> ptr1
3323 // However, ptr4 is still unfreed preventing any unprovisioning.
3324 allocator.root()->Free(ptr1);
3325 allocator.root()->Free(ptr3);
3326 allocator.root()->Free(ptr2);
3327 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3328 // The test by default runs in
3329 // StraightenLargerSlotSpanFreeListsMode::kOnlyWhenUnprovisioning mode, so the
3330 // freelist wasn't modified, and the allocations will happen in LIFO order.
3331 ptr2b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3332 type_name);
3333 PA_EXPECT_PTR_EQ(ptr2, ptr2b);
3334 void* ptr3b = allocator.root()->Alloc(
3335 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3336 PA_EXPECT_PTR_EQ(ptr3, ptr3b);
3337 ptr1b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3338 type_name);
3339 PA_EXPECT_PTR_EQ(ptr1, ptr1b);
3340 EXPECT_FALSE(slot_span->get_freelist_head());
3341
3342 // Free objects such that they're in this order on the list:
3343 // head -> ptr2 -> ptr3 -> ptr1
3344 // However, ptr4 is still unfreed preventing any unprovisioning.
3345 allocator.root()->Free(ptr1);
3346 allocator.root()->Free(ptr3);
3347 allocator.root()->Free(ptr2);
3348 PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
3349 StraightenLargerSlotSpanFreeListsMode::kAlways);
3350 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3351 // In StraightenLargerSlotSpanFreeListsMode::kAlways mode, the freelist is
3352 // ordered from left to right.
3353 ptr1b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3354 type_name);
3355 PA_EXPECT_PTR_EQ(ptr1, ptr1b);
3356 ptr2b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3357 type_name);
3358 PA_EXPECT_PTR_EQ(ptr2, ptr2b);
3359 ptr3b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3360 type_name);
3361 PA_EXPECT_PTR_EQ(ptr3, ptr3b);
3362 EXPECT_FALSE(slot_span->get_freelist_head());
3363
3364 // Free objects such that they're in this order on the list:
3365 // head -> ptr2 -> ptr4 -> ptr1
3366 // ptr3 is still unfreed preventing unprovisioning of ptr1 and ptr2, but not
3367 // ptr4.
3368 allocator.root()->Free(ptr1);
3369 allocator.root()->Free(ptr4);
3370 allocator.root()->Free(ptr2);
3371 PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
3372 StraightenLargerSlotSpanFreeListsMode::kNever);
3373 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3374 // In StraightenLargerSlotSpanFreeListsMode::kNever mode, unprovisioned
3375   // entries will be removed from the freelist, but the list won't be reordered.
3376 ptr2b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3377 type_name);
3378 PA_EXPECT_PTR_EQ(ptr2, ptr2b);
3379 ptr1b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3380 type_name);
3381 PA_EXPECT_PTR_EQ(ptr1, ptr1b);
3382 EXPECT_FALSE(slot_span->get_freelist_head());
3383 ptr4b = allocator.root()->Alloc(SystemPageSize() - ExtraAllocSize(allocator),
3384 type_name);
3385 PA_EXPECT_PTR_EQ(ptr4, ptr4b);
3386 EXPECT_FALSE(slot_span->get_freelist_head());
3387
3388 // Clean up.
3389 allocator.root()->Free(ptr1);
3390 allocator.root()->Free(ptr2);
3391 allocator.root()->Free(ptr3);
3392 allocator.root()->Free(ptr4);
3393 }
3394
3395 TEST_P(PartitionAllocTest, PurgeDiscardableDoubleTruncateFreeList) {
3396 // This sub-test is similar, but tests a double-truncation.
3397 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
3398 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3399 SystemPageSize() - ExtraAllocSize(allocator), type_name));
3400 void* ptr2 = allocator.root()->Alloc(
3401 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3402 void* ptr3 = allocator.root()->Alloc(
3403 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3404 void* ptr4 = allocator.root()->Alloc(
3405 SystemPageSize() - ExtraAllocSize(allocator), type_name);
3406 ptr1[0] = 'A';
3407 ptr1[SystemPageSize()] = 'A';
3408 ptr1[SystemPageSize() * 2] = 'A';
3409 ptr1[SystemPageSize() * 3] = 'A';
3410 SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
3411 allocator.root()->ObjectToSlotStart(ptr1));
3412 allocator.root()->Free(ptr4);
3413 allocator.root()->Free(ptr3);
3414 EXPECT_EQ(0u, slot_span->num_unprovisioned_slots);
3415
3416 {
3417 MockPartitionStatsDumper dumper;
3418 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3419 &dumper);
3420 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3421
3422 const PartitionBucketMemoryStats* stats =
3423 dumper.GetBucketStats(SystemPageSize());
3424 EXPECT_TRUE(stats);
3425 EXPECT_TRUE(stats->is_valid);
3426 EXPECT_EQ(0u, stats->decommittable_bytes);
3427 EXPECT_EQ(2 * SystemPageSize(), stats->discardable_bytes);
3428 EXPECT_EQ(2 * SystemPageSize(), stats->active_bytes);
3429 EXPECT_EQ(4 * SystemPageSize(), stats->resident_bytes);
3430 }
3431 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3432 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3433 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), true);
3434 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), true);
3435 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3436 EXPECT_EQ(2u, slot_span->num_unprovisioned_slots);
3437 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3438 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3439 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 2), false);
3440 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (SystemPageSize() * 3), false);
3441
3442 EXPECT_FALSE(slot_span->get_freelist_head());
3443
3444 allocator.root()->Free(ptr1);
3445 allocator.root()->Free(ptr2);
3446 }
3447
3448 TEST_P(PartitionAllocTest, PurgeDiscardableSmallSlotsWithTruncate) {
3449 size_t requested_size = 0.5 * SystemPageSize();
3450 char* ptr1 = static_cast<char*>(allocator.root()->Alloc(
3451 requested_size - ExtraAllocSize(allocator), type_name));
3452 void* ptr2 = allocator.root()->Alloc(
3453 requested_size - ExtraAllocSize(allocator), type_name);
3454 void* ptr3 = allocator.root()->Alloc(
3455 requested_size - ExtraAllocSize(allocator), type_name);
3456 void* ptr4 = allocator.root()->Alloc(
3457 requested_size - ExtraAllocSize(allocator), type_name);
3458 allocator.root()->Free(ptr3);
3459 allocator.root()->Free(ptr4);
3460 SlotSpanMetadata* slot_span = SlotSpanMetadata::FromSlotStart(
3461 allocator.root()->ObjectToSlotStart(ptr1));
3462 EXPECT_EQ(4u, slot_span->num_unprovisioned_slots);
3463 {
3464 MockPartitionStatsDumper dumper;
3465 allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
3466 &dumper);
3467 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
3468
3469 const PartitionBucketMemoryStats* stats =
3470 dumper.GetBucketStats(requested_size);
3471 EXPECT_TRUE(stats);
3472 EXPECT_TRUE(stats->is_valid);
3473 EXPECT_EQ(0u, stats->decommittable_bytes);
3474 EXPECT_EQ(SystemPageSize(), stats->discardable_bytes);
3475 EXPECT_EQ(requested_size * 2, stats->active_bytes);
3476 EXPECT_EQ(2 * SystemPageSize(), stats->resident_bytes);
3477 }
3478 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3479 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), true);
3480 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
3481 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
3482 CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + SystemPageSize(), false);
3483 EXPECT_EQ(6u, slot_span->num_unprovisioned_slots);
3484
3485 allocator.root()->Free(ptr1);
3486 allocator.root()->Free(ptr2);
3487 }
3488
3489 TEST_P(PartitionAllocTest, ActiveListMaintenance) {
3490 size_t size = SystemPageSize() - ExtraAllocSize(allocator);
3491 size_t real_size = size + ExtraAllocSize(allocator);
3492 size_t bucket_index =
3493 allocator.root()->SizeToBucketIndex(real_size, GetBucketDistribution());
3494 PartitionRoot::Bucket* bucket = &allocator.root()->buckets[bucket_index];
3495 ASSERT_EQ(bucket->slot_size, real_size);
3496 size_t slots_per_span = bucket->num_system_pages_per_slot_span;
3497
3498 // Make 10 full slot spans.
3499 constexpr int kSpans = 10;
3500 std::vector<std::vector<void*>> allocated_memory_spans(kSpans);
3501 for (int span_index = 0; span_index < kSpans; span_index++) {
3502 for (size_t i = 0; i < slots_per_span; i++) {
3503 allocated_memory_spans[span_index].push_back(
3504 allocator.root()->Alloc(size));
3505 }
3506 }
3507
3508 // Free one entry in the middle span, creating a partial slot span.
3509 constexpr size_t kSpanIndex = 5;
3510 allocator.root()->Free(allocated_memory_spans[kSpanIndex].back());
3511 allocated_memory_spans[kSpanIndex].pop_back();
3512
3513 // Empty the last slot span.
3514 for (void* ptr : allocated_memory_spans[kSpans - 1]) {
3515 allocator.root()->Free(ptr);
3516 }
3517 allocated_memory_spans.pop_back();
3518
3519 // The active list now is:
3520 // Partial -> Empty -> Full -> Full -> ... -> Full
3521 bucket->MaintainActiveList();
3522
3523 // Only one entry in the active list.
3524 ASSERT_NE(bucket->active_slot_spans_head,
3525 SlotSpanMetadata::get_sentinel_slot_span());
3526 EXPECT_FALSE(bucket->active_slot_spans_head->next_slot_span);
3527
3528 // The empty list has 1 entry.
3529 ASSERT_NE(bucket->empty_slot_spans_head,
3530 SlotSpanMetadata::get_sentinel_slot_span());
3531 EXPECT_FALSE(bucket->empty_slot_spans_head->next_slot_span);
3532
3533 // The rest are full slot spans.
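  // (kSpans minus the partial span and the emptied span.)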
3534 EXPECT_EQ(8u, bucket->num_full_slot_spans);
3535
3536 // Free all memory.
3537 for (const auto& span : allocated_memory_spans) {
3538 for (void* ptr : span) {
3539 allocator.root()->Free(ptr);
3540 }
3541 }
3542 }
3543
3544 TEST_P(PartitionAllocTest, ReallocMovesCookie) {
3545 // Resize so as to be sure to hit a "resize in place" case, and ensure that
3546 // use of the entire result is compatible with the debug mode's cookie, even
3547 // when the bucket size is large enough to span more than one partition page
3548 // and we can track the "raw" size. See https://crbug.com/709271
3549 static const size_t kSize = MaxRegularSlotSpanSize();
3550 void* ptr = allocator.root()->Alloc(kSize + 1, type_name);
3551 EXPECT_TRUE(ptr);
3552
3553 memset(ptr, 0xbd, kSize + 1);
3554 ptr = allocator.root()->Realloc(ptr, kSize + 2, type_name);
3555 EXPECT_TRUE(ptr);
3556
3557 memset(ptr, 0xbd, kSize + 2);
3558 allocator.root()->Free(ptr);
3559 }
3560
3561 TEST_P(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
3562 // For crbug.com/781473
3563 static constexpr size_t kSize = 264;
3564 void* ptr = allocator.root()->Alloc(kSize, type_name);
3565 EXPECT_TRUE(ptr);
3566
3567 ptr = allocator.root()->Realloc(ptr, kSize + 16, type_name);
3568 EXPECT_TRUE(ptr);
3569
3570 allocator.root()->Free(ptr);
3571 }
3572
3573 TEST_P(PartitionAllocTest, ZeroFill) {
3574 static constexpr size_t kAllZerosSentinel =
3575 std::numeric_limits<size_t>::max();
3576 for (size_t size : kTestSizes) {
3577 char* p = static_cast<char*>(
3578 allocator.root()->Alloc<AllocFlags::kZeroFill>(size));
3579 size_t non_zero_position = kAllZerosSentinel;
3580 for (size_t i = 0; i < size; ++i) {
3581 if (0 != p[i]) {
3582 non_zero_position = i;
3583 break;
3584 }
3585 }
3586 EXPECT_EQ(kAllZerosSentinel, non_zero_position)
3587 << "test allocation size: " << size;
3588 allocator.root()->Free(p);
3589 }
3590
3591 for (int i = 0; i < 10; ++i) {
3592 SCOPED_TRACE(i);
3593 AllocateRandomly<AllocFlags::kZeroFill>(allocator.root(), 250);
3594 }
3595 }
3596
3597 TEST_P(PartitionAllocTest, SchedulerLoopQuarantine) {
3598 LightweightQuarantineBranch& branch =
3599 allocator.root()->GetSchedulerLoopQuarantineBranchForTesting();
3600
3601 constexpr size_t kCapacityInBytes = std::numeric_limits<size_t>::max();
3602 size_t original_capacity_in_bytes = branch.GetRoot().GetCapacityInBytes();
3603 branch.GetRoot().SetCapacityInBytes(kCapacityInBytes);
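  // With an effectively unlimited capacity, quarantined objects should not be
  // evicted for size reasons before the IsQuarantinedForTesting() checks below.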
3604
3605 for (size_t size : kTestSizes) {
3606 SCOPED_TRACE(size);
3607
3608 void* object = allocator.root()->Alloc(size);
3609 allocator.root()->Free<FreeFlags::kSchedulerLoopQuarantine>(object);
3610
3611 ASSERT_TRUE(branch.IsQuarantinedForTesting(object));
3612 }
3613
3614 for (int i = 0; i < 10; ++i) {
3615 SCOPED_TRACE(i);
3616 AllocateRandomly<AllocFlags::kNone, FreeFlags::kSchedulerLoopQuarantine>(
3617 allocator.root(), 250);
3618 }
3619
3620 branch.Purge();
3621 branch.GetRoot().SetCapacityInBytes(original_capacity_in_bytes);
3622 }
3623
3624 // Ensures `Free<kSchedulerLoopQuarantine>` works as `Free<kNone>` if disabled.
3625 // See: https://crbug.com/324994233.
3626 TEST_P(PartitionAllocTest, SchedulerLoopQuarantineDisabled) {
3627 PartitionOptions opts = GetCommonPartitionOptions();
3628 opts.scheduler_loop_quarantine = PartitionOptions::kDisabled;
3629 opts.thread_cache = PartitionOptions::kDisabled;
3630 opts.star_scan_quarantine = PartitionOptions::kDisallowed;
3631 std::unique_ptr<PartitionRoot> root = CreateCustomTestRoot(opts, {});
3632
3633   // This allocation is required to prevent the slot span from being empty
3634   // and decommitted.
3635 void* ptr_to_keep_slot_span = root->Alloc(kTestAllocSize, type_name);
3636 void* ptr = root->Alloc(kTestAllocSize, type_name);
3637
3638 auto* slot_span =
3639 SlotSpanMetadata::FromSlotStart(root->ObjectToSlotStart(ptr));
3640 root->Free<FreeFlags::kSchedulerLoopQuarantine>(ptr);
3641
3642 // The object should be freed immediately.
3643 EXPECT_EQ(root->ObjectToSlotStart(ptr),
3644 UntagPtr(slot_span->get_freelist_head()));
3645
3646 root->Free(ptr_to_keep_slot_span);
3647 }
3648
3649 TEST_P(PartitionAllocTest, ZapOnFree) {
3650 void* ptr = allocator.root()->Alloc(1, type_name);
3651 EXPECT_TRUE(ptr);
3652 memset(ptr, 'A', 1);
3653 allocator.root()->Free<FreeFlags::kZap>(ptr);
3654 // Accessing memory after free requires a retag.
3655 ptr = TagPtr(ptr);
3656 EXPECT_NE('A', *static_cast<unsigned char*>(ptr));
3657
3658 constexpr size_t size = 1024;
3659 ptr = allocator.root()->Alloc(size, type_name);
3660 EXPECT_TRUE(ptr);
3661 memset(ptr, 'A', size);
3662 allocator.root()->Free<FreeFlags::kZap>(ptr);
3663 // Accessing memory after free requires a retag.
3664 ptr = TagPtr(ptr);
3665 EXPECT_NE('A', *static_cast<unsigned char*>(ptr));
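  // The first words of the slot may be reused for freelist bookkeeping, so
  // check the zap pattern past them and at the end of the allocation.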
3666 EXPECT_EQ(kFreedByte,
3667 *(static_cast<unsigned char*>(ptr) + 2 * sizeof(void*)));
3668 EXPECT_EQ(kFreedByte, *(static_cast<unsigned char*>(ptr) + size - 1));
3669 }
3670
3671 TEST_P(PartitionAllocTest, Bug_897585) {
3672 // Need sizes big enough to be direct mapped and a delta small enough to
3673 // allow re-use of the slot span when cookied. These numbers fall out of the
3674 // test case in the indicated bug.
3675 size_t kInitialSize = 983050;
3676 size_t kDesiredSize = 983100;
3677 ASSERT_GT(kInitialSize, kMaxBucketed);
3678 ASSERT_GT(kDesiredSize, kMaxBucketed);
3679 void* ptr = allocator.root()->Alloc<AllocFlags::kReturnNull>(kInitialSize);
3680 ASSERT_NE(nullptr, ptr);
3681 ptr = allocator.root()->Realloc<AllocFlags::kReturnNull>(ptr, kDesiredSize,
3682 nullptr);
3683 ASSERT_NE(nullptr, ptr);
3684 memset(ptr, 0xbd, kDesiredSize);
3685 allocator.root()->Free(ptr);
3686 }
3687
3688 TEST_P(PartitionAllocTest, OverrideHooks) {
3689 constexpr size_t kOverriddenSize = 1234;
3690 constexpr const char* kOverriddenType = "Overridden type";
3691 constexpr unsigned char kOverriddenChar = 'A';
3692
3693 // Marked static so that we can use them in non-capturing lambdas below.
3694 // (Non-capturing lambdas convert directly to function pointers.)
3695 static volatile bool free_called = false;
3696 static void* overridden_allocation = nullptr;
3697 overridden_allocation = malloc(kOverriddenSize);
3698 memset(overridden_allocation, kOverriddenChar, kOverriddenSize);
3699
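  // Install allocation, free, and size-query override hooks; each returns
  // true only when it recognizes and handles the request.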
3700 PartitionAllocHooks::SetOverrideHooks(
3701 [](void** out, AllocFlags flags, size_t size,
3702 const char* type_name) -> bool {
3703 if (size == kOverriddenSize && type_name == kOverriddenType) {
3704 *out = overridden_allocation;
3705 return true;
3706 }
3707 return false;
3708 },
3709 [](void* address) -> bool {
3710 if (address == overridden_allocation) {
3711 free_called = true;
3712 return true;
3713 }
3714 return false;
3715 },
3716 [](size_t* out, void* address) -> bool {
3717 if (address == overridden_allocation) {
3718 *out = kOverriddenSize;
3719 return true;
3720 }
3721 return false;
3722 });
3723
3724 void* ptr = allocator.root()->Alloc<AllocFlags::kReturnNull>(kOverriddenSize,
3725 kOverriddenType);
3726 ASSERT_EQ(ptr, overridden_allocation);
3727
3728 allocator.root()->Free(ptr);
3729 EXPECT_TRUE(free_called);
3730
3731 // overridden_allocation has not actually been freed so we can now immediately
3732 // realloc it.
3733 free_called = false;
3734 ptr = allocator.root()->Realloc<AllocFlags::kReturnNull>(ptr, 1, nullptr);
3735 ASSERT_NE(ptr, nullptr);
3736 EXPECT_NE(ptr, overridden_allocation);
3737 EXPECT_TRUE(free_called);
3738 EXPECT_EQ(*(char*)ptr, kOverriddenChar);
3739 allocator.root()->Free(ptr);
3740
3741 PartitionAllocHooks::SetOverrideHooks(nullptr, nullptr, nullptr);
3742 free(overridden_allocation);
3743 }
3744
3745 TEST_P(PartitionAllocTest, Alignment) {
3746 std::vector<void*> allocated_ptrs;
3747
3748 for (size_t size = 1; size <= PartitionPageSize(); size <<= 1) {
3749 if (size <= ExtraAllocSize(allocator)) {
3750 continue;
3751 }
3752 size_t requested_size = size - ExtraAllocSize(allocator);
3753
3754 // All allocations which are not direct-mapped occupy contiguous slots of a
3755 // span, starting on a page boundary. This means that allocations are first
3756 // rounded up to the nearest bucket size, then have an address of the form:
3757 // (partition-page-aligned address) + i * bucket_size.
3758 //
3759 // All powers of two are bucket sizes, meaning that all power of two
3760 // allocations smaller than a page will be aligned on the allocation size.
3761 size_t expected_alignment = size;
3762 for (int index = 0; index < 3; index++) {
3763 void* ptr = allocator.root()->Alloc(requested_size);
3764 allocated_ptrs.push_back(ptr);
3765 EXPECT_EQ(0u,
3766 allocator.root()->ObjectToSlotStart(ptr) % expected_alignment)
3767 << (index + 1) << "-th allocation of size=" << size;
3768 }
3769 }
3770
3771 for (void* ptr : allocated_ptrs) {
3772 allocator.root()->Free(ptr);
3773 }
3774 }
3775
3776 TEST_P(PartitionAllocTest, FundamentalAlignment) {
3777 // See the test above for details. Essentially, checking the bucket size is
3778 // sufficient to ensure that alignment will always be respected, as long as
3779 // the fundamental alignment is <= 16 bytes.
3780 size_t fundamental_alignment = kAlignment;
3781 for (size_t size = 0; size < SystemPageSize(); size++) {
3782 // Allocate several pointers, as the first one in use in a size class will
3783 // be aligned on a page boundary.
3784 void* ptr = allocator.root()->Alloc(size);
3785 void* ptr2 = allocator.root()->Alloc(size);
3786 void* ptr3 = allocator.root()->Alloc(size);
3787
3788 EXPECT_EQ(UntagPtr(ptr) % fundamental_alignment, 0u);
3789 EXPECT_EQ(UntagPtr(ptr2) % fundamental_alignment, 0u);
3790 EXPECT_EQ(UntagPtr(ptr3) % fundamental_alignment, 0u);
3791
3792 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
3793 // The capacity(C) is slot size - ExtraAllocSize(allocator).
3794 // Since slot size is multiples of kAlignment,
3795 // C % kAlignment == (slot_size - ExtraAllocSize(allocator)) % kAlignment.
3796 // C % kAlignment == (-ExtraAllocSize(allocator)) % kAlignment.
3797 // Since kCookieSize is a multiple of kAlignment,
3798 // C % kAlignment == (-kInSlotMetadataBufferSize) % kAlignment
3799 // == (kAlignment - kInSlotMetadataBufferSize) % kAlignment.
3800 EXPECT_EQ(allocator.root()->AllocationCapacityFromSlotStart(slot_start) %
3801 fundamental_alignment,
3802 UseBRPPool()
3803 ? (-ExtraAllocSize(allocator) % fundamental_alignment)
3804 : 0);
3805
3806 allocator.root()->Free(ptr);
3807 allocator.root()->Free(ptr2);
3808 allocator.root()->Free(ptr3);
3809 }
3810 }
3811
3812 void VerifyAlignment(PartitionRoot* root, size_t size, size_t alignment) {
3813 std::vector<void*> allocated_ptrs;
3814
3815 for (int index = 0; index < 3; index++) {
3816 void* ptr = root->AlignedAlloc(alignment, size);
3817 ASSERT_TRUE(ptr);
3818 allocated_ptrs.push_back(ptr);
3819 EXPECT_EQ(0ull, UntagPtr(ptr) % alignment)
3820 << (index + 1) << "-th allocation of size=" << size
3821 << ", alignment=" << alignment;
3822 }
3823
3824 for (void* ptr : allocated_ptrs) {
3825 root->Free(ptr);
3826 }
3827 }
3828
3829 TEST_P(PartitionAllocTest, AlignedAllocations) {
3830 size_t alloc_sizes[] = {1,
3831 10,
3832 100,
3833 1000,
3834 10000,
3835 60000,
3836 70000,
3837 130000,
3838 500000,
3839 900000,
3840 kMaxBucketed + 1,
3841 2 * kMaxBucketed,
3842 kSuperPageSize - 2 * PartitionPageSize(),
3843 4 * kMaxBucketed};
3844 for (size_t alloc_size : alloc_sizes) {
3845 for (size_t alignment = 1; alignment <= kMaxSupportedAlignment;
3846 alignment <<= 1) {
3847 VerifyAlignment(allocator.root(), alloc_size, alignment);
3848 }
3849 }
3850 }
3851
3852 // Test that the optimized `GetSlotNumber` implementation produces valid
3853 // results.
3854 TEST_P(PartitionAllocTest, OptimizedGetSlotNumber) {
3855 for (size_t i = 0; i < kNumBuckets; ++i) {
3856 auto& bucket = allocator.root()->buckets[i];
3857 if (SizeToIndex(bucket.slot_size) != i) {
3858 continue;
3859 }
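    // Probe the start, the middle, and the last byte of every slot; all three
    // offsets must map back to the same slot number.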
3860 for (size_t slot = 0, offset = 0; slot < bucket.get_slots_per_span();
3861 ++slot, offset += bucket.slot_size) {
3862 EXPECT_EQ(slot, bucket.GetSlotNumber(offset));
3863 EXPECT_EQ(slot, bucket.GetSlotNumber(offset + bucket.slot_size / 2));
3864 EXPECT_EQ(slot, bucket.GetSlotNumber(offset + bucket.slot_size - 1));
3865 }
3866 }
3867 }
3868
3869 TEST_P(PartitionAllocTest, GetUsableSizeNull) {
3870 EXPECT_EQ(0ULL, PartitionRoot::GetUsableSize(nullptr));
3871 }
3872
3873 TEST_P(PartitionAllocTest, GetUsableSize) {
3874 #if PA_CONFIG(MAYBE_ENABLE_MAC11_MALLOC_SIZE_HACK)
3875 allocator.root()->EnableMac11MallocSizeHackForTesting();
3876 #endif
3877 size_t delta = 31;
3878 for (size_t size = 1; size <= kMinDirectMappedDownsize; size += delta) {
3879 void* ptr = allocator.root()->Alloc(size);
3880 EXPECT_TRUE(ptr);
3881 size_t usable_size = PartitionRoot::GetUsableSize(ptr);
3882 size_t usable_size_with_hack =
3883 PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(ptr);
3884 #if PA_CONFIG(MAYBE_ENABLE_MAC11_MALLOC_SIZE_HACK)
3885 if (size != internal::kMac11MallocSizeHackRequestedSize)
3886 #endif
3887 EXPECT_EQ(usable_size_with_hack, usable_size);
3888 EXPECT_LE(size, usable_size);
3889 memset(ptr, 0xDE, usable_size);
3890     // Should not crash when freeing the ptr.
3891 allocator.root()->Free(ptr);
3892 }
3893 }
3894
3895 #if PA_CONFIG(MAYBE_ENABLE_MAC11_MALLOC_SIZE_HACK)
3896 TEST_P(PartitionAllocTest, GetUsableSizeWithMac11MallocSizeHack) {
3897 if (internal::base::mac::MacOSMajorVersion() != 11) {
3898 GTEST_SKIP() << "Skipping because the test is for Mac11.";
3899 }
3900
3901 allocator.root()->EnableMac11MallocSizeHackForTesting();
3902 size_t size = internal::kMac11MallocSizeHackRequestedSize;
3903 void* ptr = allocator.root()->Alloc(size);
3904 size_t usable_size = PartitionRoot::GetUsableSize(ptr);
3905 size_t usable_size_with_hack =
3906 PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(ptr);
3907 EXPECT_EQ(usable_size,
3908 allocator.root()->settings.mac11_malloc_size_hack_usable_size_);
3909 EXPECT_EQ(usable_size_with_hack, size);
3910
3911 allocator.root()->Free(ptr);
3912 }
3913 #endif // PA_CONFIG(MAYBE_ENABLE_MAC11_MALLOC_SIZE_HACK)
3914
3915 TEST_P(PartitionAllocTest, Bookkeeping) {
3916 auto& root = *allocator.root();
3917
3918 EXPECT_EQ(0U, root.total_size_of_committed_pages);
3919 EXPECT_EQ(0U, root.max_size_of_committed_pages);
3920 EXPECT_EQ(0U, root.get_total_size_of_allocated_bytes());
3921 EXPECT_EQ(0U, root.get_max_size_of_allocated_bytes());
3922 EXPECT_EQ(0U, root.total_size_of_super_pages);
3923 size_t small_size = 1000;
3924
3925 // A full slot span of size 1 partition page is committed.
3926 void* ptr = root.Alloc(small_size - ExtraAllocSize(allocator), type_name);
3927 // Lazy commit commits only needed pages.
3928 size_t expected_committed_size =
3929 kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
3930 size_t expected_super_pages_size = kSuperPageSize;
3931 size_t expected_max_committed_size = expected_committed_size;
3932 size_t bucket_index = SizeToIndex(small_size - ExtraAllocSize(allocator));
3933 PartitionBucket* bucket = &root.buckets[bucket_index];
3934 size_t expected_total_allocated_size = bucket->slot_size;
3935 size_t expected_max_allocated_size = expected_total_allocated_size;
3936
3937 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3938 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3939 EXPECT_EQ(expected_total_allocated_size,
3940 root.get_total_size_of_allocated_bytes());
3941 EXPECT_EQ(expected_max_allocated_size,
3942 root.get_max_size_of_allocated_bytes());
3943 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3944
3945 // Freeing memory doesn't result in decommitting pages right away.
3946 root.Free(ptr);
3947 expected_total_allocated_size = 0U;
3948 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3949 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3950 EXPECT_EQ(expected_total_allocated_size,
3951 root.get_total_size_of_allocated_bytes());
3952 EXPECT_EQ(expected_max_allocated_size,
3953 root.get_max_size_of_allocated_bytes());
3954 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3955
3956 // Allocating the same size lands it in the same slot span.
3957 ptr = root.Alloc(small_size - ExtraAllocSize(allocator), type_name);
3958 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3959 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3960 EXPECT_EQ(expected_max_allocated_size,
3961 root.get_max_size_of_allocated_bytes());
3962 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3963
3964 // Freeing memory doesn't result in decommitting pages right away.
3965 root.Free(ptr);
3966 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3967 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3968 EXPECT_EQ(expected_max_allocated_size,
3969 root.get_max_size_of_allocated_bytes());
3970 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3971
3972 // Allocating another size commits another slot span.
3973 ptr = root.Alloc(2 * small_size - ExtraAllocSize(allocator), type_name);
3974 expected_committed_size +=
3975 kUseLazyCommit ? SystemPageSize() : PartitionPageSize();
3976 expected_max_committed_size =
3977 std::max(expected_max_committed_size, expected_committed_size);
3978 expected_max_allocated_size =
3979 std::max(expected_max_allocated_size, static_cast<size_t>(2048));
3980 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3981 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3982 EXPECT_EQ(expected_max_allocated_size,
3983 root.get_max_size_of_allocated_bytes());
3984 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3985
3986 // Freeing memory doesn't result in decommitting pages right away.
3987 root.Free(ptr);
3988 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
3989 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
3990 EXPECT_EQ(expected_max_allocated_size,
3991 root.get_max_size_of_allocated_bytes());
3992 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
3993
3994 // Single-slot slot spans...
3995 //
3996 // When the system page size is larger than 4KiB, we don't necessarily have
3997 // enough space in the superpage to store two of the largest bucketed
3998 // allocations, particularly when we reserve extra space for e.g. bitmaps.
3999 // To avoid this, we use something just below kMaxBucketed.
4000 size_t big_size = kMaxBucketed * 4 / 5 - SystemPageSize();
4001
4002 ASSERT_GT(big_size, MaxRegularSlotSpanSize());
4003 ASSERT_LE(big_size, kMaxBucketed);
4004 bucket_index = SizeToIndex(big_size - ExtraAllocSize(allocator));
4005 bucket = &root.buckets[bucket_index];
4006   // Assert the allocation doesn't fill the entire span or the entire
4007   // partition page, to make the test more interesting.
4008 ASSERT_LT(big_size, bucket->get_bytes_per_span());
4009 ASSERT_NE(big_size % PartitionPageSize(), 0U);
4010 ptr = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
4011 expected_committed_size += bucket->get_bytes_per_span();
4012 expected_max_committed_size =
4013 std::max(expected_max_committed_size, expected_committed_size);
4014 expected_total_allocated_size += bucket->get_bytes_per_span();
4015 expected_max_allocated_size =
4016 std::max(expected_max_allocated_size, expected_total_allocated_size);
4017 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4018 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4019 EXPECT_EQ(expected_total_allocated_size,
4020 root.get_total_size_of_allocated_bytes());
4021 EXPECT_EQ(expected_max_allocated_size,
4022 root.get_max_size_of_allocated_bytes());
4023 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4024
4025 // Allocating 2nd time doesn't overflow the super page...
4026 void* ptr2 = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
4027 expected_committed_size += bucket->get_bytes_per_span();
4028 expected_max_committed_size =
4029 std::max(expected_max_committed_size, expected_committed_size);
4030 expected_total_allocated_size += bucket->get_bytes_per_span();
4031 expected_max_allocated_size =
4032 std::max(expected_max_allocated_size, expected_total_allocated_size);
4033 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4034 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4035 EXPECT_EQ(expected_total_allocated_size,
4036 root.get_total_size_of_allocated_bytes());
4037 EXPECT_EQ(expected_max_allocated_size,
4038 root.get_max_size_of_allocated_bytes());
4039 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4040
4041 // ... but 3rd time does.
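  // (This allocation requires reserving an additional super page; see
  // expected_super_pages_size below.)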
4042 void* ptr3 = root.Alloc(big_size - ExtraAllocSize(allocator), type_name);
4043 expected_committed_size += bucket->get_bytes_per_span();
4044 expected_max_committed_size =
4045 std::max(expected_max_committed_size, expected_committed_size);
4046 expected_total_allocated_size += bucket->get_bytes_per_span();
4047 expected_max_allocated_size =
4048 std::max(expected_max_allocated_size, expected_total_allocated_size);
4049 expected_super_pages_size += kSuperPageSize;
4050 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4051 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4052 EXPECT_EQ(expected_total_allocated_size,
4053 root.get_total_size_of_allocated_bytes());
4054 EXPECT_EQ(expected_max_allocated_size,
4055 root.get_max_size_of_allocated_bytes());
4056 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4057
4058 // Freeing memory doesn't result in decommitting pages right away.
4059 root.Free(ptr);
4060 root.Free(ptr2);
4061 root.Free(ptr3);
4062 expected_total_allocated_size -= 3 * bucket->get_bytes_per_span();
4063 expected_max_allocated_size =
4064 std::max(expected_max_allocated_size, expected_total_allocated_size);
4065 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4066 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4067 EXPECT_EQ(expected_total_allocated_size,
4068 root.get_total_size_of_allocated_bytes());
4069 EXPECT_EQ(expected_max_allocated_size,
4070 root.get_max_size_of_allocated_bytes());
4071 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4072
4073 // Now everything should be decommitted. The reserved space for super pages
4074 // stays the same and will never go away (by design).
4075 root.PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
4076 expected_committed_size = 0;
4077 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4078 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4079 EXPECT_EQ(expected_total_allocated_size,
4080 root.get_total_size_of_allocated_bytes());
4081 EXPECT_EQ(expected_max_allocated_size,
4082 root.get_max_size_of_allocated_bytes());
4083 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4084
4085 // None of the above should affect the direct map space.
4086 EXPECT_EQ(0U, root.total_size_of_direct_mapped_pages);
4087
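  // Direct-map sizes chosen to straddle the super-page boundary from both
  // sides, exercising the rounding to system pages and to the direct-map
  // allocation granularity below.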
4088 size_t huge_sizes[] = {
4089 kMaxBucketed + SystemPageSize(),
4090 kMaxBucketed + SystemPageSize() + 123,
4091 kSuperPageSize - PageAllocationGranularity(),
4092 kSuperPageSize - SystemPageSize() - PartitionPageSize(),
4093 kSuperPageSize - PartitionPageSize(),
4094 kSuperPageSize - SystemPageSize(),
4095 kSuperPageSize,
4096 kSuperPageSize + SystemPageSize(),
4097 kSuperPageSize + PartitionPageSize(),
4098 kSuperPageSize + SystemPageSize() + PartitionPageSize(),
4099 kSuperPageSize + PageAllocationGranularity(),
4100 kSuperPageSize + DirectMapAllocationGranularity(),
4101 };
4102 size_t alignments[] = {
4103 PartitionPageSize(),
4104 2 * PartitionPageSize(),
4105 kMaxSupportedAlignment / 2,
4106 kMaxSupportedAlignment,
4107 };
4108 for (size_t huge_size : huge_sizes) {
4109 ASSERT_GT(huge_size, kMaxBucketed);
4110 for (size_t alignment : alignments) {
4111 // For direct map, we commit only as many pages as needed.
4112 size_t aligned_size = partition_alloc::internal::base::bits::AlignUp(
4113 huge_size, SystemPageSize());
4114 ptr = root.AllocInternalForTesting(huge_size - ExtraAllocSize(allocator),
4115 alignment, type_name);
4116 expected_committed_size += aligned_size;
4117 expected_max_committed_size =
4118 std::max(expected_max_committed_size, expected_committed_size);
4119 expected_total_allocated_size += aligned_size;
4120 expected_max_allocated_size =
4121 std::max(expected_max_allocated_size, expected_total_allocated_size);
4122 // The total reserved map includes metadata and guard pages at the ends.
4123 // It also includes alignment. However, these would double count the first
4124 // partition page, so it needs to be subtracted.
4125 size_t surrounding_pages_size =
4126 PartitionRoot::GetDirectMapMetadataAndGuardPagesSize() + alignment -
4127 PartitionPageSize();
4128 size_t expected_direct_map_size =
4129 partition_alloc::internal::base::bits::AlignUp(
4130 aligned_size + surrounding_pages_size,
4131 DirectMapAllocationGranularity());
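      // In other words, the committed size is system-page granular, while the
      // reserved direct-map size is rounded up to the (typically coarser)
      // direct-map allocation granularity.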
4132 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4133 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4134 EXPECT_EQ(expected_total_allocated_size,
4135 root.get_total_size_of_allocated_bytes());
4136 EXPECT_EQ(expected_max_allocated_size,
4137 root.get_max_size_of_allocated_bytes());
4138 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4139 EXPECT_EQ(expected_direct_map_size,
4140 root.total_size_of_direct_mapped_pages);
4141
4142     // Freeing memory in the direct map decommits pages right away. The address
4143 // space is released for re-use too.
4144 root.Free(ptr);
4145 expected_committed_size -= aligned_size;
4146 expected_direct_map_size = 0;
4147 expected_max_committed_size =
4148 std::max(expected_max_committed_size, expected_committed_size);
4149 expected_total_allocated_size -= aligned_size;
4150 expected_max_allocated_size =
4151 std::max(expected_max_allocated_size, expected_total_allocated_size);
4152 EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
4153 EXPECT_EQ(expected_max_committed_size, root.max_size_of_committed_pages);
4154 EXPECT_EQ(expected_total_allocated_size,
4155 root.get_total_size_of_allocated_bytes());
4156 EXPECT_EQ(expected_max_allocated_size,
4157 root.get_max_size_of_allocated_bytes());
4158 EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
4159 EXPECT_EQ(expected_direct_map_size,
4160 root.total_size_of_direct_mapped_pages);
4161 }
4162 }
4163 }
4164
4165 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
4166
4167 TEST_P(PartitionAllocTest, RefCountBasic) {
4168 if (!UseBRPPool()) {
4169 return;
4170 }
4171
4172 constexpr uint64_t kCookie = 0x1234567890ABCDEF;
4173 constexpr uint64_t kQuarantined = 0xEFEFEFEFEFEFEFEF;
4174
4175 size_t alloc_size = 64 - ExtraAllocSize(allocator);
4176 uint64_t* ptr1 =
4177 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
4178 EXPECT_TRUE(ptr1);
4179
4180 *ptr1 = kCookie;
4181
4182 auto* in_slot_metadata =
4183 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr1);
4184 EXPECT_TRUE(in_slot_metadata->IsAliveWithNoKnownRefs());
4185
4186 in_slot_metadata->Acquire();
4187 EXPECT_FALSE(in_slot_metadata->Release());
4188 EXPECT_TRUE(in_slot_metadata->IsAliveWithNoKnownRefs());
4189 EXPECT_EQ(*ptr1, kCookie);
4190
4191 in_slot_metadata->AcquireFromUnprotectedPtr();
4192 EXPECT_FALSE(in_slot_metadata->IsAliveWithNoKnownRefs());
4193
4194 allocator.root()->Free(ptr1);
4195 // The allocation shouldn't be reclaimed, and its contents should be zapped.
4196 // Retag ptr1 to get its correct MTE tag.
4197 ptr1 = TagPtr(ptr1);
4198 EXPECT_NE(*ptr1, kCookie);
4199 EXPECT_EQ(*ptr1, kQuarantined);
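  // The 0xEF byte pattern is the quarantine zap value (it presumably matches
  // internal::kQuarantinedByte used elsewhere in this file).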
4200
4201 // The allocator should not reuse the original slot since its reference count
4202 // doesn't equal zero.
4203 uint64_t* ptr2 =
4204 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
4205 EXPECT_NE(ptr1, ptr2);
4206 allocator.root()->Free(ptr2);
4207
4208 // When the last reference is released, the slot should become reusable.
4209 // Retag in_slot_metadata because PartitionAlloc retags ptr to enforce
4210 // quarantine.
4211 in_slot_metadata = TagPtr(in_slot_metadata);
4212 EXPECT_TRUE(in_slot_metadata->ReleaseFromUnprotectedPtr());
4213 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr1));
4214 uint64_t* ptr3 =
4215 static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
4216 EXPECT_EQ(ptr1, ptr3);
4217 allocator.root()->Free(ptr3);
4218 }
4219
4220 void PartitionAllocTest::RunRefCountReallocSubtest(size_t orig_size,
4221 size_t new_size) {
4222 void* ptr1 = allocator.root()->Alloc(orig_size, type_name);
4223 EXPECT_TRUE(ptr1);
4224
4225 auto* in_slot_metadata1 =
4226 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr1);
4227 EXPECT_TRUE(in_slot_metadata1->IsAliveWithNoKnownRefs());
4228
4229 in_slot_metadata1->AcquireFromUnprotectedPtr();
4230 EXPECT_FALSE(in_slot_metadata1->IsAliveWithNoKnownRefs());
4231
4232 void* ptr2 = allocator.root()->Realloc(ptr1, new_size, type_name);
4233 EXPECT_TRUE(ptr2);
4234
4235 // PartitionAlloc may retag memory areas on realloc (even if they
4236 // do not move), so recover the true tag here.
4237 in_slot_metadata1 = TagPtr(in_slot_metadata1);
4238
4239 // Re-query in-slot metadata. It may have moved if Realloc changed the slot.
4240 auto* in_slot_metadata2 =
4241 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr2);
4242
4243 if (UntagPtr(ptr1) == UntagPtr(ptr2)) {
4244 // If the slot didn't change, in-slot metadata should stay the same.
4245 EXPECT_EQ(in_slot_metadata1, in_slot_metadata2);
4246 EXPECT_FALSE(in_slot_metadata2->IsAliveWithNoKnownRefs());
4247
4248 EXPECT_FALSE(in_slot_metadata2->ReleaseFromUnprotectedPtr());
4249 } else {
4250 // If the allocation was moved to another slot, the old in-slot metadata
4251 // stayed in the same location in memory, is no longer alive, but still has
4252 // a reference. The new in-slot metadata is alive, but has no references.
4253 EXPECT_NE(in_slot_metadata1, in_slot_metadata2);
4254 EXPECT_FALSE(in_slot_metadata1->IsAlive());
4255 EXPECT_FALSE(in_slot_metadata1->IsAliveWithNoKnownRefs());
4256 EXPECT_TRUE(in_slot_metadata2->IsAliveWithNoKnownRefs());
4257
4258 EXPECT_TRUE(in_slot_metadata1->ReleaseFromUnprotectedPtr());
4259 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr1));
4260 }
4261
4262 allocator.root()->Free(ptr2);
4263 }
4264
4265 TEST_P(PartitionAllocTest, RefCountRealloc) {
4266 if (!UseBRPPool()) {
4267 return;
4268 }
4269
4270 size_t raw_sizes[] = {500, 5000, 50000, 400000, 5000000};
4271
4272 for (size_t raw_size : raw_sizes) {
4273 size_t alloc_size = raw_size - ExtraAllocSize(allocator);
4274 RunRefCountReallocSubtest(alloc_size, alloc_size - 9);
4275 RunRefCountReallocSubtest(alloc_size, alloc_size + 9);
4276 RunRefCountReallocSubtest(alloc_size, alloc_size * 2);
4277 RunRefCountReallocSubtest(alloc_size, alloc_size / 2);
4278 RunRefCountReallocSubtest(alloc_size, alloc_size / 10 * 11);
4279 RunRefCountReallocSubtest(alloc_size, alloc_size / 10 * 9);
4280 }
4281 }
4282
4283 int g_unretained_dangling_raw_ptr_detected_count = 0;
4284
4285 class UnretainedDanglingRawPtrTest : public PartitionAllocTest {
4286 public:
4287   void SetUp() override {
4288 PartitionAllocTest::SetUp();
4289 g_unretained_dangling_raw_ptr_detected_count = 0;
4290 old_detected_fn_ = partition_alloc::GetUnretainedDanglingRawPtrDetectedFn();
4291
4292 partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
4293 &UnretainedDanglingRawPtrTest::DanglingRawPtrDetected);
4294 old_unretained_dangling_ptr_enabled_ =
4295 partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(true);
4296 }
4297   void TearDown() override {
4298 partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(old_detected_fn_);
4299 partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(
4300 old_unretained_dangling_ptr_enabled_);
4301 PartitionAllocTest::TearDown();
4302 }
4303
4304 private:
4305   static void DanglingRawPtrDetected(uintptr_t) {
4306 g_unretained_dangling_raw_ptr_detected_count++;
4307 }
4308
4309 partition_alloc::DanglingRawPtrDetectedFn* old_detected_fn_;
4310 bool old_unretained_dangling_ptr_enabled_;
4311 };
4312
4313 INSTANTIATE_TEST_SUITE_P(AlternateTestParams,
4314 UnretainedDanglingRawPtrTest,
4315 testing::ValuesIn(GetPartitionAllocTestParams()));
4316
4317 TEST_P(UnretainedDanglingRawPtrTest, UnretainedDanglingPtrNoReport) {
4318 if (!UseBRPPool()) {
4319 return;
4320 }
4321
4322 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
4323 EXPECT_TRUE(ptr);
4324 auto* in_slot_metadata =
4325 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr);
4326 in_slot_metadata->Acquire();
4327 EXPECT_TRUE(in_slot_metadata->IsAlive());
4328 // Allocation is still live, so calling ReportIfDangling() should not result
4329 // in any detections.
4330 in_slot_metadata->ReportIfDangling();
4331 EXPECT_EQ(g_unretained_dangling_raw_ptr_detected_count, 0);
4332 EXPECT_FALSE(in_slot_metadata->Release());
4333 allocator.root()->Free(ptr);
4334 }
4335
4336 TEST_P(UnretainedDanglingRawPtrTest, UnretainedDanglingPtrShouldReport) {
4337 if (!UseBRPPool()) {
4338 return;
4339 }
4340
4341 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
4342 EXPECT_TRUE(ptr);
4343 auto* in_slot_metadata =
4344 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr);
4345 in_slot_metadata->AcquireFromUnprotectedPtr();
4346 EXPECT_TRUE(in_slot_metadata->IsAlive());
4347 allocator.root()->Free(ptr);
4348 // At this point, memory shouldn't be alive...
4349 EXPECT_FALSE(in_slot_metadata->IsAlive());
4350 // ...and we should report the ptr as dangling.
4351 in_slot_metadata->ReportIfDangling();
4352 EXPECT_EQ(g_unretained_dangling_raw_ptr_detected_count, 1);
4353 EXPECT_TRUE(in_slot_metadata->ReleaseFromUnprotectedPtr());
4354
4355 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4356 }
4357
4358 #if !BUILDFLAG(HAS_64_BIT_POINTERS)
4359 TEST_P(PartitionAllocTest, BackupRefPtrGuardRegion) {
4360 if (!UseBRPPool()) {
4361 return;
4362 }
4363
4364 size_t alignment = internal::PageAllocationGranularity();
4365
4366 uintptr_t requested_address;
4367 memset(&requested_address, internal::kQuarantinedByte,
4368 sizeof(requested_address));
4369 requested_address = RoundDownToPageAllocationGranularity(requested_address);
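  // An address made entirely of kQuarantinedByte bytes is what a use-after-free
  // of zapped memory would dereference; the idea, presumably, is that BRP keeps
  // this region unmapped as a guard, so the request below must not be satisfied
  // at the hinted address.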
4370
4371 uintptr_t allocated_address =
4372 AllocPages(requested_address, alignment, alignment,
4373 PageAccessibilityConfiguration(
4374 PageAccessibilityConfiguration::kReadWrite),
4375 PageTag::kPartitionAlloc);
4376 EXPECT_NE(allocated_address, requested_address);
4377
4378 if (allocated_address) {
4379 FreePages(allocated_address, alignment);
4380 }
4381 }
4382 #endif // !BUILDFLAG(HAS_64_BIT_POINTERS)
4383 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
4384
4385 #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
4386
4387 // Allocate memory, and reference it from 3 raw_ptr. Among them 2 will be
4388 // dangling.
4389 TEST_P(PartitionAllocTest, DanglingPtr) {
4390 if (!UseBRPPool()) {
4391 return;
4392 }
4393
4394 CountDanglingRawPtr dangling_checks;
4395
4396 // Allocate memory, and reference it from 3 raw_ptr.
4397 uint64_t* ptr = static_cast<uint64_t*>(
4398 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
4399 auto* in_slot_metadata =
4400 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr);
4401
4402 in_slot_metadata->Acquire();
4403 in_slot_metadata->Acquire();
4404 in_slot_metadata->Acquire();
4405 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4406 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4407
4408 // The first raw_ptr stops referencing it, before the memory has been
4409 // released.
4410 EXPECT_FALSE(in_slot_metadata->Release());
4411 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4412 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4413
4414   // Free it. This creates two dangling pointers.
4415 allocator.root()->Free(ptr);
4416 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4417 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4418
4419   // The dangling raw_ptr stops referencing it.
4420 EXPECT_FALSE(in_slot_metadata->Release());
4421 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4422 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4423
4424   // The dangling raw_ptr stops referencing it again.
4425 EXPECT_TRUE(in_slot_metadata->Release());
4426 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4427 EXPECT_EQ(g_dangling_raw_ptr_released_count, 2);
4428
4429 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4430 }
4431
4432 // Allocate memory, and reference it from 3
4433 // raw_ptr<T, DisableDanglingPtrDetection>. Among them 2 will be dangling. This
4434 // doesn't trigger any dangling raw_ptr checks.
4435 TEST_P(PartitionAllocTest, DanglingDanglingPtr) {
4436 if (!UseBRPPool()) {
4437 return;
4438 }
4439
4440 CountDanglingRawPtr dangling_checks;
4441
4442 // Allocate memory, and reference it from 3 raw_ptr.
4443 uint64_t* ptr = static_cast<uint64_t*>(
4444 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
4445 auto* in_slot_metadata =
4446 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr);
4447 in_slot_metadata->AcquireFromUnprotectedPtr();
4448 in_slot_metadata->AcquireFromUnprotectedPtr();
4449 in_slot_metadata->AcquireFromUnprotectedPtr();
4450 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4451 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4452
4453 // The first raw_ptr<T, DisableDanglingPtrDetection> stops referencing it,
4454 // before the memory has been released.
4455 EXPECT_FALSE(in_slot_metadata->ReleaseFromUnprotectedPtr());
4456 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4457 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4458
4459 // Free it. This creates two dangling raw_ptr<T, DisableDanglingPtrDetection>.
4460 allocator.root()->Free(ptr);
4461 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4462 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4463
4464   // The dangling raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4465 EXPECT_FALSE(in_slot_metadata->ReleaseFromUnprotectedPtr());
4466 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4467 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4468
4469   // The dangling raw_ptr<T, DisableDanglingPtrDetection> stops referencing it
4470 // again.
4471 EXPECT_TRUE(in_slot_metadata->ReleaseFromUnprotectedPtr());
4472 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4473 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4474
4475 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4476 }
4477
4478 // When 'free' is called, one raw_ptr<> and one
4479 // raw_ptr<T, DisableDanglingPtrDetection> remain. The raw_ptr<> is released first.
4480 TEST_P(PartitionAllocTest, DanglingMixedReleaseRawPtrFirst) {
4481 if (!UseBRPPool()) {
4482 return;
4483 }
4484
4485 CountDanglingRawPtr dangling_checks;
4486
4487 uint64_t* ptr = static_cast<uint64_t*>(
4488 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
4489 auto* in_slot_metadata =
4490 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr);
4491 // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
4492 in_slot_metadata->AcquireFromUnprotectedPtr();
4493 in_slot_metadata->Acquire();
4494 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4495 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4496
4497 // Free it.
4498 allocator.root()->Free(ptr);
4499 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4500 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4501
4502 // The raw_ptr<> stops referencing it.
4503 EXPECT_FALSE(in_slot_metadata->Release());
4504 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4505 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4506
4507   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4508 EXPECT_TRUE(in_slot_metadata->ReleaseFromUnprotectedPtr());
4509 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4510 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4511
4512 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4513 }
4514
4515 // When 'free' is called, one raw_ptr<> and one
4516 // raw_ptr<T, DisableDanglingPtrDetection> remain.
4517 // The raw_ptr<T, DisableDanglingPtrDetection> is released first. This
4518 // triggers the dangling raw_ptr<> checks.
4519 TEST_P(PartitionAllocTest, DanglingMixedReleaseDanglingPtrFirst) {
4520 if (!UseBRPPool()) {
4521 return;
4522 }
4523
4524 CountDanglingRawPtr dangling_checks;
4525
4526 void* ptr =
4527 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4528 auto* in_slot_metadata =
4529 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr);
4530 // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
4531 in_slot_metadata->AcquireFromUnprotectedPtr();
4532 in_slot_metadata->Acquire();
4533 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4534 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4535
4536 // Free it.
4537 allocator.root()->Free(ptr);
4538 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4539 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4540
4541 // The raw_ptr<> stops referencing it.
4542 EXPECT_FALSE(in_slot_metadata->ReleaseFromUnprotectedPtr());
4543 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4544 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4545
4546   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4547 EXPECT_TRUE(in_slot_metadata->Release());
4548 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4549 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4550
4551 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4552 }
4553
4554 // When 'free' is called, one raw_ptr<T, DisableDanglingPtrDetection> remains;
4555 // it is then used to acquire one
4556 // dangling raw_ptr<>. Release the raw_ptr<> first.
4557 TEST_P(PartitionAllocTest, DanglingPtrUsedToAcquireNewRawPtr) {
4558 if (!UseBRPPool()) {
4559 return;
4560 }
4561
4562 CountDanglingRawPtr dangling_checks;
4563
4564 void* ptr =
4565 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4566 auto* in_slot_metadata =
4567 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr);
4568 // Acquire a raw_ptr<T, DisableDanglingPtrDetection>.
4569 in_slot_metadata->AcquireFromUnprotectedPtr();
4570 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4571 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4572
4573 // Free it once.
4574 allocator.root()->Free(ptr);
4575 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4576 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4577
4578 // A raw_ptr<> starts referencing it.
4579 in_slot_metadata->Acquire();
4580 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4581 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4582
4583 // The raw_ptr<> stops referencing it.
4584 EXPECT_FALSE(in_slot_metadata->Release());
4585 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4586 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4587
4588 // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4589 EXPECT_TRUE(in_slot_metadata->ReleaseFromUnprotectedPtr());
4590 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4591 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4592
4593 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4594 }
4595
4596 // Same as 'DanglingPtrUsedToAcquireNewRawPtr', but release the
4597 // raw_ptr<T, DisableDanglingPtrDetection> before the raw_ptr<>.
4598 TEST_P(PartitionAllocTest, DanglingPtrUsedToAcquireNewRawPtrVariant) {
4599 if (!UseBRPPool()) {
4600 return;
4601 }
4602
4603 CountDanglingRawPtr dangling_checks;
4604
4605 void* ptr =
4606 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4607 auto* in_slot_metadata =
4608 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr);
4609 // Acquire a raw_ptr<T, DisableDanglingPtrDetection>.
4610 in_slot_metadata->AcquireFromUnprotectedPtr();
4611 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4612 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4613
4614 // Free it.
4615 allocator.root()->Free(ptr);
4616 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4617 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4618
4619 // A raw_ptr<> starts referencing it.
4620 in_slot_metadata->Acquire();
4621 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4622 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4623
4624 // The raw_ptr<> stops referencing it.
4625 EXPECT_FALSE(in_slot_metadata->ReleaseFromUnprotectedPtr());
4626 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4627 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4628
4629 // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4630 EXPECT_TRUE(in_slot_metadata->Release());
4631 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4632 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4633
4634 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4635 }
4636
4637 // Acquire a raw_ptr<T>, and release it before freeing memory. In the
4638 // background, there is one raw_ptr<T, DisableDanglingPtrDetection>. This
4639 // doesn't trigger any dangling raw_ptr<T> checks.
4640 TEST_P(PartitionAllocTest, RawPtrReleasedBeforeFree) {
4641 if (!UseBRPPool()) {
4642 return;
4643 }
4644
4645 CountDanglingRawPtr dangling_checks;
4646
4647 void* ptr =
4648 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4649 auto* in_slot_metadata =
4650 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr);
4651 // Acquire a raw_ptr<T, DisableDanglingPtrDetection> and a raw_ptr<>.
4652 in_slot_metadata->Acquire();
4653 in_slot_metadata->AcquireFromUnprotectedPtr();
4654 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4655 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4656
4657 // Release the raw_ptr<>.
4658 EXPECT_FALSE(in_slot_metadata->Release());
4659 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4660 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4661
4662 // Free it.
4663 allocator.root()->Free(ptr);
4664 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4665 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4666
4667   // The raw_ptr<T, DisableDanglingPtrDetection> stops referencing it.
4668 EXPECT_TRUE(in_slot_metadata->ReleaseFromUnprotectedPtr());
4669 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4670 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4671
4672 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4673 }
4674
4675 // Similar to `PartitionAllocTest.DanglingPtr`, but using
4676 // `PartitionRoot::Free<FreeFlags::kSchedulerLoopQuarantine>`.
4677 // 1. `PartitionRoot::Free<kSchedulerLoopQuarantine>`
4678 // - The allocation is owned by Scheduler-Loop Quarantine.
4679 // 2. `InSlotMetadata::Release`
4680 // - The allocation is still owned by Scheduler-Loop Quarantine.
4681 // 3. The allocation gets purged from Scheduler-Loop Quarantine.
4682 // - Actual free happens here.
4683 TEST_P(PartitionAllocTest,
4684 DanglingPtrReleaseBeforeSchedulerLoopQuarantineExit) {
4685 if (!UseBRPPool()) {
4686 return;
4687 }
4688
4689 CountDanglingRawPtr dangling_checks;
4690
4691 // Allocate memory, and reference it from 3 raw_ptr.
4692 uint64_t* ptr = static_cast<uint64_t*>(
4693 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
4694 auto* ref_count =
4695 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr);
4696
4697 ref_count->Acquire();
4698 ref_count->Acquire();
4699 ref_count->Acquire();
4700 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4701 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4702
4703 // The first raw_ptr stops referencing it, before the memory has been
4704 // released.
4705 EXPECT_FALSE(ref_count->Release());
4706 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4707 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4708
4709   // Free it. This creates two dangling pointers.
4710 allocator.root()->Free<FreeFlags::kSchedulerLoopQuarantine>(ptr);
4711 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4712 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4713
4714   // The dangling raw_ptr stops referencing it.
4715 EXPECT_FALSE(ref_count->Release());
4716 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4717 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4718
4719   // The dangling raw_ptr stops referencing it again.
4720 // Allocation should not be reclaimed because it is still held by the
4721 // allocator, in the quarantine.
4722 EXPECT_FALSE(ref_count->Release());
4723 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4724 EXPECT_EQ(g_dangling_raw_ptr_released_count, 2);
4725
4726 LightweightQuarantineBranch& branch =
4727 allocator.root()->GetSchedulerLoopQuarantineBranchForTesting();
4728 branch.Purge();
4729 }
4730
4731 // Similar to `PartitionAllocTest.DanglingPtr`, but using
4732 // `PartitionRoot::Free<FreeFlags::kSchedulerLoopQuarantine>`.
4733 // 1. `PartitionRoot::Free<kSchedulerLoopQuarantine>`
4734 // - The allocation is owned by Scheduler-Loop Quarantine.
4735 // 2. The allocation gets purged from Scheduler-Loop Quarantine.
4736 // - The allocation is now moved to BRP-quarantine.
4737 // 3. `InSlotMetadata::Release`
4738 // - Actual free happens here.
4739 TEST_P(PartitionAllocTest, DanglingPtrReleaseAfterSchedulerLoopQuarantineExit) {
4740 if (!UseBRPPool()) {
4741 return;
4742 }
4743
4744 CountDanglingRawPtr dangling_checks;
4745
4746 // Allocate memory, and reference it from 3 raw_ptr.
4747 uint64_t* ptr = static_cast<uint64_t*>(
4748 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name));
4749 auto* ref_count =
4750 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr);
4751
4752 ref_count->Acquire();
4753 ref_count->Acquire();
4754 ref_count->Acquire();
4755 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4756 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4757
4758 // The first raw_ptr stops referencing it, before the memory has been
4759 // released.
4760 EXPECT_FALSE(ref_count->Release());
4761 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 0);
4762 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4763
4764   // Free it. This creates two dangling pointers.
4765 allocator.root()->Free<FreeFlags::kSchedulerLoopQuarantine>(ptr);
4766 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4767 EXPECT_EQ(g_dangling_raw_ptr_released_count, 0);
4768
4769   // The dangling raw_ptr stops referencing it.
4770 EXPECT_FALSE(ref_count->Release());
4771 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4772 EXPECT_EQ(g_dangling_raw_ptr_released_count, 1);
4773
4774 LightweightQuarantineBranch& branch =
4775 allocator.root()->GetSchedulerLoopQuarantineBranchForTesting();
4776 branch.Purge();
4777
4778   // The dangling raw_ptr stops referencing it again. The allocation has
4779   // already left the Scheduler-Loop Quarantine, so releasing the last
4780   // reference allows the actual free (performed just below).
4781 EXPECT_TRUE(ref_count->Release());
4782 EXPECT_EQ(g_dangling_raw_ptr_detected_count, 1);
4783 EXPECT_EQ(g_dangling_raw_ptr_released_count, 2);
4784
4785 PartitionAllocFreeForRefCounting(allocator.root()->ObjectToSlotStart(ptr));
4786 }
4787
4788 #if defined(PA_HAS_DEATH_TESTS)
4789 // DCHECK messages are stripped in official builds, which causes death tests
4790 // with matchers to fail.
4791 #if !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
4792
4793 // Acquire() once, Release() twice => CRASH
4794 TEST_P(PartitionAllocDeathTest, ReleaseUnderflowRawPtr) {
4795 if (!UseBRPPool()) {
4796 return;
4797 }
4798
4799 void* ptr =
4800 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4801 auto* in_slot_metadata =
4802 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr);
4803 in_slot_metadata->Acquire();
4804 EXPECT_FALSE(in_slot_metadata->Release());
4805 EXPECT_DCHECK_DEATH(in_slot_metadata->Release());
4806 allocator.root()->Free(ptr);
4807 }
4808
4809 // AcquireFromUnprotectedPtr() once, ReleaseFromUnprotectedPtr() twice => CRASH
4810 TEST_P(PartitionAllocDeathTest, ReleaseUnderflowDanglingPtr) {
4811 if (!UseBRPPool()) {
4812 return;
4813 }
4814
4815 void* ptr =
4816 allocator.root()->Alloc(64 - ExtraAllocSize(allocator), type_name);
4817 auto* in_slot_metadata =
4818 allocator.root()->InSlotMetadataPointerFromObjectForTesting(ptr);
4819 in_slot_metadata->AcquireFromUnprotectedPtr();
4820 EXPECT_FALSE(in_slot_metadata->ReleaseFromUnprotectedPtr());
4821 EXPECT_DCHECK_DEATH(in_slot_metadata->ReleaseFromUnprotectedPtr());
4822 allocator.root()->Free(ptr);
4823 }
4824
4825 #endif  // !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
4826 #endif // defined(PA_HAS_DEATH_TESTS)
4827 #endif // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
4828
4829 TEST_P(PartitionAllocTest, ReservationOffset) {
4830 // For normal buckets, offset should be kOffsetTagNormalBuckets.
4831 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
4832 EXPECT_TRUE(ptr);
4833 uintptr_t address = UntagPtr(ptr);
4834 EXPECT_EQ(kOffsetTagNormalBuckets, *ReservationOffsetPointer(address));
4835 allocator.root()->Free(ptr);
4836
4837 // For direct-map,
4838 size_t large_size = kSuperPageSize * 5 + PartitionPageSize() * .5f;
4839 ASSERT_GT(large_size, kMaxBucketed);
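  // Each super page within a direct-map reservation records its offset (in
  // super pages) from the reservation start, so the addresses probed below,
  // one super page apart, see increasing offsets 0 through 5.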
4840 ptr = allocator.root()->Alloc(large_size, type_name);
4841 EXPECT_TRUE(ptr);
4842 address = UntagPtr(ptr);
4843 EXPECT_EQ(0U, *ReservationOffsetPointer(address));
4844 EXPECT_EQ(1U, *ReservationOffsetPointer(address + kSuperPageSize));
4845 EXPECT_EQ(2U, *ReservationOffsetPointer(address + kSuperPageSize * 2));
4846 EXPECT_EQ(3U, *ReservationOffsetPointer(address + kSuperPageSize * 3));
4847 EXPECT_EQ(4U, *ReservationOffsetPointer(address + kSuperPageSize * 4));
4848 EXPECT_EQ(5U, *ReservationOffsetPointer(address + kSuperPageSize * 5));
4849
4850 // In-place realloc doesn't affect the offsets.
4851 void* new_ptr = allocator.root()->Realloc(ptr, large_size * .8, type_name);
4852 EXPECT_EQ(new_ptr, ptr);
4853 EXPECT_EQ(0U, *ReservationOffsetPointer(address));
4854 EXPECT_EQ(1U, *ReservationOffsetPointer(address + kSuperPageSize));
4855 EXPECT_EQ(2U, *ReservationOffsetPointer(address + kSuperPageSize * 2));
4856 EXPECT_EQ(3U, *ReservationOffsetPointer(address + kSuperPageSize * 3));
4857 EXPECT_EQ(4U, *ReservationOffsetPointer(address + kSuperPageSize * 4));
4858 EXPECT_EQ(5U, *ReservationOffsetPointer(address + kSuperPageSize * 5));
4859
4860 allocator.root()->Free(ptr);
4861 // After free, the offsets must be kOffsetTagNotAllocated.
4862 EXPECT_EQ(kOffsetTagNotAllocated, *ReservationOffsetPointer(address));
4863 EXPECT_EQ(kOffsetTagNotAllocated,
4864 *ReservationOffsetPointer(address + kSuperPageSize));
4865 EXPECT_EQ(kOffsetTagNotAllocated,
4866 *ReservationOffsetPointer(address + kSuperPageSize * 2));
4867 EXPECT_EQ(kOffsetTagNotAllocated,
4868 *ReservationOffsetPointer(address + kSuperPageSize * 3));
4869 EXPECT_EQ(kOffsetTagNotAllocated,
4870 *ReservationOffsetPointer(address + kSuperPageSize * 4));
4871 EXPECT_EQ(kOffsetTagNotAllocated,
4872 *ReservationOffsetPointer(address + kSuperPageSize * 5));
4873 }
4874
4875 TEST_P(PartitionAllocTest, GetReservationStart) {
4876 size_t large_size = kSuperPageSize * 3 + PartitionPageSize() * .5f;
4877 ASSERT_GT(large_size, kMaxBucketed);
4878 void* ptr = allocator.root()->Alloc(large_size, type_name);
4879 EXPECT_TRUE(ptr);
4880 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
4881 uintptr_t reservation_start = slot_start - PartitionPageSize();
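  // The slot starts one partition page past the reservation start; that first
  // partition page holds the direct-map metadata and guard pages.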
4882 EXPECT_EQ(0U, reservation_start & DirectMapAllocationGranularityOffsetMask());
4883
4884 uintptr_t address = UntagPtr(ptr);
4885 for (uintptr_t a = address; a < address + large_size; ++a) {
4886 uintptr_t address2 = GetDirectMapReservationStart(a) + PartitionPageSize();
4887 EXPECT_EQ(slot_start, address2);
4888 }
4889
4890 EXPECT_EQ(reservation_start, GetDirectMapReservationStart(slot_start));
4891
4892 allocator.root()->Free(ptr);
4893 }
4894
4895 #if BUILDFLAG(IS_FUCHSIA)
4896 // TODO: https://crbug.com/331366007 - re-enable on Fuchsia once bug is fixed.
4897 TEST_P(PartitionAllocTest, DISABLED_CheckReservationType) {
4898 #else
4899 TEST_P(PartitionAllocTest, CheckReservationType) {
4900 #endif // BUILDFLAG(IS_FUCHSIA)
4901 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
4902 EXPECT_TRUE(ptr);
4903 uintptr_t address = UntagPtr(ptr);
4904 uintptr_t address_to_check = address;
4905 EXPECT_FALSE(IsReservationStart(address_to_check));
4906 EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
4907 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4908 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4909 address_to_check = address + kTestAllocSize - 1;
4910 EXPECT_FALSE(IsReservationStart(address_to_check));
4911 EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
4912 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4913 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4914 address_to_check =
4915 partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
4916 EXPECT_TRUE(IsReservationStart(address_to_check));
4917 EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
4918 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4919 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4920 allocator.root()->Free(ptr);
4921 // Freeing keeps a normal-bucket super page in memory.
4922 address_to_check =
4923 partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
4924 EXPECT_TRUE(IsReservationStart(address_to_check));
4925 EXPECT_TRUE(IsManagedByNormalBuckets(address_to_check));
4926 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4927 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4928
4929 size_t large_size = 2 * kSuperPageSize;
4930 ASSERT_GT(large_size, kMaxBucketed);
4931 ptr = allocator.root()->Alloc(large_size, type_name);
4932 EXPECT_TRUE(ptr);
4933 address = UntagPtr(ptr);
4934 address_to_check = address;
4935 EXPECT_FALSE(IsReservationStart(address_to_check));
4936 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4937 EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
4938 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4939 address_to_check =
4940 partition_alloc::internal::base::bits::AlignUp(address, kSuperPageSize);
4941 EXPECT_FALSE(IsReservationStart(address_to_check));
4942 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4943 EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
4944 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4945 address_to_check = address + large_size - 1;
4946 EXPECT_FALSE(IsReservationStart(address_to_check));
4947 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4948 EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
4949 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4950 address_to_check =
4951 partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
4952 EXPECT_TRUE(IsReservationStart(address_to_check));
4953 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4954 EXPECT_TRUE(IsManagedByDirectMap(address_to_check));
4955 EXPECT_TRUE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4956 allocator.root()->Free(ptr);
4957 // Freeing releases direct-map super pages.
4958 address_to_check =
4959 partition_alloc::internal::base::bits::AlignDown(address, kSuperPageSize);
4960
4961 // DCHECKs don't work with EXPECT_DEATH on official builds.
4962 #if BUILDFLAG(PA_DCHECK_IS_ON) && (!defined(OFFICIAL_BUILD) || !defined(NDEBUG))
4963 // Expect to DCHECK on unallocated region.
4964 EXPECT_DEATH_IF_SUPPORTED(IsReservationStart(address_to_check), "");
4965 #endif // BUILDFLAG(PA_DCHECK_IS_ON) && (!defined(OFFICIAL_BUILD) ||
4966 // !defined(NDEBUG))
4967
4968 EXPECT_FALSE(IsManagedByNormalBuckets(address_to_check));
4969 EXPECT_FALSE(IsManagedByDirectMap(address_to_check));
4970 EXPECT_FALSE(IsManagedByNormalBucketsOrDirectMap(address_to_check));
4971 }
4972
4973 // Test for crash http://crbug.com/1169003.
4974 TEST_P(PartitionAllocTest, CrossPartitionRootRealloc) {
4975 // Size is large enough to satisfy it from a single-slot slot span
4976 size_t test_size = MaxRegularSlotSpanSize() - ExtraAllocSize(allocator);
4977 void* ptr = allocator.root()->Alloc<AllocFlags::kReturnNull>(test_size);
4978 EXPECT_TRUE(ptr);
4979
4980 // Create new root and call PurgeMemory to simulate ConfigurePartitions().
4981 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
4982 PurgeFlags::kDiscardUnusedSystemPages);
4983 std::unique_ptr<PartitionRoot> new_root = CreateCustomTestRoot(
4984 GetCommonPartitionOptions(),
4985 PartitionTestOptions{.set_bucket_distribution = true});
4986
4987 // Realloc from |allocator.root()| into |new_root|.
4988 void* ptr2 = new_root->Realloc<AllocFlags::kReturnNull>(ptr, test_size + 1024,
4989 nullptr);
4990 EXPECT_TRUE(ptr2);
4991 PA_EXPECT_PTR_NE(ptr, ptr2);
4992 }
4993
4994 TEST_P(PartitionAllocTest, FastPathOrReturnNull) {
4995 size_t allocation_size = 64;
4996 // The very first allocation is never a fast path one, since it needs a new
4997 // super page and a new partition page.
4998 EXPECT_FALSE(allocator.root()->Alloc<AllocFlags::kFastPathOrReturnNull>(
4999 allocation_size, ""));
5000 void* ptr = allocator.root()->Alloc(allocation_size);
5001 ASSERT_TRUE(ptr);
5002
5003 // Next one is, since the partition page has been activated.
5004 void* ptr2 = allocator.root()->Alloc<AllocFlags::kFastPathOrReturnNull>(
5005 allocation_size, "");
5006 EXPECT_TRUE(ptr2);
5007
5008 // First allocation of a different bucket is slow.
5009 EXPECT_FALSE(allocator.root()->Alloc<AllocFlags::kFastPathOrReturnNull>(
5010 2 * allocation_size, ""));
5011
5012 size_t allocated_size = 2 * allocation_size;
5013 std::vector<void*> ptrs;
5014 while (void* new_ptr =
5015 allocator.root()->Alloc<AllocFlags::kFastPathOrReturnNull>(
5016 allocation_size, "")) {
5017 ptrs.push_back(new_ptr);
5018 allocated_size += allocation_size;
5019 }
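  // Fast-path allocations stop once the already-committed slot span runs out
  // of free slots, so the total stays within a single regular slot span.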
5020 EXPECT_LE(allocated_size,
5021 PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan);
5022
5023 for (void* ptr_to_free : ptrs) {
5024 allocator.root()->Free<FreeFlags::kNoHooks>(ptr_to_free);
5025 }
5026
5027 allocator.root()->Free<FreeFlags::kNoHooks>(ptr);
5028 allocator.root()->Free<FreeFlags::kNoHooks>(ptr2);
5029 }
5030
5031 #if defined(PA_HAS_DEATH_TESTS)
5032 // DCHECK messages are stripped in official builds, which causes death tests
5033 // with matchers to fail.
5034 #if !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
5035
5036 TEST_P(PartitionAllocDeathTest, CheckTriggered) {
5037 EXPECT_DCHECK_DEATH_WITH(PA_CHECK(5 == 7), "Check failed.*5 == 7");
5038 EXPECT_DEATH(PA_CHECK(5 == 7), "Check failed.*5 == 7");
5039 }
5040
5041 #endif  // !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
5042 #endif // defined(PA_HAS_DEATH_TESTS)
5043
5044 // Not on chromecast, since gtest considers extra output from itself as a test
5045 // failure:
5046 // https://ci.chromium.org/ui/p/chromium/builders/ci/Cast%20Audio%20Linux/98492/overview
5047 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(PA_HAS_DEATH_TESTS) && \
5048 !BUILDFLAG(PA_IS_CASTOS)
5049
5050 namespace {
5051
5052 PA_NOINLINE void FreeForTest(void* data) {
5053 free(data);
5054 }
5055
5056 class ThreadDelegateForPreforkHandler
5057 : public base::PlatformThreadForTesting::Delegate {
5058 public:
5059 ThreadDelegateForPreforkHandler(std::atomic<bool>& please_stop,
5060 std::atomic<int>& started_threads,
5061 const int alloc_size)
5062 : please_stop_(please_stop),
5063 started_threads_(started_threads),
5064 alloc_size_(alloc_size) {}
5065
5066 void ThreadMain() override {
5067 started_threads_++;
5068 while (!please_stop_.load(std::memory_order_relaxed)) {
5069 void* ptr = malloc(alloc_size_);
5070
5071       // A simple malloc() / free() pair can be discarded by the compiler (and
5072       // is), making the test fail. Making |FreeForTest()| a PA_NOINLINE
5073       // function is both necessary and sufficient to keep the call from being
5074       // optimized out.
5075 FreeForTest(ptr);
5076 }
5077 }
5078
5079 private:
5080 std::atomic<bool>& please_stop_;
5081 std::atomic<int>& started_threads_;
5082 const int alloc_size_;
5083 };
5084
5085 } // namespace
5086
5087 // Disabled because executing it causes Gtest to show a warning in the output,
5088 // which confuses the runner on some platforms, making the test report an
5089 // "UNKNOWN" status even though it succeeded.
5090 TEST_P(PartitionAllocTest, DISABLED_PreforkHandler) {
5091 std::atomic<bool> please_stop;
5092 std::atomic<int> started_threads{0};
5093
5094 // Continuously allocates / frees memory, bypassing the thread cache. This
5095 // makes it likely that this thread will own the lock, and that the
5096 // EXPECT_EXIT() part will deadlock.
5097 constexpr size_t kAllocSize = ThreadCache::kLargeSizeThreshold + 1;
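  // Sizes above kLargeSizeThreshold bypass the thread cache, keeping the
  // contention on the central lock as intended.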
5098 ThreadDelegateForPreforkHandler delegate(please_stop, started_threads,
5099 kAllocSize);
5100
5101 constexpr int kThreads = 4;
5102 base::PlatformThreadHandle thread_handles[kThreads];
5103 for (auto& thread_handle : thread_handles) {
5104 base::PlatformThreadForTesting::Create(0, &delegate, &thread_handle);
5105 }
5106 // Make sure all threads are actually already running.
5107 while (started_threads != kThreads) {
5108 }
5109
5110 EXPECT_EXIT(
5111 {
5112 void* ptr = malloc(kAllocSize);
5113 FreeForTest(ptr);
5114 exit(1);
5115 },
5116 ::testing::ExitedWithCode(1), "");
5117
5118 please_stop.store(true);
5119 for (auto& thread_handle : thread_handles) {
5120 base::PlatformThreadForTesting::Join(thread_handle);
5121 }
5122 }
5123
5124 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
5125         // defined(PA_HAS_DEATH_TESTS) && !BUILDFLAG(PA_IS_CASTOS)
5126
5127 // Checks the bucket index logic.
5128 TEST_P(PartitionAllocTest, GetIndex) {
5129 BucketIndexLookup lookup{};
5130
5131 for (size_t size = 0; size < kMaxBucketed; size++) {
5132 size_t index = BucketIndexLookup::GetIndex(size);
5133 ASSERT_GE(lookup.bucket_sizes()[index], size);
5134 }
5135
5136 // Make sure that power-of-two have exactly matching buckets.
5137 for (size_t size = (1 << (kMinBucketedOrder - 1)); size < kMaxBucketed;
5138 size <<= 1) {
5139 size_t index = BucketIndexLookup::GetIndex(size);
5140 ASSERT_EQ(lookup.bucket_sizes()[index], size);
5141 }
5142 }
5143
5144 // Used to check alignment. If the compiler understands the annotations, the
5145 // zeroing in the constructor uses aligned SIMD instructions.
5146 TEST_P(PartitionAllocTest, MallocFunctionAnnotations) {
5147 struct TestStruct {
5148 uint64_t a = 0;
5149 uint64_t b = 0;
5150 };
5151
5152 void* buffer = Alloc(sizeof(TestStruct));
5153 // Should use "mov*a*ps" on x86_64.
5154 auto* x = new (buffer) TestStruct();
5155
5156 EXPECT_EQ(x->a, 0u);
5157 Free(buffer);
5158 }
5159
5160 // Test that the ConfigurablePool works properly.
5161 TEST_P(PartitionAllocTest, ConfigurablePool) {
5162 EXPECT_FALSE(IsConfigurablePoolAvailable());
5163
5164 // The rest is only applicable to 64-bit mode
5165 #if defined(ARCH_CPU_64_BITS)
5166 // Repeat the test for every possible Pool size
5167 const size_t max_pool_size = PartitionAddressSpace::ConfigurablePoolMaxSize();
5168 const size_t min_pool_size = PartitionAddressSpace::ConfigurablePoolMinSize();
5169 for (size_t pool_size = max_pool_size; pool_size >= min_pool_size;
5170 pool_size /= 2) {
5171 PA_DCHECK(std::has_single_bit(pool_size));
5172 EXPECT_FALSE(IsConfigurablePoolAvailable());
5173 uintptr_t pool_base =
5174 AllocPages(pool_size, pool_size,
5175 PageAccessibilityConfiguration(
5176 PageAccessibilityConfiguration::kInaccessible),
5177 PageTag::kPartitionAlloc);
5178 EXPECT_NE(0u, pool_base);
5179 PartitionAddressSpace::InitConfigurablePool(pool_base, pool_size);
5180
5181 EXPECT_TRUE(IsConfigurablePoolAvailable());
5182
5183 PartitionOptions opts = GetCommonPartitionOptions();
5184 opts.use_configurable_pool = PartitionOptions::kAllowed;
5185 std::unique_ptr<PartitionRoot> root = CreateCustomTestRoot(
5186 opts, PartitionTestOptions{.uncap_empty_slot_span_memory = true,
5187 .set_bucket_distribution = true});
5188
5189 const size_t count = 250;
5190 std::vector<void*> allocations(count, nullptr);
5191 for (size_t i = 0; i < count; ++i) {
5192 const size_t size = kTestSizes[base::RandGenerator(kTestSizesCount)];
5193 allocations[i] = root->Alloc(size);
5194 EXPECT_NE(nullptr, allocations[i]);
5195 // We don't Untag allocations here because MTE is disabled for
5196 // configurable pools used by V8.
5197 // https://bugs.chromium.org/p/v8/issues/detail?id=13117
5198 uintptr_t allocation_base = reinterpret_cast<uintptr_t>(allocations[i]);
5199 EXPECT_EQ(allocation_base, UntagPtr(allocations[i]));
5200 EXPECT_TRUE(allocation_base >= pool_base &&
5201 allocation_base < pool_base + pool_size);
5202 }
5203
5204 PartitionAddressSpace::UninitConfigurablePoolForTesting();
5205 FreePages(pool_base, pool_size);
5206 }
5207
5208 #endif // defined(ARCH_CPU_64_BITS)
5209 }
5210
5211 TEST_P(PartitionAllocTest, EmptySlotSpanSizeIsCapped) {
5212 // Use another root, since the ones from the test harness disable the empty
5213 // slot span size cap.
5214 std::unique_ptr<PartitionRoot> root = CreateCustomTestRoot(
5215 GetCommonPartitionOptions(),
5216 PartitionTestOptions{.set_bucket_distribution = true});
5217
5218 // Allocate some memory, don't free it to keep committed memory.
5219 std::vector<void*> allocated_memory;
5220 const size_t size = SystemPageSize();
5221 const size_t count = 400;
5222 for (size_t i = 0; i < count; i++) {
5223 void* ptr = root->Alloc(size);
5224 allocated_memory.push_back(ptr);
5225 }
5226 ASSERT_GE(root->total_size_of_committed_pages.load(std::memory_order_relaxed),
5227 size * count);
5228
5229 // To create empty slot spans, allocate from single-slot slot spans, 128kiB at
5230 // a time.
5231 std::vector<void*> single_slot_allocated_memory;
5232 constexpr size_t single_slot_count = kDefaultEmptySlotSpanRingSize - 1;
5233 const size_t single_slot_size = MaxRegularSlotSpanSize() + 1;
5234 // Make sure that even with allocation size rounding up, a single allocation
5235 // is still below the threshold.
5236 ASSERT_LT(MaxRegularSlotSpanSize() * 2,
5237 ((count * size) >> root->max_empty_slot_spans_dirty_bytes_shift));
5238 for (size_t i = 0; i < single_slot_count; i++) {
5239 void* ptr = root->Alloc(single_slot_size);
5240 single_slot_allocated_memory.push_back(ptr);
5241 }
5242
5243 // Free everything at once, creating as many empty slot spans as there are
5244 // allocations (since they are from single-slot slot spans).
5245 for (void* ptr : single_slot_allocated_memory) {
5246 root->Free(ptr);
5247 }
5248
5249 // Still have some committed empty slot spans.
5250 // PA_TS_UNCHECKED_READ() is not an issue here, since everything is
5251 // single-threaded.
5252 EXPECT_GT(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes), 0u);
5253 // But not all, as the cap triggered.
5254 EXPECT_LT(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
5255 single_slot_count * single_slot_size);
5256
5257 // Nothing left after explicit purge.
5258 root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
5259 EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes), 0u);
5260
5261 for (void* ptr : allocated_memory) {
5262 root->Free(ptr);
5263 }
5264 }
5265
5266 TEST_P(PartitionAllocTest, IncreaseEmptySlotSpanRingSize) {
5267 std::unique_ptr<PartitionRoot> root = CreateCustomTestRoot(
5268 GetCommonPartitionOptions(),
5269 PartitionTestOptions{.uncap_empty_slot_span_memory = true,
5270 .set_bucket_distribution = true});
5271
5272 std::vector<void*> single_slot_allocated_memory;
5273 constexpr size_t single_slot_count = kDefaultEmptySlotSpanRingSize + 10;
5274 const size_t single_slot_size = MaxRegularSlotSpanSize() + 1;
5275 const size_t bucket_size =
5276 root->buckets[SizeToIndex(single_slot_size)].slot_size;
5277
5278 for (size_t i = 0; i < single_slot_count; i++) {
5279 void* ptr = root->Alloc(single_slot_size);
5280 single_slot_allocated_memory.push_back(ptr);
5281 }
5282
5283 // Free everything at once, creating as many empty slot spans as there are
5284 // allocations (since they are from single-slot slot spans).
5285 for (void* ptr : single_slot_allocated_memory) {
5286 root->Free(ptr);
5287 }
5288 single_slot_allocated_memory.clear();
5289
5290 // Some of the free()-s above overflowed the slot span ring.
5291 EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
5292 kDefaultEmptySlotSpanRingSize * bucket_size);
5293
5294 // Now can cache more slot spans.
5295 root->EnableLargeEmptySlotSpanRing();
5296
5297 constexpr size_t single_slot_large_count = kDefaultEmptySlotSpanRingSize + 10;
5298 // The assertion following the alloc/free checks that the ring contains the
5299 // slots spans for the allocations done here. Slot spans that have not yet
5300 // been added to the ring are added at
5301 // `PartitionRoot::global_empty_slot_span_ring_index`. By iterating twice,
5302 // we ensure the ring contains the allocations here. This is because the
5303 // first time through the empty slot span may decommit one of the allocations
5304 // done here, the second time through that won't happen (because
5305 // `global_empty_slot_span_ring_index` will have incremented past
5306 // `kDefaultEmptySlotSpanRingSize`, and the frees in the second iteration
5307 // won't decommit one of the allocations here).
5308 for (int x = 0; x < 2; ++x) {
5309 for (size_t i = 0; i < single_slot_large_count; i++) {
5310 void* ptr = root->Alloc(single_slot_size);
5311 single_slot_allocated_memory.push_back(ptr);
5312 }
5313
5314 for (void* ptr : single_slot_allocated_memory) {
5315 root->Free(ptr);
5316 }
5317 single_slot_allocated_memory.clear();
5318 }
5319
5320 EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
5321 single_slot_large_count * bucket_size);
5322
5323 // Constants used here don't work with USE_LARGE_EMPTY_SLOT_SPAN_RING.
5324 #if !BUILDFLAG(USE_LARGE_EMPTY_SLOT_SPAN_RING)
5325 constexpr size_t single_slot_too_many_count = kMaxFreeableSpans + 10;
5326 for (size_t i = 0; i < single_slot_too_many_count; i++) {
5327 void* ptr = root->Alloc(single_slot_size);
5328 single_slot_allocated_memory.push_back(ptr);
5329 }
5330
5331 for (void* ptr : single_slot_allocated_memory) {
5332 root->Free(ptr);
5333 }
5334 single_slot_allocated_memory.clear();
5335
5336 // Overflow still works.
5337 EXPECT_EQ(PA_TS_UNCHECKED_READ(root->empty_slot_spans_dirty_bytes),
5338 kMaxFreeableSpans * bucket_size);
5339 #endif
5340 }
5341
5342 #if BUILDFLAG(PA_IS_CAST_ANDROID) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
5343 extern "C" {
5344 void* __real_malloc(size_t);
5345 } // extern "C"
5346
5347 TEST_P(PartitionAllocTest, HandleMixedAllocations) {
5348 void* ptr = __real_malloc(12);
5349 // Should not crash, no test assertion.
5350 free(ptr);
5351 }
5352 #endif
5353
5354 TEST_P(PartitionAllocTest, SortFreelist) {
5355 const size_t count = 100;
5356 const size_t allocation_size = 1;
5357 void* first_ptr = allocator.root()->Alloc(allocation_size);
5358
5359 std::vector<void*> allocations;
5360 for (size_t i = 0; i < count; ++i) {
5361 allocations.push_back(allocator.root()->Alloc(allocation_size));
5362 }
5363
5364 // Shuffle and free memory out of order.
5365 std::random_device rd;
5366 std::mt19937 generator(rd());
5367 std::shuffle(allocations.begin(), allocations.end(), generator);
5368
5369 // Keep one allocation alive (first_ptr), so that the SlotSpan is not fully
5370 // empty.
5371 for (void* ptr : allocations) {
5372 allocator.root()->Free(ptr);
5373 }
5374 allocations.clear();
5375
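  // Purging with kDiscardUnusedSystemPages is expected to leave the active
  // slot span's freelist sorted; the assertions below rely on that.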
5376 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
5377
5378 size_t bucket_index =
5379 SizeToIndex(allocation_size + ExtraAllocSize(allocator));
5380 auto& bucket = allocator.root()->buckets[bucket_index];
5381 EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
5382
5383 // Can sort again.
5384 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
5385 EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
5386
5387 for (size_t i = 0; i < count; ++i) {
5388 allocations.push_back(allocator.root()->Alloc(allocation_size));
5389 // Allocating keeps the freelist sorted.
5390 EXPECT_TRUE(bucket.active_slot_spans_head->freelist_is_sorted());
5391 }
5392
5393 // Check that it is sorted.
5394 for (size_t i = 1; i < allocations.size(); i++) {
5395 EXPECT_LT(UntagPtr(allocations[i - 1]), UntagPtr(allocations[i]));
5396 }
5397
5398 for (void* ptr : allocations) {
5399 allocator.root()->Free(ptr);
    // Free()-ing memory destroys the order. Not looking at the head of the
    // active list, as it is not necessarily the slot span |ptr| came from.
5402 auto* slot_span = SlotSpanMetadata::FromSlotStart(
5403 allocator.root()->ObjectToSlotStart(ptr));
5404 EXPECT_FALSE(slot_span->freelist_is_sorted());
5405 }
5406
5407 allocator.root()->Free(first_ptr);
5408 }
5409
5410 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_LINUX) && \
5411 defined(ARCH_CPU_64_BITS)
5412 TEST_P(PartitionAllocTest, CrashOnUnknownPointer) {
5413 int not_a_heap_object = 42;
  EXPECT_DEATH(allocator.root()->Free(&not_a_heap_object), "");
5415 }
5416 #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
5417 // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_64_BITS)
5418
5419 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_MAC)
5420
5421 // Adapted from crashpad tests.
5422 class ScopedOpenCLNoOpKernel {
5423 public:
5424 ScopedOpenCLNoOpKernel()
5425 : context_(nullptr),
5426 program_(nullptr),
5427 kernel_(nullptr),
5428 success_(false) {}
5429
5430 ScopedOpenCLNoOpKernel(const ScopedOpenCLNoOpKernel&) = delete;
5431 ScopedOpenCLNoOpKernel& operator=(const ScopedOpenCLNoOpKernel&) = delete;
5432
5433 ~ScopedOpenCLNoOpKernel() {
5434 if (kernel_) {
5435 cl_int rv = clReleaseKernel(kernel_);
5436 EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseKernel";
5437 }
5438
5439 if (program_) {
5440 cl_int rv = clReleaseProgram(program_);
5441 EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseProgram";
5442 }
5443
5444 if (context_) {
5445 cl_int rv = clReleaseContext(context_);
5446 EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseContext";
5447 }
5448 }
5449
5450 void SetUp() {
5451 cl_platform_id platform_id;
5452 cl_int rv = clGetPlatformIDs(1, &platform_id, nullptr);
5453 ASSERT_EQ(rv, CL_SUCCESS) << "clGetPlatformIDs";
5454 cl_device_id device_id;
5455 rv =
5456 clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_CPU, 1, &device_id, nullptr);
5457 #if defined(ARCH_CPU_ARM64)
5458 // CL_DEVICE_TYPE_CPU doesn’t seem to work at all on arm64, meaning that
5459 // these weird OpenCL modules probably don’t show up there at all. Keep this
5460 // test even on arm64 in case this ever does start working.
5461 if (rv == CL_INVALID_VALUE) {
5462 return;
5463 }
#endif  // defined(ARCH_CPU_ARM64)
5465 ASSERT_EQ(rv, CL_SUCCESS) << "clGetDeviceIDs";
5466
5467 context_ = clCreateContext(nullptr, 1, &device_id, nullptr, nullptr, &rv);
5468 ASSERT_EQ(rv, CL_SUCCESS) << "clCreateContext";
5469
5470 const char* sources[] = {
5471 "__kernel void NoOp(void) {barrier(CLK_LOCAL_MEM_FENCE);}",
5472 };
5473 const size_t source_lengths[] = {
5474 strlen(sources[0]),
5475 };
5476 static_assert(std::size(sources) == std::size(source_lengths),
5477 "arrays must be parallel");
5478
5479 program_ = clCreateProgramWithSource(context_, std::size(sources), sources,
5480 source_lengths, &rv);
5481 ASSERT_EQ(rv, CL_SUCCESS) << "clCreateProgramWithSource";
5482
5483 rv = clBuildProgram(program_, 1, &device_id, "-cl-opt-disable", nullptr,
5484 nullptr);
5485 ASSERT_EQ(rv, CL_SUCCESS) << "clBuildProgram";
5486
5487 kernel_ = clCreateKernel(program_, "NoOp", &rv);
5488 ASSERT_EQ(rv, CL_SUCCESS) << "clCreateKernel";
5489
5490 success_ = true;
5491 }
5492
5493 bool success() const { return success_; }
5494
5495 private:
5496 cl_context context_;
5497 cl_program program_;
5498 cl_kernel kernel_;
5499 bool success_;
5500 };
5501
// On macOS 10.11, allocations are made with PartitionAlloc, but the pointer
// is incorrectly passed by CoreFoundation to the previous default zone,
// causing crashes. This test is intended to detect regressions of that issue
// in future versions of macOS.
5506 TEST_P(PartitionAllocTest, OpenCL) {
5507 ScopedOpenCLNoOpKernel kernel;
5508 kernel.SetUp();
5509 #if !defined(ARCH_CPU_ARM64)
5510 ASSERT_TRUE(kernel.success());
#endif  // !defined(ARCH_CPU_ARM64)
5512 }
5513
5514 #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
5515 // BUILDFLAG(IS_MAC)
5516
5517 TEST_P(PartitionAllocTest, SmallSlotSpanWaste) {
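  // For each bucket, check that the "small" slot span layout (the |true|
  // argument to ComputeSystemPagesPerSlotSpan() below, assumed here to request
  // the smaller-slot-span variant) wastes less than 5% of a system page.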
5518 for (PartitionRoot::Bucket& bucket : allocator.root()->buckets) {
5519 const size_t slot_size = bucket.slot_size;
5520 if (slot_size == kInvalidBucketSize) {
5521 continue;
5522 }
5523
5524 size_t small_system_page_count =
5525 partition_alloc::internal::ComputeSystemPagesPerSlotSpan(
5526 bucket.slot_size, true);
5527 size_t small_waste =
5528 (small_system_page_count * SystemPageSize()) % slot_size;
5529
5530 EXPECT_LT(small_waste, .05 * SystemPageSize());
5531 if (slot_size <= MaxRegularSlotSpanSize()) {
5532 EXPECT_LE(small_system_page_count, MaxSystemPagesPerRegularSlotSpan());
5533 }
5534 }
5535 }
5536
5537 TEST_P(PartitionAllocTest, SortActiveSlotSpans) {
5538 auto run_test = [](size_t count) {
5539 PartitionBucket bucket;
5540 bucket.Init(16);
5541 bucket.active_slot_spans_head = nullptr;
5542
5543 std::vector<SlotSpanMetadata> slot_spans;
5544 slot_spans.reserve(count);
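    // Reserving up front matters: the active list built below stores raw
    // pointers into |slot_spans|, so the vector must never reallocate.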
5545
5546 // Add slot spans with random freelist length.
5547 for (size_t i = 0; i < count; i++) {
5548 slot_spans.emplace_back(&bucket);
5549 auto& slot_span = slot_spans.back();
5550 slot_span.num_unprovisioned_slots =
5551 partition_alloc::internal::base::RandGenerator(
5552 bucket.get_slots_per_span() / 2);
5553 slot_span.num_allocated_slots =
5554 partition_alloc::internal::base::RandGenerator(
5555 bucket.get_slots_per_span() - slot_span.num_unprovisioned_slots);
5556 slot_span.next_slot_span = bucket.active_slot_spans_head;
5557 bucket.active_slot_spans_head = &slot_span;
5558 }
5559
5560 bucket.SortActiveSlotSpans();
5561
5562 std::set<SlotSpanMetadata*> seen_slot_spans;
5563 std::vector<SlotSpanMetadata*> sorted_slot_spans;
5564 for (auto* slot_span = bucket.active_slot_spans_head; slot_span;
5565 slot_span = slot_span->next_slot_span) {
5566 sorted_slot_spans.push_back(slot_span);
5567 seen_slot_spans.insert(slot_span);
5568 }
5569
5570 // None repeated, none missing.
5571 EXPECT_EQ(seen_slot_spans.size(), sorted_slot_spans.size());
5572 EXPECT_EQ(seen_slot_spans.size(), slot_spans.size());
5573
5574 // The first slot spans are sorted.
5575 size_t sorted_spans_count =
5576 std::min(PartitionBucket::kMaxSlotSpansToSort, count);
5577 EXPECT_TRUE(std::is_sorted(sorted_slot_spans.begin(),
5578 sorted_slot_spans.begin() + sorted_spans_count,
5579 partition_alloc::internal::CompareSlotSpans));
5580
5581 // Slot spans with no freelist entries are at the end of the sorted run.
5582 auto has_empty_freelist = [](SlotSpanMetadata* a) {
5583 return a->GetFreelistLength() == 0;
5584 };
5585 auto it = std::find_if(sorted_slot_spans.begin(),
5586 sorted_slot_spans.begin() + sorted_spans_count,
5587 has_empty_freelist);
5588 if (it != sorted_slot_spans.end()) {
5589 EXPECT_TRUE(std::all_of(it,
5590 sorted_slot_spans.begin() + sorted_spans_count,
5591 has_empty_freelist));
5592 }
5593 };
5594
5595 // Everything is sorted.
5596 run_test(PartitionBucket::kMaxSlotSpansToSort / 2);
5597 // Only the first slot spans are sorted.
5598 run_test(PartitionBucket::kMaxSlotSpansToSort * 2);
5599
5600 // Corner cases.
5601 run_test(0);
5602 run_test(1);
5603 }
5604
5605 #if BUILDFLAG(USE_FREESLOT_BITMAP)
5606 TEST_P(PartitionAllocTest, FreeSlotBitmapMarkedAsUsedAfterAlloc) {
5607 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
5608 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
5609 EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
5610
5611 allocator.root()->Free(ptr);
5612 }
5613
5614 TEST_P(PartitionAllocTest, FreeSlotBitmapMarkedAsFreeAfterFree) {
5615 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
5616 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
5617 EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
5618
5619 allocator.root()->Free(ptr);
5620 EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
5621 }
5622
5623 TEST_P(PartitionAllocTest, FreeSlotBitmapResetAfterDecommit) {
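  // Freeing the only allocation leaves its slot span empty, so the
  // kDecommitEmptySlotSpans purge below can decommit it.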
5624 void* ptr1 = allocator.root()->Alloc(
5625 SystemPageSize() - ExtraAllocSize(allocator), type_name);
5626 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr1);
5627 allocator.root()->Free(ptr1);
5628
5629 EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
5630 // Decommit the slot span. Bitmap will be rewritten in Decommit().
5631 allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
5632 EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
5633 }
5634
5635 TEST_P(PartitionAllocTest, FreeSlotBitmapResetAfterPurge) {
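  // |ptr1| stays allocated so the slot span is not empty; the purge below is
  // thus expected to take the discard path (PartitionPurgeSlotSpan()) rather
  // than decommit the whole span.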
5636 void* ptr1 = allocator.root()->Alloc(
5637 SystemPageSize() - ExtraAllocSize(allocator), type_name);
5638 char* ptr2 = static_cast<char*>(allocator.root()->Alloc(
5639 SystemPageSize() - ExtraAllocSize(allocator), type_name));
5640 uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr2);
5641 allocator.root()->Free(ptr2);
5642
5643 CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
5644 EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
5645 // Bitmap will be rewritten in PartitionPurgeSlotSpan().
5646 allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
5647 CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
5648 EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
5649
5650 allocator.root()->Free(ptr1);
5651 }
5652
5653 #endif // BUILDFLAG(USE_FREESLOT_BITMAP)
5654
5655 #if BUILDFLAG(USE_LARGE_EMPTY_SLOT_SPAN_RING)
5656 TEST_P(PartitionAllocTest, GlobalEmptySlotSpanRingIndexResets) {
  // Switch to the larger empty slot span ring size, and set
  // global_empty_slot_span_ring_index to one less than the maximum.
5659 allocator.root()->AdjustForForeground();
5660 allocator.root()->SetGlobalEmptySlotSpanRingIndexForTesting(
5661 internal::kMaxFreeableSpans - 1);
5662
5663 // Switch to the smaller size, allocate, free, and clear the empty cache.
5664 allocator.root()->AdjustForBackground();
5665 void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
5666 allocator.root()->Free(ptr);
5667 ClearEmptySlotSpanCache();
5668
  // This should result in 0 empty_slot_spans_dirty_bytes and, more
  // importantly, not crash.
5671 EXPECT_EQ(
5672 0u, PA_TS_UNCHECKED_READ(allocator.root()->empty_slot_spans_dirty_bytes));
5673 }
#endif  // BUILDFLAG(USE_LARGE_EMPTY_SLOT_SPAN_RING)
5675
5676 } // namespace partition_alloc::internal
5677
5678 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
5679