1 // Copyright 2020 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "partition_alloc/thread_cache.h"
6
7 #include <algorithm>
8 #include <atomic>
9 #include <vector>
10
11 #include "build/build_config.h"
12 #include "partition_alloc/extended_api.h"
13 #include "partition_alloc/internal_allocator.h"
14 #include "partition_alloc/partition_address_space.h"
15 #include "partition_alloc/partition_alloc_base/thread_annotations.h"
16 #include "partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
17 #include "partition_alloc/partition_alloc_buildflags.h"
18 #include "partition_alloc/partition_alloc_config.h"
19 #include "partition_alloc/partition_alloc_for_testing.h"
20 #include "partition_alloc/partition_freelist_entry.h"
21 #include "partition_alloc/partition_lock.h"
22 #include "partition_alloc/partition_root.h"
23 #include "partition_alloc/tagging.h"
24 #include "testing/gtest/include/gtest/gtest.h"
25
26 // With *SAN, PartitionAlloc is replaced in partition_alloc.h by ASAN, so we
27 // cannot test the thread cache.
28 //
29 // Also, the thread cache is not supported on all platforms.
30 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
31 PA_CONFIG(THREAD_CACHE_SUPPORTED)
32
33 namespace partition_alloc {
34
35 using BucketDistribution = PartitionRoot::BucketDistribution;
36 using PartitionFreelistEncoding = internal::PartitionFreelistEncoding;
37
38 struct ThreadCacheTestParam {
39 BucketDistribution bucket_distribution;
40 PartitionFreelistEncoding freelist_encoding;
41 };
42
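// Each test in this suite runs once per entry below, pairing a bucket
// distribution with a freelist encoding.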
43 const std::vector<ThreadCacheTestParam> params = {
44 {ThreadCacheTestParam{
45 BucketDistribution::kNeutral,
46 internal::PartitionFreelistEncoding::kPoolOffsetFreeList}},
47 {ThreadCacheTestParam{
48 BucketDistribution::kNeutral,
49 internal::PartitionFreelistEncoding::kEncodedFreeList}},
50 {ThreadCacheTestParam{
51 BucketDistribution::kDenser,
52 internal::PartitionFreelistEncoding::kPoolOffsetFreeList}},
53 {ThreadCacheTestParam{
54 BucketDistribution::kDenser,
55 internal::PartitionFreelistEncoding::kEncodedFreeList}}};
56
57 namespace {
58
59 constexpr size_t kSmallSize = 33; // Must be large enough to fit extras.
60 constexpr size_t kDefaultCountForSmallBucket =
61 ThreadCache::kSmallBucketBaseCount * ThreadCache::kDefaultMultiplier;
62 constexpr size_t kFillCountForSmallBucket =
63 kDefaultCountForSmallBucket / ThreadCache::kBatchFillRatio;
64
65 constexpr size_t kMediumSize = 200;
66 constexpr size_t kDefaultCountForMediumBucket = kDefaultCountForSmallBucket / 2;
67 constexpr size_t kFillCountForMediumBucket =
68 kDefaultCountForMediumBucket / ThreadCache::kBatchFillRatio;
69
70 static_assert(kMediumSize <= ThreadCache::kDefaultSizeThreshold, "");
71
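// Snapshots a stats counter at construction; Delta() reports how much the
// counter has grown since then (or since the last Reset()).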
72 class DeltaCounter {
73 public:
74 explicit DeltaCounter(uint64_t& value)
75 : current_value_(value), initial_value_(value) {}
76 void Reset() { initial_value_ = current_value_; }
77 uint64_t Delta() const { return current_value_ - initial_value_; }
78
79 private:
80 uint64_t& current_value_;
81 uint64_t initial_value_;
82 };
83
84 // Forbid extras, since they make finding out which bucket is used harder.
85 std::unique_ptr<PartitionAllocatorForTesting> CreateAllocator(
86 internal::PartitionFreelistEncoding encoding =
87 internal::PartitionFreelistEncoding::kEncodedFreeList) {
88 PartitionOptions opts;
89 #if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
90 opts.thread_cache = PartitionOptions::kEnabled;
91 #endif // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
92 opts.star_scan_quarantine = PartitionOptions::kAllowed;
93 opts.use_pool_offset_freelists =
94 (encoding == internal::PartitionFreelistEncoding::kPoolOffsetFreeList)
95 ? PartitionOptions::kEnabled
96 : PartitionOptions::kDisabled;
97 std::unique_ptr<PartitionAllocatorForTesting> allocator =
98 std::make_unique<PartitionAllocatorForTesting>(opts);
99 allocator->root()->UncapEmptySlotSpanMemoryForTesting();
100
101 return allocator;
102 }
103 } // namespace
104
105 class PartitionAllocThreadCacheTest
106 : public ::testing::TestWithParam<ThreadCacheTestParam> {
107 public:
108 PartitionAllocThreadCacheTest()
109 : allocator_(CreateAllocator(GetParam().freelist_encoding)),
110 scope_(allocator_->root()) {}
111
112 ~PartitionAllocThreadCacheTest() override {
113 ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
114
115 // Cleanup the global state so next test can recreate ThreadCache.
116 if (ThreadCache::IsTombstone(ThreadCache::Get())) {
117 ThreadCache::RemoveTombstoneForTesting();
118 }
119 }
120 protected:
121 void SetUp() override {
122 PartitionRoot* root = allocator_->root();
123 switch (GetParam().bucket_distribution) {
124 case BucketDistribution::kNeutral:
125 root->ResetBucketDistributionForTesting();
126 break;
127 case BucketDistribution::kDenser:
128 root->SwitchToDenserBucketDistribution();
129 break;
130 }
131
132 ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
133 ThreadCache::kDefaultMultiplier);
134 ThreadCacheRegistry::Instance().SetPurgingConfiguration(
135 kMinPurgeInterval, kMaxPurgeInterval, kDefaultPurgeInterval,
136 kMinCachedMemoryForPurgingBytes);
137 ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
138
139 // Make sure that enough slot spans have been touched, otherwise cache fill
140 // becomes unpredictable (because it doesn't take slow paths in the
141 // allocator), which is an issue for tests.
142 FillThreadCacheAndReturnIndex(kSmallSize, 1000);
143 FillThreadCacheAndReturnIndex(kMediumSize, 1000);
144
145 // There are allocations, a thread cache is created.
146 auto* tcache = root->thread_cache_for_testing();
147 ASSERT_TRUE(tcache);
148
149 ThreadCacheRegistry::Instance().ResetForTesting();
150 tcache->ResetForTesting();
151 }
152
153 void TearDown() override {
154 auto* tcache = root()->thread_cache_for_testing();
155 ASSERT_TRUE(tcache);
156 tcache->Purge();
157
158 ASSERT_EQ(root()->get_total_size_of_allocated_bytes(), 0u);
159 }
160
161 PartitionRoot* root() { return allocator_->root(); }
162
163 // Returns the size of the smallest bucket fitting an allocation of
164 // |sizeof(ThreadCache)| bytes.
165 size_t GetBucketSizeForThreadCache() {
166 size_t tc_bucket_index = root()->SizeToBucketIndex(
167 sizeof(ThreadCache), PartitionRoot::BucketDistribution::kNeutral);
168 auto* tc_bucket = &root()->buckets[tc_bucket_index];
169 return tc_bucket->slot_size;
170 }
171
172 static size_t SizeToIndex(size_t size) {
173 return PartitionRoot::SizeToBucketIndex(size,
174 GetParam().bucket_distribution);
175 }
176
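// Allocates and then frees |count| objects whose requested size maps to
// |raw_size|'s bucket, leaving the freed slots in this thread's cache.
// Returns the bucket index used.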
177 size_t FillThreadCacheAndReturnIndex(size_t raw_size, size_t count = 1) {
178 uint16_t bucket_index = SizeToIndex(raw_size);
179 std::vector<void*> allocated_data;
180
181 for (size_t i = 0; i < count; ++i) {
182 allocated_data.push_back(
183 root()->Alloc(root()->AdjustSizeForExtrasSubtract(raw_size), ""));
184 }
185 for (void* ptr : allocated_data) {
186 root()->Free(ptr);
187 }
188
189 return bucket_index;
190 }
191
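// Allocates and frees objects of increasing sizes until this thread's cache
// holds at least |target_cached_memory| bytes.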
192 void FillThreadCacheWithMemory(size_t target_cached_memory) {
193 for (int batch : {1, 2, 4, 8, 16}) {
194 for (size_t raw_size = root()->AdjustSizeForExtrasAdd(1);
195 raw_size <= ThreadCache::kLargeSizeThreshold; raw_size++) {
196 FillThreadCacheAndReturnIndex(raw_size, batch);
197
198 if (ThreadCache::Get()->CachedMemory() >= target_cached_memory) {
199 return;
200 }
201 }
202 }
203
204 ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
205 }
206
207 std::unique_ptr<PartitionAllocatorForTesting> allocator_;
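// Scopes the process's thread cache to |allocator_|'s root while each test
// runs (see ThreadCacheProcessScopeForTesting in extended_api.h).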
208 internal::ThreadCacheProcessScopeForTesting scope_;
209 };
210
211 INSTANTIATE_TEST_SUITE_P(AlternateBucketDistributionAndPartitionFreeList,
212 PartitionAllocThreadCacheTest,
213 testing::ValuesIn(params));
214
215 TEST_P(PartitionAllocThreadCacheTest, Simple) {
216 // There is a cache.
217 auto* tcache = root()->thread_cache_for_testing();
218 EXPECT_TRUE(tcache);
219 DeltaCounter batch_fill_counter(tcache->stats_for_testing().batch_fill_count);
220
221 void* ptr =
222 root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
223 ASSERT_TRUE(ptr);
224
225 uint16_t index = SizeToIndex(kSmallSize);
226 EXPECT_EQ(kFillCountForSmallBucket - 1,
227 tcache->bucket_count_for_testing(index));
228
229 root()->Free(ptr);
230 // Freeing fills the thread cache.
231 EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));
232
233 void* ptr2 =
234 root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
235 // MTE-untag, because Free() changes tag.
236 EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
237 // Allocated from the thread cache.
238 EXPECT_EQ(kFillCountForSmallBucket - 1,
239 tcache->bucket_count_for_testing(index));
240
241 EXPECT_EQ(1u, batch_fill_counter.Delta());
242
243 root()->Free(ptr2);
244 }
245
246 TEST_P(PartitionAllocThreadCacheTest, InexactSizeMatch) {
247 void* ptr =
248 root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
249 ASSERT_TRUE(ptr);
250
251 // There is a cache.
252 auto* tcache = root()->thread_cache_for_testing();
253 EXPECT_TRUE(tcache);
254
255 uint16_t index = SizeToIndex(kSmallSize);
256 EXPECT_EQ(kFillCountForSmallBucket - 1,
257 tcache->bucket_count_for_testing(index));
258
259 root()->Free(ptr);
260 // Freeing fills the thread cache.
261 EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));
262
263 void* ptr2 =
264 root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize + 1), "");
265 // MTE-untag, because Free() changes tag.
266 EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
267 // Allocated from the thread cache.
268 EXPECT_EQ(kFillCountForSmallBucket - 1,
269 tcache->bucket_count_for_testing(index));
270 root()->Free(ptr2);
271 }
272
273 TEST_P(PartitionAllocThreadCacheTest, MultipleObjectsCachedPerBucket) {
274 auto* tcache = root()->thread_cache_for_testing();
275 DeltaCounter batch_fill_counter{tcache->stats_for_testing().batch_fill_count};
276 size_t bucket_index =
277 FillThreadCacheAndReturnIndex(kMediumSize, kFillCountForMediumBucket + 2);
278 EXPECT_EQ(2 * kFillCountForMediumBucket,
279 tcache->bucket_count_for_testing(bucket_index));
280 // 2 batches, since there were more than |kFillCountForMediumBucket|
281 // allocations.
282 EXPECT_EQ(2u, batch_fill_counter.Delta());
283 }
284
285 TEST_P(PartitionAllocThreadCacheTest, ObjectsCachedCountIsLimited) {
286 size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, 1000);
287 auto* tcache = root()->thread_cache_for_testing();
288 EXPECT_LT(tcache->bucket_count_for_testing(bucket_index), 1000u);
289 }
290
291 TEST_P(PartitionAllocThreadCacheTest, Purge) {
292 size_t allocations = 10;
293 size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, allocations);
294 auto* tcache = root()->thread_cache_for_testing();
295 EXPECT_EQ(
296 (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket,
297 tcache->bucket_count_for_testing(bucket_index));
298 tcache->Purge();
299 EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
300 }
301
302 TEST_P(PartitionAllocThreadCacheTest, NoCrossPartitionCache) {
303 PartitionOptions opts;
304 opts.star_scan_quarantine = PartitionOptions::kAllowed;
305 PartitionAllocatorForTesting allocator(opts);
306
307 size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
308 void* ptr = allocator.root()->Alloc(
309 allocator.root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
310 ASSERT_TRUE(ptr);
311
312 auto* tcache = root()->thread_cache_for_testing();
313 EXPECT_EQ(kFillCountForSmallBucket,
314 tcache->bucket_count_for_testing(bucket_index));
315
316 allocator.root()->Free(ptr);
317 EXPECT_EQ(kFillCountForSmallBucket,
318 tcache->bucket_count_for_testing(bucket_index));
319 }
320
321 // Required to record hits and misses.
322 #if PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
323 TEST_P(PartitionAllocThreadCacheTest, LargeAllocationsAreNotCached) {
324 auto* tcache = root()->thread_cache_for_testing();
325 DeltaCounter alloc_miss_counter{tcache->stats_for_testing().alloc_misses};
326 DeltaCounter alloc_miss_too_large_counter{
327 tcache->stats_for_testing().alloc_miss_too_large};
328 DeltaCounter cache_fill_counter{tcache->stats_for_testing().cache_fill_count};
329 DeltaCounter cache_fill_misses_counter{
330 tcache->stats_for_testing().cache_fill_misses};
331
332 FillThreadCacheAndReturnIndex(100 * 1024);
333 tcache = root()->thread_cache_for_testing();
334 EXPECT_EQ(1u, alloc_miss_counter.Delta());
335 EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
336 EXPECT_EQ(1u, cache_fill_counter.Delta());
337 EXPECT_EQ(1u, cache_fill_misses_counter.Delta());
338 }
339 #endif // PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
340
341 TEST_P(PartitionAllocThreadCacheTest, DirectMappedAllocationsAreNotCached) {
342 FillThreadCacheAndReturnIndex(1024 * 1024);
343 // The line above would crash due to out of bounds access if this wasn't
344 // properly handled.
345 }
346
347 // This tests that Realloc properly handles bookkeeping, specifically the path
348 // that reallocates in place.
349 TEST_P(PartitionAllocThreadCacheTest, DirectMappedReallocMetrics) {
350 root()->ResetBookkeepingForTesting();
351
352 size_t expected_allocated_size = root()->get_total_size_of_allocated_bytes();
353
354 EXPECT_EQ(expected_allocated_size,
355 root()->get_total_size_of_allocated_bytes());
356 EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());
357
358 void* ptr = root()->Alloc(
359 root()->AdjustSizeForExtrasSubtract(10 * internal::kMaxBucketed), "");
360
361 EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
362 root()->get_total_size_of_allocated_bytes());
363
364 void* ptr2 = root()->Realloc(
365 ptr, root()->AdjustSizeForExtrasSubtract(9 * internal::kMaxBucketed), "");
366
367 ASSERT_EQ(ptr, ptr2);
368 EXPECT_EQ(expected_allocated_size + 9 * internal::kMaxBucketed,
369 root()->get_total_size_of_allocated_bytes());
370
371 ptr2 = root()->Realloc(
372 ptr, root()->AdjustSizeForExtrasSubtract(10 * internal::kMaxBucketed),
373 "");
374
375 ASSERT_EQ(ptr, ptr2);
376 EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
377 root()->get_total_size_of_allocated_bytes());
378
379 root()->Free(ptr);
380 }
381
382 namespace {
383
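// Free-function version of the fixture helper above; takes the root and
// bucket distribution explicitly so it can be called from helper threads.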
384 size_t FillThreadCacheAndReturnIndex(PartitionRoot* root,
385 size_t size,
386 BucketDistribution bucket_distribution,
387 size_t count = 1) {
388 uint16_t bucket_index =
389 PartitionRoot::SizeToBucketIndex(size, bucket_distribution);
390 std::vector<void*> allocated_data;
391
392 for (size_t i = 0; i < count; ++i) {
393 allocated_data.push_back(
394 root->Alloc(root->AdjustSizeForExtrasSubtract(size), ""));
395 }
396 for (void* ptr : allocated_data) {
397 root->Free(ptr);
398 }
399
400 return bucket_index;
401 }
402
403 // TODO(1151236): To remove callback from PartitionAlloc's DEPS,
404 // rewrite the tests without BindLambdaForTesting and RepeatingClosure.
405 // However, this makes it a bit annoying to add more tests using their
406 // own threads. We need an easier way to implement tests using
407 // PlatformThreadForTesting::Create().
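
// Verifies, from a newly spawned thread, that the new thread gets its own
// thread cache, distinct from the parent thread's.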
408 class ThreadDelegateForMultipleThreadCaches
409 : public internal::base::PlatformThreadForTesting::Delegate {
410 public:
411 ThreadDelegateForMultipleThreadCaches(ThreadCache* parent_thread_cache,
412 PartitionRoot* root,
413 BucketDistribution bucket_distribution)
414 : parent_thread_tcache_(parent_thread_cache),
415 root_(root),
416 bucket_distribution_(bucket_distribution) {}
417
418 void ThreadMain() override {
419 EXPECT_FALSE(root_->thread_cache_for_testing()); // No allocations yet.
420 FillThreadCacheAndReturnIndex(root_, kMediumSize, bucket_distribution_);
421 auto* tcache = root_->thread_cache_for_testing();
422 EXPECT_TRUE(tcache);
423
424 EXPECT_NE(parent_thread_tcache_, tcache);
425 }
426
427 private:
428 ThreadCache* parent_thread_tcache_ = nullptr;
429 PartitionRoot* root_ = nullptr;
430 PartitionRoot::BucketDistribution bucket_distribution_;
431 };
432
433 } // namespace
434
435 TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCaches) {
436 FillThreadCacheAndReturnIndex(kMediumSize);
437 auto* parent_thread_tcache = root()->thread_cache_for_testing();
438 ASSERT_TRUE(parent_thread_tcache);
439
440 ThreadDelegateForMultipleThreadCaches delegate(
441 parent_thread_tcache, root(), GetParam().bucket_distribution);
442
443 internal::base::PlatformThreadHandle thread_handle;
444 internal::base::PlatformThreadForTesting::Create(0, &delegate,
445 &thread_handle);
446 internal::base::PlatformThreadForTesting::Join(thread_handle);
447 }
448
449 namespace {
450
451 class ThreadDelegateForThreadCacheReclaimedWhenThreadExits
452 : public internal::base::PlatformThreadForTesting::Delegate {
453 public:
454 ThreadDelegateForThreadCacheReclaimedWhenThreadExits(PartitionRoot* root,
455 void*& other_thread_ptr)
456 : root_(root), other_thread_ptr_(other_thread_ptr) {}
457
458 void ThreadMain() override {
459 EXPECT_FALSE(root_->thread_cache_for_testing()); // No allocations yet.
460 other_thread_ptr_ =
461 root_->Alloc(root_->AdjustSizeForExtrasSubtract(kMediumSize), "");
462 root_->Free(other_thread_ptr_);
463 // |other_thread_ptr| is now in the thread cache.
464 }
465
466 private:
467 PartitionRoot* root_ = nullptr;
468 void*& other_thread_ptr_;
469 };
470
471 } // namespace
472
473 TEST_P(PartitionAllocThreadCacheTest, ThreadCacheReclaimedWhenThreadExits) {
474 // Make sure that there is always at least one object allocated in the test
475 // bucket, so that the PartitionPage is not reclaimed.
476 //
477 // Allocate enough objects to force a cache fill at the next allocation.
478 std::vector<void*> tmp;
479 for (size_t i = 0; i < kDefaultCountForMediumBucket / 4; i++) {
480 tmp.push_back(
481 root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), ""));
482 }
483
484 void* other_thread_ptr = nullptr;
485 ThreadDelegateForThreadCacheReclaimedWhenThreadExits delegate(
486 root(), other_thread_ptr);
487
488 internal::base::PlatformThreadHandle thread_handle;
489 internal::base::PlatformThreadForTesting::Create(0, &delegate,
490 &thread_handle);
491 internal::base::PlatformThreadForTesting::Join(thread_handle);
492
493 void* this_thread_ptr =
494 root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
495 // |other_thread_ptr| was returned to the central allocator, and is returned
496 // here, as it comes from the freelist.
497 EXPECT_EQ(UntagPtr(this_thread_ptr), UntagPtr(other_thread_ptr));
498 root()->Free(other_thread_ptr);
499
500 for (void* ptr : tmp) {
501 root()->Free(ptr);
502 }
503 }
504
505 namespace {
506
507 class ThreadDelegateForThreadCacheRegistry
508 : public internal::base::PlatformThreadForTesting::Delegate {
509 public:
510 ThreadDelegateForThreadCacheRegistry(ThreadCache* parent_thread_cache,
511 PartitionRoot* root,
512 BucketDistribution bucket_distribution)
513 : parent_thread_tcache_(parent_thread_cache),
514 root_(root),
515 bucket_distribution_(bucket_distribution) {}
516
517 void ThreadMain() override {
518 EXPECT_FALSE(root_->thread_cache_for_testing()); // No allocations yet.
519 FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_);
520 auto* tcache = root_->thread_cache_for_testing();
521 EXPECT_TRUE(tcache);
522
523 internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
524 EXPECT_EQ(tcache->prev_for_testing(), nullptr);
525 EXPECT_EQ(tcache->next_for_testing(), parent_thread_tcache_);
526 }
527
528 private:
529 ThreadCache* parent_thread_tcache_ = nullptr;
530 PartitionRoot* root_ = nullptr;
531 BucketDistribution bucket_distribution_;
532 };
533
534 } // namespace
535
536 TEST_P(PartitionAllocThreadCacheTest, ThreadCacheRegistry) {
537 auto* parent_thread_tcache = root()->thread_cache_for_testing();
538 ASSERT_TRUE(parent_thread_tcache);
539
540 #if !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) || \
541 BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)) && \
542 BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
543 // iOS and MacOS 15 create worker threads internally (start_wqthread).
544 // So thread caches are created for the worker threads, because those threads
545 // allocate memory for initialization (_dispatch_calloc is invoked).
546 // We cannot assume that there is only one thread cache here.
547
548 // On Linux, ChromeOS and Android, some other tests may create
549 // non-joinable threads. E.g. FilePathWatcherTest creates a
550 // non-joinable thread in InotifyReader::StartThread(). That thread will
551 // still be running after the tests have finished, which breaks the
552 // assumption that only the main thread exists here.
553 {
554 internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
555 EXPECT_EQ(parent_thread_tcache->prev_for_testing(), nullptr);
556 EXPECT_EQ(parent_thread_tcache->next_for_testing(), nullptr);
557 }
558 #endif
559
560 ThreadDelegateForThreadCacheRegistry delegate(parent_thread_tcache, root(),
561 GetParam().bucket_distribution);
562
563 internal::base::PlatformThreadHandle thread_handle;
564 internal::base::PlatformThreadForTesting::Create(0, &delegate,
565 &thread_handle);
566 internal::base::PlatformThreadForTesting::Join(thread_handle);
567
568 #if !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) || \
569 BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)) && \
570 BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
571 internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
572 EXPECT_EQ(parent_thread_tcache->prev_for_testing(), nullptr);
573 EXPECT_EQ(parent_thread_tcache->next_for_testing(), nullptr);
574 #endif
575 }
576
577 #if PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
578 TEST_P(PartitionAllocThreadCacheTest, RecordStats) {
579 auto* tcache = root()->thread_cache_for_testing();
580 DeltaCounter alloc_counter{tcache->stats_for_testing().alloc_count};
581 DeltaCounter alloc_hits_counter{tcache->stats_for_testing().alloc_hits};
582 DeltaCounter alloc_miss_counter{tcache->stats_for_testing().alloc_misses};
583
584 DeltaCounter alloc_miss_empty_counter{
585 tcache->stats_for_testing().alloc_miss_empty};
586
587 DeltaCounter cache_fill_counter{tcache->stats_for_testing().cache_fill_count};
588 DeltaCounter cache_fill_hits_counter{
589 tcache->stats_for_testing().cache_fill_hits};
590 DeltaCounter cache_fill_misses_counter{
591 tcache->stats_for_testing().cache_fill_misses};
592
593 // Cache has been purged, first allocation is a miss.
594 void* data =
595 root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
596 EXPECT_EQ(1u, alloc_counter.Delta());
597 EXPECT_EQ(1u, alloc_miss_counter.Delta());
598 EXPECT_EQ(0u, alloc_hits_counter.Delta());
599
600 // Cache fill worked.
601 root()->Free(data);
602 EXPECT_EQ(1u, cache_fill_counter.Delta());
603 EXPECT_EQ(1u, cache_fill_hits_counter.Delta());
604 EXPECT_EQ(0u, cache_fill_misses_counter.Delta());
605
606 tcache->Purge();
607 cache_fill_counter.Reset();
608 // Buckets are never full, fill always succeeds.
609 size_t allocations = 10;
610 size_t bucket_index = FillThreadCacheAndReturnIndex(
611 kMediumSize, kDefaultCountForMediumBucket + allocations);
612 EXPECT_EQ(kDefaultCountForMediumBucket + allocations,
613 cache_fill_counter.Delta());
614 EXPECT_EQ(0u, cache_fill_misses_counter.Delta());
615
616 // Memory footprint.
617 ThreadCacheStats stats;
618 ThreadCacheRegistry::Instance().DumpStats(true, &stats);
619 // Bucket was cleared (set to kDefaultCountForMediumBucket / 2) after going
620 // above the limit (-1), then refilled by batches (1 + floor(allocations /
621 // kFillCountForMediumBucket) times).
622 size_t expected_count =
623 kDefaultCountForMediumBucket / 2 - 1 +
624 (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket;
625 EXPECT_EQ(root()->buckets[bucket_index].slot_size * expected_count,
626 stats.bucket_total_memory);
627 EXPECT_EQ(sizeof(ThreadCache), stats.metadata_overhead);
628 }
629
630 namespace {
631
632 class ThreadDelegateForMultipleThreadCachesAccounting
633 : public internal::base::PlatformThreadForTesting::Delegate {
634 public:
635 ThreadDelegateForMultipleThreadCachesAccounting(
636 PartitionRoot* root,
637 const ThreadCacheStats& wqthread_stats,
638 int alloc_count,
639 BucketDistribution bucket_distribution)
640 : root_(root),
641 bucket_distribution_(bucket_distribution),
642 wqthread_stats_(wqthread_stats),
643 alloc_count_(alloc_count) {}
644
645 void ThreadMain() override {
646 EXPECT_FALSE(root_->thread_cache_for_testing()); // No allocations yet.
647 size_t bucket_index =
648 FillThreadCacheAndReturnIndex(root_, kMediumSize, bucket_distribution_);
649
650 ThreadCacheStats stats;
651 ThreadCacheRegistry::Instance().DumpStats(false, &stats);
652 // 2x: one thread cache for this thread and one for the parent thread.
653 EXPECT_EQ(
654 2 * root_->buckets[bucket_index].slot_size * kFillCountForMediumBucket,
655 stats.bucket_total_memory - wqthread_stats_.bucket_total_memory);
656 EXPECT_EQ(2 * sizeof(ThreadCache),
657 stats.metadata_overhead - wqthread_stats_.metadata_overhead);
658
659 ThreadCacheStats this_thread_cache_stats{};
660 root_->thread_cache_for_testing()->AccumulateStats(
661 &this_thread_cache_stats);
662 EXPECT_EQ(alloc_count_ + this_thread_cache_stats.alloc_count,
663 stats.alloc_count - wqthread_stats_.alloc_count);
664 }
665
666 private:
667 PartitionRoot* root_ = nullptr;
668 BucketDistribution bucket_distribution_;
669 const ThreadCacheStats wqthread_stats_;
670 const int alloc_count_;
671 };
672
673 } // namespace
674
675 TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCachesAccounting) {
676 ThreadCacheStats wqthread_stats{0};
677 #if (BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS) || \
678 BUILDFLAG(IS_LINUX)) && \
679 BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
680 {
681 // iOS and MacOS 15 create worker threads internally (start_wqthread).
682 // So thread caches are created for the worker threads, because those threads
683 // allocate memory for initialization (_dispatch_calloc is invoked).
684 // We need to count the worker threads created by the iOS and Mac systems.
685
686 // On Linux, ChromeOS and Android, some other tests may create
687 // non-joinable threads. E.g. FilePathWatcherTest creates a
688 // non-joinable thread in InotifyReader::StartThread(). That thread will
689 // still be running after the tests have finished. We need to count
690 // those threads here.
691 ThreadCacheRegistry::Instance().DumpStats(false, &wqthread_stats);
692
693 // Remove this thread's thread cache stats from wqthread_stats.
694 ThreadCacheStats this_stats;
695 ThreadCacheRegistry::Instance().DumpStats(true, &this_stats);
696
697 wqthread_stats.alloc_count -= this_stats.alloc_count;
698 wqthread_stats.metadata_overhead -= this_stats.metadata_overhead;
699 wqthread_stats.bucket_total_memory -= this_stats.bucket_total_memory;
700 }
701 #endif
702 FillThreadCacheAndReturnIndex(kMediumSize);
703 uint64_t alloc_count =
704 root()->thread_cache_for_testing()->stats_for_testing().alloc_count;
705
706 ThreadDelegateForMultipleThreadCachesAccounting delegate(
707 root(), wqthread_stats, alloc_count, GetParam().bucket_distribution);
708
709 internal::base::PlatformThreadHandle thread_handle;
710 internal::base::PlatformThreadForTesting::Create(0, &delegate,
711 &thread_handle);
712 internal::base::PlatformThreadForTesting::Join(thread_handle);
713 }
714
715 #endif // PA_CONFIG(THREAD_CACHE_ENABLE_STATISTICS)
716
717 // TODO(https://crbug.com/1287799): Flaky on IOS.
718 #if BUILDFLAG(IS_IOS)
719 #define MAYBE_PurgeAll DISABLED_PurgeAll
720 #else
721 #define MAYBE_PurgeAll PurgeAll
722 #endif
723
724 namespace {
725
726 class ThreadDelegateForPurgeAll
727 : public internal::base::PlatformThreadForTesting::Delegate {
728 public:
729 ThreadDelegateForPurgeAll(PartitionRoot* root,
730 ThreadCache*& other_thread_tcache,
731 std::atomic<bool>& other_thread_started,
732 std::atomic<bool>& purge_called,
733 int bucket_index,
734 BucketDistribution bucket_distribution)
735 : root_(root),
736 other_thread_tcache_(other_thread_tcache),
737 other_thread_started_(other_thread_started),
738 purge_called_(purge_called),
739 bucket_index_(bucket_index),
740 bucket_distribution_(bucket_distribution) {}
741
742 void ThreadMain() override PA_NO_THREAD_SAFETY_ANALYSIS {
743 FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_);
744 other_thread_tcache_ = root_->thread_cache_for_testing();
745
746 other_thread_started_.store(true, std::memory_order_release);
747 while (!purge_called_.load(std::memory_order_acquire)) {
748 }
749
750 // Purge() was not triggered from the other thread.
751 EXPECT_EQ(kFillCountForSmallBucket,
752 other_thread_tcache_->bucket_count_for_testing(bucket_index_));
753 // Allocations do not trigger Purge().
754 void* data =
755 root_->Alloc(root_->AdjustSizeForExtrasSubtract(kSmallSize), "");
756 EXPECT_EQ(kFillCountForSmallBucket - 1,
757 other_thread_tcache_->bucket_count_for_testing(bucket_index_));
758 // But deallocations do.
759 root_->Free(data);
760 EXPECT_EQ(0u,
761 other_thread_tcache_->bucket_count_for_testing(bucket_index_));
762 }
763
764 private:
765 PartitionRoot* root_ = nullptr;
766 ThreadCache*& other_thread_tcache_;
767 std::atomic<bool>& other_thread_started_;
768 std::atomic<bool>& purge_called_;
769 const int bucket_index_;
770 BucketDistribution bucket_distribution_;
771 };
772
773 } // namespace
774
775 TEST_P(PartitionAllocThreadCacheTest, MAYBE_PurgeAll)
776 PA_NO_THREAD_SAFETY_ANALYSIS {
777 std::atomic<bool> other_thread_started{false};
778 std::atomic<bool> purge_called{false};
779
780 size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
781 ThreadCache* this_thread_tcache = root()->thread_cache_for_testing();
782 ThreadCache* other_thread_tcache = nullptr;
783
784 ThreadDelegateForPurgeAll delegate(
785 root(), other_thread_tcache, other_thread_started, purge_called,
786 bucket_index, GetParam().bucket_distribution);
787 internal::base::PlatformThreadHandle thread_handle;
788 internal::base::PlatformThreadForTesting::Create(0, &delegate,
789 &thread_handle);
790
791 while (!other_thread_started.load(std::memory_order_acquire)) {
792 }
793
794 EXPECT_EQ(kFillCountForSmallBucket,
795 this_thread_tcache->bucket_count_for_testing(bucket_index));
796 EXPECT_EQ(kFillCountForSmallBucket,
797 other_thread_tcache->bucket_count_for_testing(bucket_index));
798
799 ThreadCacheRegistry::Instance().PurgeAll();
800 // This thread is synchronously purged.
801 EXPECT_EQ(0u, this_thread_tcache->bucket_count_for_testing(bucket_index));
802 // Not the other one.
803 EXPECT_EQ(kFillCountForSmallBucket,
804 other_thread_tcache->bucket_count_for_testing(bucket_index));
805
806 purge_called.store(true, std::memory_order_release);
807 internal::base::PlatformThreadForTesting::Join(thread_handle);
808 }
809
810 TEST_P(PartitionAllocThreadCacheTest, PeriodicPurge) {
811 auto& registry = ThreadCacheRegistry::Instance();
812 auto NextInterval = [&registry]() {
813 return internal::base::Microseconds(
814 registry.GetPeriodicPurgeNextIntervalInMicroseconds());
815 };
816
817 EXPECT_EQ(NextInterval(), registry.default_purge_interval());
818
819 // Small amount of memory, the period gets longer.
820 auto* tcache = ThreadCache::Get();
821 ASSERT_LT(tcache->CachedMemory(),
822 registry.min_cached_memory_for_purging_bytes());
823 registry.RunPeriodicPurge();
824 EXPECT_EQ(NextInterval(), 2 * registry.default_purge_interval());
825 registry.RunPeriodicPurge();
826 EXPECT_EQ(NextInterval(), 4 * registry.default_purge_interval());
827
828 // Check that the purge interval is clamped at the maximum value.
829 while (NextInterval() < registry.max_purge_interval()) {
830 registry.RunPeriodicPurge();
831 }
832 registry.RunPeriodicPurge();
833
834 // Not enough memory to decrease the interval.
835 FillThreadCacheWithMemory(registry.min_cached_memory_for_purging_bytes() + 1);
836 registry.RunPeriodicPurge();
837 EXPECT_EQ(NextInterval(), registry.max_purge_interval());
838
839 FillThreadCacheWithMemory(2 * registry.min_cached_memory_for_purging_bytes() +
840 1);
841 registry.RunPeriodicPurge();
842 EXPECT_EQ(NextInterval(), registry.max_purge_interval() / 2);
843
844 // Enough memory, interval doesn't change.
845 FillThreadCacheWithMemory(registry.min_cached_memory_for_purging_bytes());
846 registry.RunPeriodicPurge();
847 EXPECT_EQ(NextInterval(), registry.max_purge_interval() / 2);
848
849 // No cached memory, increase the interval.
850 registry.RunPeriodicPurge();
851 EXPECT_EQ(NextInterval(), registry.max_purge_interval());
852
853 // Cannot test the very large size with only one thread; this is tested below
854 // in the multiple-threads test.
855 }
856
857 namespace {
858
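// Free-function version of the fixture's FillThreadCacheWithMemory(), usable
// from helper threads.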
859 void FillThreadCacheWithMemory(PartitionRoot* root,
860 size_t target_cached_memory,
861 BucketDistribution bucket_distribution) {
862 for (int batch : {1, 2, 4, 8, 16}) {
863 for (size_t allocation_size = 1;
864 allocation_size <= ThreadCache::kLargeSizeThreshold;
865 allocation_size++) {
866 FillThreadCacheAndReturnIndex(
867 root, root->AdjustSizeForExtrasAdd(allocation_size),
868 bucket_distribution, batch);
869
870 if (ThreadCache::Get()->CachedMemory() >= target_cached_memory) {
871 return;
872 }
873 }
874 }
875
876 ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
877 }
878
879 class ThreadDelegateForPeriodicPurgeSumsOverAllThreads
880 : public internal::base::PlatformThreadForTesting::Delegate {
881 public:
882 ThreadDelegateForPeriodicPurgeSumsOverAllThreads(
883 PartitionRoot* root,
884 std::atomic<int>& allocations_done,
885 std::atomic<bool>& can_finish,
886 BucketDistribution bucket_distribution)
887 : root_(root),
888 allocations_done_(allocations_done),
889 can_finish_(can_finish),
890 bucket_distribution_(bucket_distribution) {}
891
892 void ThreadMain() override {
893 FillThreadCacheWithMemory(root_,
894 5 * ThreadCacheRegistry::Instance()
895 .min_cached_memory_for_purging_bytes(),
896 bucket_distribution_);
897 allocations_done_.fetch_add(1, std::memory_order_release);
898
899 // This thread needs to be alive when the next periodic purge task runs.
900 while (!can_finish_.load(std::memory_order_acquire)) {
901 }
902 }
903
904 private:
905 PartitionRoot* root_ = nullptr;
906 std::atomic<int>& allocations_done_;
907 std::atomic<bool>& can_finish_;
908 BucketDistribution bucket_distribution_;
909 };
910
911 } // namespace
912
913 // Disabled due to flakiness: crbug.com/1220371
914 TEST_P(PartitionAllocThreadCacheTest,
915 DISABLED_PeriodicPurgeSumsOverAllThreads) {
916 auto& registry = ThreadCacheRegistry::Instance();
917 auto NextInterval = [&registry]() {
918 return internal::base::Microseconds(
919 registry.GetPeriodicPurgeNextIntervalInMicroseconds());
920 };
921 EXPECT_EQ(NextInterval(), registry.default_purge_interval());
922
923 // Small amount of memory, the period gets longer.
924 auto* tcache = ThreadCache::Get();
925 ASSERT_LT(tcache->CachedMemory(),
926 registry.min_cached_memory_for_purging_bytes());
927 registry.RunPeriodicPurge();
928 EXPECT_EQ(NextInterval(), 2 * registry.default_purge_interval());
929 registry.RunPeriodicPurge();
930 EXPECT_EQ(NextInterval(), 4 * registry.default_purge_interval());
931
932 // Check that the purge interval is clamped at the maximum value.
933 while (NextInterval() < registry.max_purge_interval()) {
934 registry.RunPeriodicPurge();
935 }
936 registry.RunPeriodicPurge();
937
938 // Not enough memory on this thread to decrease the interval.
939 FillThreadCacheWithMemory(registry.min_cached_memory_for_purging_bytes() / 2);
940 registry.RunPeriodicPurge();
941 EXPECT_EQ(NextInterval(), registry.max_purge_interval());
942
943 std::atomic<int> allocations_done{0};
944 std::atomic<bool> can_finish{false};
945 ThreadDelegateForPeriodicPurgeSumsOverAllThreads delegate(
946 root(), allocations_done, can_finish, GetParam().bucket_distribution);
947
948 internal::base::PlatformThreadHandle thread_handle;
949 internal::base::PlatformThreadForTesting::Create(0, &delegate,
950 &thread_handle);
951 internal::base::PlatformThreadHandle thread_handle_2;
952 internal::base::PlatformThreadForTesting::Create(0, &delegate,
953 &thread_handle_2);
954
955 while (allocations_done.load(std::memory_order_acquire) != 2) {
956 internal::base::PlatformThreadForTesting::YieldCurrentThread();
957 }
958
959 // Many allocations on the other thread.
960 registry.RunPeriodicPurge();
961 EXPECT_EQ(NextInterval(), registry.default_purge_interval());
962
963 can_finish.store(true, std::memory_order_release);
964 internal::base::PlatformThreadForTesting::Join(thread_handle);
965 internal::base::PlatformThreadForTesting::Join(thread_handle_2);
966 }
967
968 // TODO(https://crbug.com/1287799): Flaky on IOS.
969 #if BUILDFLAG(IS_IOS)
970 #define MAYBE_DynamicCountPerBucket DISABLED_DynamicCountPerBucket
971 #else
972 #define MAYBE_DynamicCountPerBucket DynamicCountPerBucket
973 #endif
974 TEST_P(PartitionAllocThreadCacheTest, MAYBE_DynamicCountPerBucket) {
975 auto* tcache = root()->thread_cache_for_testing();
976 size_t bucket_index =
977 FillThreadCacheAndReturnIndex(kMediumSize, kDefaultCountForMediumBucket);
978
979 EXPECT_EQ(kDefaultCountForMediumBucket,
980 tcache->bucket_for_testing(bucket_index).count);
981
982 ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
983 ThreadCache::kDefaultMultiplier / 2);
984 // No immediate batch deallocation.
985 EXPECT_EQ(kDefaultCountForMediumBucket,
986 tcache->bucket_for_testing(bucket_index).count);
987 void* data =
988 root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
989 // Not triggered by allocations.
990 EXPECT_EQ(kDefaultCountForMediumBucket - 1,
991 tcache->bucket_for_testing(bucket_index).count);
992
993 // Free() triggers the purge within limits.
994 root()->Free(data);
995 EXPECT_LE(tcache->bucket_for_testing(bucket_index).count,
996 kDefaultCountForMediumBucket / 2);
997
998 // Won't go above anymore.
999 FillThreadCacheAndReturnIndex(kMediumSize, 1000);
1000 EXPECT_LE(tcache->bucket_for_testing(bucket_index).count,
1001 kDefaultCountForMediumBucket / 2);
1002
1003 // Limit can be raised.
1004 ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
1005 ThreadCache::kDefaultMultiplier * 2);
1006 FillThreadCacheAndReturnIndex(kMediumSize, 1000);
1007 EXPECT_GT(tcache->bucket_for_testing(bucket_index).count,
1008 kDefaultCountForMediumBucket / 2);
1009 }
1010
1011 TEST_P(PartitionAllocThreadCacheTest, DynamicCountPerBucketClamping) {
1012 auto* tcache = root()->thread_cache_for_testing();
1013
1014 ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
1015 ThreadCache::kDefaultMultiplier / 1000.);
1016 for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
1017 // Invalid bucket.
1018 if (!tcache->bucket_for_testing(i).limit.load(std::memory_order_relaxed)) {
1019 EXPECT_EQ(root()->buckets[i].active_slot_spans_head, nullptr);
1020 continue;
1021 }
1022 EXPECT_GE(
1023 tcache->bucket_for_testing(i).limit.load(std::memory_order_relaxed),
1024 1u);
1025 }
1026
1027 ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
1028 ThreadCache::kDefaultMultiplier * 1000.);
1029 for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
1030 // Invalid bucket.
1031 if (!tcache->bucket_for_testing(i).limit.load(std::memory_order_relaxed)) {
1032 EXPECT_EQ(root()->buckets[i].active_slot_spans_head, nullptr);
1033 continue;
1034 }
1035 EXPECT_LT(
1036 tcache->bucket_for_testing(i).limit.load(std::memory_order_relaxed),
1037 0xff);
1038 }
1039 }
1040
1041 // TODO(https://crbug.com/1287799): Flaky on IOS.
1042 #if BUILDFLAG(IS_IOS)
1043 #define MAYBE_DynamicCountPerBucketMultipleThreads \
1044 DISABLED_DynamicCountPerBucketMultipleThreads
1045 #else
1046 #define MAYBE_DynamicCountPerBucketMultipleThreads \
1047 DynamicCountPerBucketMultipleThreads
1048 #endif
1049
1050 namespace {
1051
1052 class ThreadDelegateForDynamicCountPerBucketMultipleThreads
1053 : public internal::base::PlatformThreadForTesting::Delegate {
1054 public:
1055 ThreadDelegateForDynamicCountPerBucketMultipleThreads(
1056 PartitionRoot* root,
1057 std::atomic<bool>& other_thread_started,
1058 std::atomic<bool>& threshold_changed,
1059 int bucket_index,
1060 BucketDistribution bucket_distribution)
1061 : root_(root),
1062 other_thread_started_(other_thread_started),
1063 threshold_changed_(threshold_changed),
1064 bucket_index_(bucket_index),
1065 bucket_distribution_(bucket_distribution) {}
1066
1067 void ThreadMain() override {
1068 FillThreadCacheAndReturnIndex(root_, kSmallSize, bucket_distribution_,
1069 kDefaultCountForSmallBucket + 10);
1070 auto* this_thread_tcache = root_->thread_cache_for_testing();
1071 // More than the default since the multiplier has changed.
1072 EXPECT_GT(this_thread_tcache->bucket_count_for_testing(bucket_index_),
1073 kDefaultCountForSmallBucket + 10);
1074
1075 other_thread_started_.store(true, std::memory_order_release);
1076 while (!threshold_changed_.load(std::memory_order_acquire)) {
1077 }
1078
1079 void* data =
1080 root_->Alloc(root_->AdjustSizeForExtrasSubtract(kSmallSize), "");
1081 // Deallocations trigger limit enforcement.
1082 root_->Free(data);
1083 // Since the bucket is too full, it gets halved by batched deallocation.
1084 EXPECT_EQ(static_cast<uint8_t>(ThreadCache::kSmallBucketBaseCount / 2),
1085 this_thread_tcache->bucket_count_for_testing(bucket_index_));
1086 }
1087
1088 private:
1089 PartitionRoot* root_ = nullptr;
1090 std::atomic<bool>& other_thread_started_;
1091 std::atomic<bool>& threshold_changed_;
1092 const int bucket_index_;
1093 PartitionRoot::BucketDistribution bucket_distribution_;
1094 };
1095
1096 } // namespace
1097
1098 TEST_P(PartitionAllocThreadCacheTest,
1099 MAYBE_DynamicCountPerBucketMultipleThreads) {
1100 std::atomic<bool> other_thread_started{false};
1101 std::atomic<bool> threshold_changed{false};
1102
1103 auto* tcache = root()->thread_cache_for_testing();
1104 size_t bucket_index =
1105 FillThreadCacheAndReturnIndex(kSmallSize, kDefaultCountForSmallBucket);
1106 EXPECT_EQ(kDefaultCountForSmallBucket,
1107 tcache->bucket_for_testing(bucket_index).count);
1108
1109 // Change the ratio before starting the threads, checking that it will be
1110 // applied to newly-created threads.
1111 ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
1112 ThreadCache::kDefaultMultiplier + 1);
1113
1114 ThreadDelegateForDynamicCountPerBucketMultipleThreads delegate(
1115 root(), other_thread_started, threshold_changed, bucket_index,
1116 GetParam().bucket_distribution);
1117
1118 internal::base::PlatformThreadHandle thread_handle;
1119 internal::base::PlatformThreadForTesting::Create(0, &delegate,
1120 &thread_handle);
1121
1122 while (!other_thread_started.load(std::memory_order_acquire)) {
1123 }
1124
1125 ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(1.);
1126 threshold_changed.store(true, std::memory_order_release);
1127
1128 internal::base::PlatformThreadForTesting::Join(thread_handle);
1129 }
1130
1131 TEST_P(PartitionAllocThreadCacheTest, DynamicSizeThreshold) {
1132 auto* tcache = root()->thread_cache_for_testing();
1133 DeltaCounter alloc_miss_counter{tcache->stats_for_testing().alloc_misses};
1134 DeltaCounter alloc_miss_too_large_counter{
1135 tcache->stats_for_testing().alloc_miss_too_large};
1136 DeltaCounter cache_fill_counter{tcache->stats_for_testing().cache_fill_count};
1137 DeltaCounter cache_fill_misses_counter{
1138 tcache->stats_for_testing().cache_fill_misses};
1139
1140 // Default threshold at first.
1141 ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
1142 FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold);
1143
1144 EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());
1145 EXPECT_EQ(1u, cache_fill_counter.Delta());
1146
1147 // Too large to be cached.
1148 FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
1149 EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
1150
1151 // Increase.
1152 ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
1153 FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
1154 // No new miss.
1155 EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
1156
1157 // Lower.
1158 ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
1159 FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
1160 EXPECT_EQ(2u, alloc_miss_too_large_counter.Delta());
1161
1162 // Value is clamped.
1163 size_t too_large = 1024 * 1024;
1164 ThreadCache::SetLargestCachedSize(too_large);
1165 FillThreadCacheAndReturnIndex(too_large);
1166 EXPECT_EQ(3u, alloc_miss_too_large_counter.Delta());
1167 }
1168
1169 // Disabled due to flakiness: crbug.com/1287811
1170 TEST_P(PartitionAllocThreadCacheTest, DISABLED_DynamicSizeThresholdPurge) {
1171 auto* tcache = root()->thread_cache_for_testing();
1172 DeltaCounter alloc_miss_counter{tcache->stats_for_testing().alloc_misses};
1173 DeltaCounter alloc_miss_too_large_counter{
1174 tcache->stats_for_testing().alloc_miss_too_large};
1175 DeltaCounter cache_fill_counter{tcache->stats_for_testing().cache_fill_count};
1176 DeltaCounter cache_fill_misses_counter{
1177 tcache->stats_for_testing().cache_fill_misses};
1178
1179 // Cache large allocations.
1180 size_t large_allocation_size = ThreadCache::kLargeSizeThreshold;
1181 ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
1182 size_t index = FillThreadCacheAndReturnIndex(large_allocation_size);
1183 EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());
1184
1185 // Lower.
1186 ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
1187 FillThreadCacheAndReturnIndex(large_allocation_size);
1188 EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
1189
1190 // There is memory trapped in the cache bucket.
1191 EXPECT_GT(tcache->bucket_for_testing(index).count, 0u);
1192
1193 // Which is reclaimed by Purge().
1194 tcache->Purge();
1195 EXPECT_EQ(0u, tcache->bucket_for_testing(index).count);
1196 }
1197
1198 TEST_P(PartitionAllocThreadCacheTest, ClearFromTail) {
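// Walks the freelist of the given thread cache bucket and returns the number
// of entries it holds.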
1199 auto count_items = [this](ThreadCache* tcache, size_t index) {
1200 const internal::PartitionFreelistDispatcher* freelist_dispatcher =
1201 this->root()->get_freelist_dispatcher();
1202 uint8_t count = 0;
1203 auto* head = tcache->bucket_for_testing(index).freelist_head;
1204 while (head) {
1205 #if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
1206 head = freelist_dispatcher->GetNextForThreadCacheTrue(
1207 head, tcache->bucket_for_testing(index).slot_size);
1208 #else
1209 head = freelist_dispatcher->GetNextForThreadCache<true>(
1210 head, tcache->bucket_for_testing(index).slot_size);
1211 #endif // USE_FREELIST_POOL_OFFSETS
1212 count++;
1213 }
1214 return count;
1215 };
1216
1217 auto* tcache = root()->thread_cache_for_testing();
1218 size_t index = FillThreadCacheAndReturnIndex(kSmallSize, 10);
1219 ASSERT_GE(count_items(tcache, index), 10);
1220 void* head = tcache->bucket_for_testing(index).freelist_head;
1221
1222 for (size_t limit : {8, 3, 1}) {
1223 tcache->ClearBucketForTesting(tcache->bucket_for_testing(index), limit);
1224 EXPECT_EQ(head, static_cast<void*>(
1225 tcache->bucket_for_testing(index).freelist_head));
1226 EXPECT_EQ(count_items(tcache, index), limit);
1227 }
1228 tcache->ClearBucketForTesting(tcache->bucket_for_testing(index), 0);
1229 EXPECT_EQ(nullptr, static_cast<void*>(
1230 tcache->bucket_for_testing(index).freelist_head));
1231 }
1232
1233 // TODO(https://crbug.com/1287799): Flaky on IOS.
1234 #if BUILDFLAG(IS_IOS)
1235 #define MAYBE_Bookkeeping DISABLED_Bookkeeping
1236 #else
1237 #define MAYBE_Bookkeeping Bookkeeping
1238 #endif
1239 TEST_P(PartitionAllocThreadCacheTest, MAYBE_Bookkeeping) {
1240 void* arr[kFillCountForMediumBucket] = {};
1241 auto* tcache = root()->thread_cache_for_testing();
1242
1243 root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
1244 PurgeFlags::kDiscardUnusedSystemPages);
1245 root()->ResetBookkeepingForTesting();
1246
1247 size_t expected_allocated_size = 0;
1248 size_t expected_committed_size = 0;
1249
1250 EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
1251 EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
1252 EXPECT_EQ(expected_allocated_size,
1253 root()->get_total_size_of_allocated_bytes());
1254 EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());
1255
1256 void* ptr =
1257 root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
1258
1259 auto* medium_bucket = root()->buckets + SizeToIndex(kMediumSize);
1260 size_t medium_alloc_size = medium_bucket->slot_size;
1261 expected_allocated_size += medium_alloc_size;
1262 expected_committed_size += kUseLazyCommit
1263 ? internal::SystemPageSize()
1264 : medium_bucket->get_bytes_per_span();
1265
1266 EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
1267 EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
1268 EXPECT_EQ(expected_allocated_size,
1269 root()->get_total_size_of_allocated_bytes());
1270 EXPECT_EQ(expected_allocated_size, root()->get_max_size_of_allocated_bytes());
1271
1272 expected_allocated_size += kFillCountForMediumBucket * medium_alloc_size;
1273
1274 // These allocations all come from the thread-cache.
1275 for (size_t i = 0; i < kFillCountForMediumBucket; i++) {
1276 arr[i] =
1277 root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
1278 EXPECT_EQ(expected_committed_size, root()->total_size_of_committed_pages);
1279 EXPECT_EQ(expected_committed_size, root()->max_size_of_committed_pages);
1280 EXPECT_EQ(expected_allocated_size,
1281 root()->get_total_size_of_allocated_bytes());
1282 EXPECT_EQ(expected_allocated_size,
1283 root()->get_max_size_of_allocated_bytes());
1284 EXPECT_EQ((kFillCountForMediumBucket - 1 - i) * medium_alloc_size,
1285 tcache->CachedMemory());
1286 }
1287
1288 EXPECT_EQ(0U, tcache->CachedMemory());
1289
1290 root()->Free(ptr);
1291
1292 for (auto*& el : arr) {
1293 root()->Free(el);
1294 }
1295 EXPECT_EQ(root()->get_total_size_of_allocated_bytes(),
1296 expected_allocated_size);
1297 tcache->Purge();
1298 }
1299
1300 TEST_P(PartitionAllocThreadCacheTest, TryPurgeNoAllocs) {
1301 auto* tcache = root()->thread_cache_for_testing();
1302 tcache->TryPurge();
1303 }
1304
1305 TEST_P(PartitionAllocThreadCacheTest, TryPurgeMultipleCorrupted) {
1306 auto* tcache = root()->thread_cache_for_testing();
1307
1308 void* ptr =
1309 root()->Alloc(root()->AdjustSizeForExtrasSubtract(kMediumSize), "");
1310
1311 auto* medium_bucket = root()->buckets + SizeToIndex(kMediumSize);
1312
1313 auto* curr = medium_bucket->active_slot_spans_head->get_freelist_head();
1314 const internal::PartitionFreelistDispatcher* freelist_dispatcher =
1315 root()->get_freelist_dispatcher();
1316 #if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
1317 curr = freelist_dispatcher->GetNextForThreadCacheTrue(curr, kMediumSize);
1318 #else
1319 curr = freelist_dispatcher->GetNextForThreadCache<true>(curr, kMediumSize);
1320 #endif // USE_FREELIST_POOL_OFFSETS
1321 freelist_dispatcher->CorruptNextForTesting(curr, 0x12345678);
1322 tcache->TryPurge();
1323 freelist_dispatcher->SetNext(curr, nullptr);
1324 root()->Free(ptr);
1325 }
1326
1327 TEST(AlternateBucketDistributionTest, SizeToIndex) {
1328 using internal::BucketIndexLookup;
1329
1330 // The first 12 buckets are the same as the default bucket index.
1331 for (size_t i = 1 << 0; i < 1 << 8; i <<= 1) {
1332 for (size_t offset = 0; offset < 4; offset++) {
1333 size_t n = i * (4 + offset) / 4;
1334 EXPECT_EQ(BucketIndexLookup::GetIndex(n),
1335 BucketIndexLookup::GetIndexForNeutralBuckets(n));
1336 }
1337 }
1338
1339 // The alternate bucket distribution is different in the middle values.
1340 //
1341 // For each order, the top two buckets are removed compared with the default
1342 // distribution. Values that would be allocated in those two buckets are
1343 // instead allocated in the next power of two bucket.
1344 //
1345 // The first two buckets (each power of two and the next bucket up) remain
1346 // the same between the two bucket distributions.
  size_t expected_index = BucketIndexLookup::GetIndex(1 << 8);
  for (size_t i = 1 << 8; i < internal::kHighThresholdForAlternateDistribution;
       i <<= 1) {
    // The first two buckets in the order should match up to the normal bucket
    // distribution.
    for (size_t offset = 0; offset < 2; offset++) {
      size_t n = i * (4 + offset) / 4;
      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForNeutralBuckets(n));
      EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index);
      expected_index += 2;
    }
    // The last two buckets in the order are "rounded up" to the same bucket
    // as the next power of two.
    expected_index += 4;
    for (size_t offset = 2; offset < 4; offset++) {
      size_t n = i * (4 + offset) / 4;
      // These two are rounded up in the alternate distribution, so we expect
      // the bucket index to be larger than the bucket index for the same
      // allocation under the default distribution.
      EXPECT_GT(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForNeutralBuckets(n));
      // We expect both allocations in this loop to be rounded up to the next
      // power of two bucket.
      EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index);
    }
  }

  // The rest of the buckets all match up exactly with the existing
  // bucket distribution.
  for (size_t i = internal::kHighThresholdForAlternateDistribution;
       i < internal::kMaxBucketed; i <<= 1) {
    for (size_t offset = 0; offset < 4; offset++) {
      size_t n = i * (4 + offset) / 4;
      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForNeutralBuckets(n));
    }
  }
}

TEST_P(PartitionAllocThreadCacheTest, AllocationRecording) {
  // There is a cache.
  auto* tcache = root()->thread_cache_for_testing();
  EXPECT_TRUE(tcache);
  tcache->ResetPerThreadAllocationStatsForTesting();

  constexpr size_t kBucketedNotCached = 1 << 12;
  constexpr size_t kDirectMapped = 4 * (1 << 20);
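  // As the names suggest, kBucketedNotCached is served from a regular bucket
  // but is too large for the thread cache, while kDirectMapped is large enough
  // to take the direct map path.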
  // Not a "nice" size on purpose, to check that the raw size accounting works.
  const size_t kSingleSlot = internal::PartitionPageSize() + 1;

  size_t expected_total_size = 0;
  void* ptr =
      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSmallSize), "");
  ASSERT_TRUE(ptr);
  expected_total_size += root()->GetUsableSize(ptr);
  void* ptr2 = root()->Alloc(
      root()->AdjustSizeForExtrasSubtract(kBucketedNotCached), "");
  ASSERT_TRUE(ptr2);
  expected_total_size += root()->GetUsableSize(ptr2);
  void* ptr3 =
      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kDirectMapped), "");
  ASSERT_TRUE(ptr3);
  expected_total_size += root()->GetUsableSize(ptr3);
  void* ptr4 =
      root()->Alloc(root()->AdjustSizeForExtrasSubtract(kSingleSlot), "");
  ASSERT_TRUE(ptr4);
  expected_total_size += root()->GetUsableSize(ptr4);

  EXPECT_EQ(4u, tcache->thread_alloc_stats().alloc_count);
  EXPECT_EQ(expected_total_size, tcache->thread_alloc_stats().alloc_total_size);

  root()->Free(ptr);
  root()->Free(ptr2);
  root()->Free(ptr3);
  root()->Free(ptr4);

  EXPECT_EQ(4u, tcache->thread_alloc_stats().alloc_count);
  EXPECT_EQ(expected_total_size, tcache->thread_alloc_stats().alloc_total_size);
  EXPECT_EQ(4u, tcache->thread_alloc_stats().dealloc_count);
  EXPECT_EQ(expected_total_size,
            tcache->thread_alloc_stats().dealloc_total_size);

  auto stats = internal::GetAllocStatsForCurrentThread();
  EXPECT_EQ(4u, stats.alloc_count);
  EXPECT_EQ(expected_total_size, stats.alloc_total_size);
  EXPECT_EQ(4u, stats.dealloc_count);
  EXPECT_EQ(expected_total_size, stats.dealloc_total_size);
}

TEST_P(PartitionAllocThreadCacheTest, AllocationRecordingAligned) {
  // There is a cache.
  auto* tcache = root()->thread_cache_for_testing();
  EXPECT_TRUE(tcache);
  tcache->ResetPerThreadAllocationStatsForTesting();

  // Aligned allocations take different paths depending on whether they are (in
  // the same order as the test cases below):
  // - Not really aligned (since alignment is always good-enough)
  // - Already satisfied by PA's alignment guarantees
  // - Requiring extra padding
  // - Already satisfied by PA's alignment guarantees
  // - In need of a special slot span (very large alignment)
  // - Direct-mapped with large alignment
  size_t alloc_count = 0;
  size_t total_size = 0;
  size_t size_alignments[][2] = {{128, 4},
                                 {128, 128},
                                 {1024, 128},
                                 {128, 1024},
                                 {128, 2 * internal::PartitionPageSize()},
                                 {(4 << 20) + 1, 1 << 19}};
  for (auto [requested_size, alignment] : size_alignments) {
    void* ptr = root()->AlignedAlloc(alignment, requested_size);
    ASSERT_TRUE(ptr);
    alloc_count++;
    total_size += root()->GetUsableSize(ptr);
    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
    EXPECT_EQ(total_size, tcache->thread_alloc_stats().alloc_total_size);
    root()->Free(ptr);
    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().dealloc_count);
    EXPECT_EQ(total_size, tcache->thread_alloc_stats().dealloc_total_size);
  }

  EXPECT_EQ(tcache->thread_alloc_stats().alloc_total_size,
            tcache->thread_alloc_stats().dealloc_total_size);

  auto stats = internal::GetAllocStatsForCurrentThread();
  EXPECT_EQ(alloc_count, stats.alloc_count);
  EXPECT_EQ(total_size, stats.alloc_total_size);
  EXPECT_EQ(alloc_count, stats.dealloc_count);
  EXPECT_EQ(total_size, stats.dealloc_total_size);
}

TEST_P(PartitionAllocThreadCacheTest, AllocationRecordingRealloc) {
  // There is a cache.
  auto* tcache = root()->thread_cache_for_testing();
  EXPECT_TRUE(tcache);
  tcache->ResetPerThreadAllocationStatsForTesting();

  size_t alloc_count = 0;
  size_t dealloc_count = 0;
  size_t total_alloc_size = 0;
  size_t total_dealloc_size = 0;
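  // Each pair below is {initial size, realloc'ed size}; together they cover
  // shrinking, growing within the bucketed range, and growing/shrinking
  // direct-mapped allocations.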
  size_t size_new_sizes[][2] = {
      {16, 15},
      {16, 64},
      {16, internal::PartitionPageSize() + 1},
      {4 << 20, 8 << 20},
      {8 << 20, 4 << 20},
      {(8 << 20) - internal::SystemPageSize(), 8 << 20}};
  for (auto [size, new_size] : size_new_sizes) {
    void* ptr = root()->Alloc(size);
    ASSERT_TRUE(ptr);
    alloc_count++;
    size_t usable_size = root()->GetUsableSize(ptr);
    total_alloc_size += usable_size;

    ptr = root()->Realloc(ptr, new_size, "");
    ASSERT_TRUE(ptr);
    total_dealloc_size += usable_size;
    dealloc_count++;
    usable_size = root()->GetUsableSize(ptr);
    total_alloc_size += usable_size;
    alloc_count++;

    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
    EXPECT_EQ(total_alloc_size, tcache->thread_alloc_stats().alloc_total_size);
    EXPECT_EQ(dealloc_count, tcache->thread_alloc_stats().dealloc_count);
    EXPECT_EQ(total_dealloc_size,
              tcache->thread_alloc_stats().dealloc_total_size)
        << new_size;

    root()->Free(ptr);
    dealloc_count++;
    total_dealloc_size += usable_size;

    EXPECT_EQ(alloc_count, tcache->thread_alloc_stats().alloc_count);
    EXPECT_EQ(total_alloc_size, tcache->thread_alloc_stats().alloc_total_size);
    EXPECT_EQ(dealloc_count, tcache->thread_alloc_stats().dealloc_count);
    EXPECT_EQ(total_dealloc_size,
              tcache->thread_alloc_stats().dealloc_total_size);
  }
  EXPECT_EQ(tcache->thread_alloc_stats().alloc_total_size,
            tcache->thread_alloc_stats().dealloc_total_size);
}

// This test makes sure it's safe to switch to the alternate bucket
// distribution at runtime. This is intended to happen once, early in Chrome's
// startup, once feature flags are available.
TEST(AlternateBucketDistributionTest, SwitchBeforeAlloc) {
  std::unique_ptr<PartitionAllocatorForTesting> allocator(CreateAllocator());
  PartitionRoot* root = allocator->root();

  root->SwitchToDenserBucketDistribution();
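  // 6 KiB ((1 << 12) * 3 / 2) falls in the middle of a power-of-two order, so
  // the two bucket distributions place it in different buckets, as checked
  // below.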
  constexpr size_t n = (1 << 12) * 3 / 2;
  EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
            internal::BucketIndexLookup::GetIndexForNeutralBuckets(n));

  void* ptr = root->Alloc(n);

  root->ResetBucketDistributionForTesting();

  root->Free(ptr);
}

// This test makes sure it's safe to switch to the alternate bucket
// distribution at runtime. This is intended to happen once, early in Chrome's
// startup, once feature flags are available.
TEST(AlternateBucketDistributionTest, SwitchAfterAlloc) {
  std::unique_ptr<PartitionAllocatorForTesting> allocator(CreateAllocator());
  constexpr size_t n = (1 << 12) * 3 / 2;
  EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
            internal::BucketIndexLookup::GetIndexForNeutralBuckets(n));

  PartitionRoot* root = allocator->root();
  void* ptr = root->Alloc(n);

  root->SwitchToDenserBucketDistribution();

  void* ptr2 = root->Alloc(n);

  root->Free(ptr2);
  root->Free(ptr);
}

}  // namespace partition_alloc

#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) &&
        // PA_CONFIG(THREAD_CACHE_SUPPORTED)