//===-- secondary_test.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "allocator_config_wrapper.h"
#include "secondary.h"

#include <algorithm>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <random>
#include <stdio.h>
#include <thread>
#include <vector>

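// Returns Options with memory tagging enabled when the config, the
// architecture, and the running system all support it; default Options
// otherwise.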
template <typename Config> static scudo::Options getOptionsForConfig() {
  if (!Config::getMaySupportMemoryTagging() ||
      !scudo::archSupportsMemoryTagging() ||
      !scudo::systemSupportsMemoryTagging())
    return {};
  scudo::AtomicOptions AO;
  AO.set(scudo::OptionBit::UseMemoryTagging);
  return AO.load();
}

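// Exercises the basic allocate/deallocate cycle of the Secondary, including
// an aligned allocation and, when caching is unavailable, a use-after-unmap
// death check.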
template <typename Config> static void testSecondaryBasic(void) {
  using SecondaryT = scudo::MapAllocator<scudo::SecondaryConfig<Config>>;
  scudo::Options Options =
      getOptionsForConfig<scudo::SecondaryConfig<Config>>();

  scudo::GlobalStats S;
  S.init();
  std::unique_ptr<SecondaryT> L(new SecondaryT);
  L->init(&S);
  const scudo::uptr Size = 1U << 16;
  void *P = L->allocate(Options, Size);
  EXPECT_NE(P, nullptr);
  memset(P, 'A', Size);
  EXPECT_GE(SecondaryT::getBlockSize(P), Size);
  L->deallocate(Options, P);

  // If the Secondary can't cache that pointer, it will be unmapped.
  if (!L->canCache(Size)) {
    EXPECT_DEATH(
        {
          // Repeat a few times to avoid missing the crash in case the region
          // gets mmap'ed by unrelated code.
          for (int i = 0; i < 10; ++i) {
            P = L->allocate(Options, Size);
            L->deallocate(Options, P);
            memset(P, 'A', Size);
          }
        },
        "");
  }

  const scudo::uptr Align = 1U << 16;
  P = L->allocate(Options, Size + Align, Align);
  EXPECT_NE(P, nullptr);
  void *AlignedP = reinterpret_cast<void *>(
      scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
  memset(AlignedP, 'A', Size);
  L->deallocate(Options, P);

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 32U; I++)
    V.push_back(L->allocate(Options, Size));
  std::shuffle(V.begin(), V.end(), std::mt19937(std::random_device()()));
  while (!V.empty()) {
    L->deallocate(Options, V.back());
    V.pop_back();
  }
  scudo::ScopedString Str;
  L->getStats(&Str);
  Str.output();
  L->unmapTestOnly();
}

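// A config whose Secondary has caching disabled via MapAllocatorNoCache.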
struct NoCacheConfig {
  static const bool MaySupportMemoryTagging = false;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };
};

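// A config with a large Secondary cache, used to stress cache ordering and
// eviction in the cache tests below.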
struct TestConfig {
  static const bool MaySupportMemoryTagging = false;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Secondary {
    struct Cache {
      static const scudo::u32 EntriesArraySize = 128U;
      static const scudo::u32 QuarantineSize = 0U;
      static const scudo::u32 DefaultMaxEntriesCount = 64U;
      static const scudo::uptr DefaultMaxEntrySize = 1UL << 20;
      static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
      static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    };

    template <typename Config> using CacheT = scudo::MapAllocatorCache<Config>;
  };
};

TEST(ScudoSecondaryTest, SecondaryBasic) {
  testSecondaryBasic<NoCacheConfig>();
  testSecondaryBasic<scudo::DefaultConfig>();
  testSecondaryBasic<TestConfig>();
}

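// Fixture providing a fresh MapAllocator (and matching Options) per test.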
struct MapAllocatorTest : public Test {
  using Config = scudo::DefaultConfig;
  using LargeAllocator = scudo::MapAllocator<scudo::SecondaryConfig<Config>>;

  void SetUp() override { Allocator->init(nullptr); }

  void TearDown() override { Allocator->unmapTestOnly(); }

  std::unique_ptr<LargeAllocator> Allocator =
      std::make_unique<LargeAllocator>();
  scudo::Options Options =
      getOptionsForConfig<scudo::SecondaryConfig<Config>>();
};

// This exercises a variety of combinations of size and alignment for the
// MapAllocator. The size computations done here mimic the ones done by the
// combined allocator.
TEST_F(MapAllocatorTest, SecondaryCombinations) {
  constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
  constexpr scudo::uptr HeaderSize = scudo::roundUp(8, MinAlign);
  for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
    for (scudo::uptr AlignLog = FIRST_32_SECOND_64(3, 4); AlignLog <= 16;
         AlignLog++) {
      const scudo::uptr Align = 1U << AlignLog;
      for (scudo::sptr Delta = -128; Delta <= 128; Delta += 8) {
        if ((1LL << SizeLog) + Delta <= 0)
          continue;
        const scudo::uptr UserSize = scudo::roundUp(
            static_cast<scudo::uptr>((1LL << SizeLog) + Delta), MinAlign);
        const scudo::uptr Size =
            HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
        void *P = Allocator->allocate(Options, Size, Align);
        EXPECT_NE(P, nullptr);
        void *AlignedP = reinterpret_cast<void *>(
            scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
        memset(AlignedP, 0xff, UserSize);
        Allocator->deallocate(Options, P);
      }
    }
  }
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

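// Verifies that iterateOverBlocks() visits every outstanding block while the
// allocator is disabled.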
TEST_F(MapAllocatorTest, SecondaryIterate) {
  std::vector<void *> V;
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  for (scudo::uptr I = 0; I < 32U; I++)
    V.push_back(Allocator->allocate(
        Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize));
  auto Lambda = [&V](scudo::uptr Block) {
    EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)),
              V.end());
  };
  Allocator->disable();
  Allocator->iterateOverBlocks(Lambda);
  Allocator->enable();
  while (!V.empty()) {
    Allocator->deallocate(Options, V.back());
    V.pop_back();
  }
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

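// Exercises the cache-related setOption() knobs and checks that canCache()
// reflects them.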
TEST_F(MapAllocatorTest, SecondaryCacheOptions) {
  if (!Allocator->canCache(0U))
    TEST_SKIP("Secondary Cache disabled");

  // Attempt to set a maximum number of entries higher than the array size.
  EXPECT_TRUE(Allocator->setOption(scudo::Option::MaxCacheEntriesCount, 4096U));

  // Attempt to set an invalid (negative) number of entries.
  EXPECT_FALSE(Allocator->setOption(scudo::Option::MaxCacheEntriesCount, -1));

  // Various valid combinations.
  EXPECT_TRUE(Allocator->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
  EXPECT_TRUE(
      Allocator->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
  EXPECT_TRUE(Allocator->canCache(1UL << 18));
  EXPECT_TRUE(
      Allocator->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 17));
  EXPECT_FALSE(Allocator->canCache(1UL << 18));
  EXPECT_TRUE(Allocator->canCache(1UL << 16));
  EXPECT_TRUE(Allocator->setOption(scudo::Option::MaxCacheEntriesCount, 0U));
  EXPECT_FALSE(Allocator->canCache(1UL << 16));
  EXPECT_TRUE(Allocator->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
  EXPECT_TRUE(
      Allocator->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
  EXPECT_TRUE(Allocator->canCache(1UL << 16));
}

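// Fixture that initializes the allocator with a zero release-to-OS interval,
// so that memory is released eagerly during the multithreaded test below.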
struct MapAllocatorWithReleaseTest : public MapAllocatorTest {
  void SetUp() override { Allocator->init(nullptr, /*ReleaseToOsInterval=*/0); }

  void performAllocations() {
    std::vector<void *> V;
    const scudo::uptr PageSize = scudo::getPageSizeCached();
    {
      std::unique_lock<std::mutex> Lock(Mutex);
      while (!Ready)
        Cv.wait(Lock);
    }
    for (scudo::uptr I = 0; I < 128U; I++) {
      // Deallocate 75% of the blocks.
      const bool Deallocate = (std::rand() & 3) != 0;
      void *P = Allocator->allocate(
          Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize);
      if (Deallocate)
        Allocator->deallocate(Options, P);
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(Options, V.back());
      V.pop_back();
    }
  }

  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
};

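// Races allocations and deallocations from multiple threads, all released at
// once, to shake out concurrency issues in the Secondary.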
TEST_F(MapAllocatorWithReleaseTest, SecondaryThreadsRace) {
  std::thread Threads[16];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] =
        std::thread(&MapAllocatorWithReleaseTest::performAllocations, this);
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

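// Fixture testing the Secondary's cache directly, with an unmap callback that
// marks evicted entries so evictions can be observed.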
struct MapAllocatorCacheTest : public Test {
  static constexpr scudo::u32 UnmappedMarker = 0xDEADBEEF;

  static void testUnmapCallback(scudo::MemMapT &MemMap) {
    scudo::u32 *Ptr = reinterpret_cast<scudo::u32 *>(MemMap.getBase());
    *Ptr = UnmappedMarker;
  }

  using SecondaryConfig = scudo::SecondaryConfig<TestConfig>;
  using CacheConfig = SecondaryConfig::CacheConfig;
  using CacheT = scudo::MapAllocatorCache<CacheConfig, testUnmapCallback>;

  std::unique_ptr<CacheT> Cache = std::make_unique<CacheT>();

  const scudo::uptr PageSize = scudo::getPageSizeCached();
  // The test allocation size is set to the maximum cache entry size.
  static constexpr scudo::uptr TestAllocSize =
      CacheConfig::getDefaultMaxEntrySize();

  scudo::Options Options = getOptionsForConfig<SecondaryConfig>();

  void SetUp() override { Cache->init(/*ReleaseToOsInterval=*/-1); }

  void TearDown() override { Cache->unmapTestOnly(); }

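  // Reserves and maps a page-aligned region of at least Size bytes, suitable
  // for storing in the cache.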
  scudo::MemMapT allocate(scudo::uptr Size) {
    scudo::uptr MapSize = scudo::roundUp(Size, PageSize);
    scudo::ReservedMemoryT ReservedMemory;
    CHECK(ReservedMemory.create(0U, MapSize, nullptr, MAP_ALLOWNOMEM));

    scudo::MemMapT MemMap = ReservedMemory.dispatch(
        ReservedMemory.getBase(), ReservedMemory.getCapacity());
    MemMap.remap(MemMap.getBase(), MemMap.getCapacity(), "scudo:test",
                 MAP_RESIZABLE | MAP_ALLOWNOMEM);
    return MemMap;
  }

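  // Allocates NumEntries blocks of Size bytes each and stores them all in the
  // cache, recording the mappings in MemMaps.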
  void fillCacheWithSameSizeBlocks(std::vector<scudo::MemMapT> &MemMaps,
                                   scudo::uptr NumEntries, scudo::uptr Size) {
    for (scudo::uptr I = 0; I < NumEntries; I++) {
      MemMaps.emplace_back(allocate(Size));
      auto &MemMap = MemMaps[I];
      Cache->store(Options, MemMap.getBase(), MemMap.getCapacity(),
                   MemMap.getBase(), MemMap);
    }
  }
};

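// Stores a full array of entries, then checks that they are retrieved in the
// inverse of insertion order (LIFO).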
TEST_F(MapAllocatorCacheTest, CacheOrder) {
  std::vector<scudo::MemMapT> MemMaps;
  Cache->setOption(scudo::Option::MaxCacheEntriesCount,
                   CacheConfig::getEntriesArraySize());

  fillCacheWithSameSizeBlocks(MemMaps, CacheConfig::getEntriesArraySize(),
                              TestAllocSize);

  // The retrieval order should be the inverse of the insertion order.
  for (scudo::uptr I = CacheConfig::getEntriesArraySize(); I > 0; I--) {
    scudo::uptr EntryHeaderPos;
    scudo::CachedBlock Entry =
        Cache->retrieve(0, TestAllocSize, PageSize, 0, EntryHeaderPos);
    EXPECT_EQ(Entry.MemMap.getBase(), MemMaps[I - 1].getBase());
  }

  // Clean up the MemMaps.
  for (auto &MemMap : MemMaps)
    MemMap.unmap();
}

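// Checks that a cached block is only returned when the fragmentation it would
// leave behind is within the caller's allowance.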
TEST_F(MapAllocatorCacheTest, PartialChunkHeuristicRetrievalTest) {
  const scudo::uptr FragmentedPages =
      1 + scudo::CachedBlock::MaxReleasedCachePages;
  scudo::uptr EntryHeaderPos;
  scudo::CachedBlock Entry;
  scudo::MemMapT MemMap = allocate(PageSize + FragmentedPages * PageSize);
  Cache->store(Options, MemMap.getBase(), MemMap.getCapacity(),
               MemMap.getBase(), MemMap);

  // FragmentedPages > MaxAllowedFragmentedPages, so a PageSize allocation
  // cannot be retrieved from the cache.
  Entry = Cache->retrieve(/*MaxAllowedFragmentedPages=*/0, PageSize, PageSize,
                          0, EntryHeaderPos);
  EXPECT_FALSE(Entry.isValid());

  // FragmentedPages == MaxAllowedFragmentedPages, so a PageSize allocation
  // can be retrieved from the cache.
  Entry =
      Cache->retrieve(FragmentedPages, PageSize, PageSize, 0, EntryHeaderPos);
  EXPECT_TRUE(Entry.isValid());

  MemMap.unmap();
}

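// Overfills the cache by one entry to force an eviction, then verifies via
// the unmap callback that the evicted entry was actually unmapped.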
TEST_F(MapAllocatorCacheTest, MemoryLeakTest) {
  std::vector<scudo::MemMapT> MemMaps;
  // Fill the cache above MaxEntriesCount to force an eviction. The first
  // cache entry should be evicted, since it is the oldest, once the maximum
  // number of entries is reached.
  fillCacheWithSameSizeBlocks(
      MemMaps, CacheConfig::getDefaultMaxEntriesCount() + 1, TestAllocSize);

  std::vector<scudo::CachedBlock> RetrievedEntries;

  // The first MemMap should have been evicted from the cache because it was
  // the first one inserted.
  for (scudo::uptr I = CacheConfig::getDefaultMaxEntriesCount(); I > 0; I--) {
    scudo::uptr EntryHeaderPos;
    RetrievedEntries.push_back(
        Cache->retrieve(0, TestAllocSize, PageSize, 0, EntryHeaderPos));
    EXPECT_EQ(MemMaps[I].getBase(), RetrievedEntries.back().MemMap.getBase());
  }

  // The evicted entry should have been marked by the unmap callback.
  EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(MemMaps[0].getBase()),
            UnmappedMarker);

  // Clean up the MemMaps.
  for (auto &MemMap : MemMaps)
    MemMap.unmap();
}
390