//===-- tsd_test.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "tsd_exclusive.h"
#include "tsd_shared.h"

#include <stdlib.h>

#include <condition_variable>
#include <mutex>
#include <set>
#include <thread>
#include <type_traits>

// We mock out an allocator with a TSD registry, mostly using empty stubs. The
// cache contains a single volatile uptr, to be able to test that several
// concurrent threads will not access or modify the same cache at the same time.
template <class Config> class MockAllocator {
public:
  using ThisT = MockAllocator<Config>;
  using TSDRegistryT = typename Config::template TSDRegistryT<ThisT>;
  using CacheT = struct MockCache {
    volatile scudo::uptr Canary;
  };
  using QuarantineCacheT = struct MockQuarantine {};

  void init() {
    // This should only be called once by the registry.
    EXPECT_FALSE(Initialized);
    Initialized = true;
  }

  void unmapTestOnly() { TSDRegistry.unmapTestOnly(this); }
  void initCache(CacheT *Cache) { *Cache = {}; }
  void commitBack(UNUSED scudo::TSD<MockAllocator> *TSD) {}
  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  void callPostInitCallback() {}

  bool isInitialized() { return Initialized; }

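  // Note (assumption): alignof(ThisT) can exceed what the default operator
  // new guarantees, since the embedded TSD registry may carry over-aligned
  // members; hence the allocation goes through posix_memalign.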
  void *operator new(size_t Size) {
    void *P = nullptr;
    EXPECT_EQ(0, posix_memalign(&P, alignof(ThisT), Size));
    return P;
  }
  void operator delete(void *P) { free(P); }

private:
  bool Initialized = false;
  TSDRegistryT TSDRegistry;
};

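// Registry configurations exercised below: a shared registry capped at a
// single TSD, a shared registry with an array of 16 TSDs (8 enabled by
// default), and an exclusive registry with one TSD per thread.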
struct OneCache {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 1U, 1U>;
};

struct SharedCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 16U, 8U>;
};

struct ExclusiveCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistryExT<Allocator>;
};

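// Checks that initOnceMaybe() initializes the allocator through the registry;
// MockAllocator::init() would flag a repeated initialization.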
TEST(ScudoTSDTest, TSDRegistryInit) {
  using AllocatorT = MockAllocator<OneCache>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initOnceMaybe(Allocator.get());
  EXPECT_TRUE(Allocator->isInitialized());
}

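// Basic TSD lifecycle for a given registry type: initialize the thread
// (minimally, then fully), grab a scoped TSD, and verify that its cache
// starts out zeroed before dirtying it.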
template <class AllocatorT>
static void testRegistry() NO_THREAD_SAFETY_ANALYSIS {
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/true);
  EXPECT_TRUE(Allocator->isInitialized());

  {
    typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
    EXPECT_EQ(TSD->getCache().Canary, 0U);
  }

  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
  {
    typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
    EXPECT_EQ(TSD->getCache().Canary, 0U);
    memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
  }
}

TEST(ScudoTSDTest, TSDRegistryBasic) {
  testRegistry<MockAllocator<OneCache>>();
  testRegistry<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistry<MockAllocator<ExclusiveCaches>>();
#endif
}

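// Start gate for the stress tests below: worker threads wait on Cv until the
// main thread flips Ready, so that they all hit the registry concurrently.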
static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready;

// Accessing `TSD->getCache()` requires `TSD::Mutex` which isn't easy to test
// using thread-safety analysis. Alternatively, we verify the thread safety
// through a runtime check in ScopedTSD and mark the test body with
// NO_THREAD_SAFETY_ANALYSIS.
template <typename AllocatorT>
static void stressCache(AllocatorT *Allocator) NO_THREAD_SAFETY_ANALYSIS {
  auto Registry = Allocator->getTSDRegistry();
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
  // For an exclusive TSD, the cache should be empty. We cannot guarantee the
  // same for a shared TSD.
  if (std::is_same<typename AllocatorT::TSDRegistryT,
                   scudo::TSDRegistryExT<AllocatorT>>()) {
    EXPECT_EQ(TSD->getCache().Canary, 0U);
  }
  // Transform the thread id to a uptr to use it as canary.
  const scudo::uptr Canary = static_cast<scudo::uptr>(
      std::hash<std::thread::id>{}(std::this_thread::get_id()));
  TSD->getCache().Canary = Canary;
  // Loop a few times to make sure that a concurrent thread isn't modifying it.
  for (scudo::uptr I = 0; I < 4096U; I++)
    EXPECT_EQ(TSD->getCache().Canary, Canary);
}

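// Runs stressCache() on 32 threads sharing one allocator, released
// simultaneously through the Ready/Cv start gate.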
template <class AllocatorT> static void testRegistryThreaded() {
  Ready = false;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressCache<AllocatorT>, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
}

TEST(ScudoTSDTest, TSDRegistryThreaded) {
  testRegistryThreaded<MockAllocator<OneCache>>();
  testRegistryThreaded<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistryThreaded<MockAllocator<ExclusiveCaches>>();
#endif
}

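// Distinct TSD addresses observed across all threads, merged under Mutex.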
static std::set<void *> Pointers;

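// Repeatedly acquires a scoped TSD and records its address in a local set,
// then merges that set into the global Pointers. The number of distinct
// addresses collected bounds how many TSDs the shared registry handed out.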
static void stressSharedRegistry(MockAllocator<SharedCaches> *Allocator) {
  std::set<void *> Set;
  auto Registry = Allocator->getTSDRegistry();
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
  for (scudo::uptr I = 0; I < 4096U; I++) {
    typename MockAllocator<SharedCaches>::TSDRegistryT::ScopedTSD TSD(
        *Registry);
    Set.insert(reinterpret_cast<void *>(&*TSD));
  }
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Pointers.insert(Set.begin(), Set.end());
  }
}

TEST(ScudoTSDTest, TSDRegistryTSDsCount) {
  Ready = false;
  Pointers.clear();
  using AllocatorT = MockAllocator<SharedCaches>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  // We attempt to use as many TSDs as the shared cache offers by creating a
  // decent amount of threads that will be run concurrently and attempt to get
  // and lock TSDs. We put them all in a set and count the number of entries
  // after we are done.
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  // The initial number of TSDs we get will be the minimum of the default count
  // and the number of CPUs.
  EXPECT_LE(Pointers.size(), 8U);
  Pointers.clear();
  auto Registry = Allocator->getTSDRegistry();
  // Increase the number of TSDs to 16.
  Registry->setOption(scudo::Option::MaxTSDsCount, 16);
  Ready = false;
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  // We should get 16 distinct TSDs back.
  EXPECT_EQ(Pointers.size(), 16U);
}