//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "stack_depot.h"
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "chunk.h"
#include "combined.h"
#include "condition_variable.h"
#include "mem_map.h"
#include "size_class_map.h"

#include <algorithm>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <stdlib.h>
#include <thread>
#include <vector>

static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

// Fuchsia complains that the function is not used.
UNUSED static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android; without this we can end
  // up spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

template <class AllocatorT>
bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUp(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}

template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUp(Size, MinAlignment);
  if (Allocator->useMemoryTaggingTestOnly())
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[-1] = 'A';
        },
        "");
  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
          ? Allocator->useMemoryTaggingTestOnly()
          : Alignment == MinAlignment) {
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[Size] = 'A';
        },
        "");
  }
}

template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
  TestAllocator() {
    this->initThreadMaybe();
    if (scudo::archSupportsMemoryTagging() &&
        !scudo::systemDetectsMemoryTagFaultsTestOnly())
      this->disableMemoryTagging();
  }
  ~TestAllocator() { this->unmapTestOnly(); }

  void *operator new(size_t size);
  void operator delete(void *ptr);
};

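// Worst-case alignment required by any of the allocator configurations under
// test; used to align the backing storage for TestAllocator instances.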
constexpr size_t kMaxAlign = std::max({
  alignof(scudo::Allocator<scudo::DefaultConfig>),
#if SCUDO_CAN_USE_PRIMARY64
      alignof(scudo::Allocator<scudo::FuchsiaConfig>),
#endif
      alignof(scudo::Allocator<scudo::AndroidConfig>)
});

#if SCUDO_RISCV64
// The allocator is over 4 MB in size. Rather than creating an instance of it
// on the heap, keep it in global storage to reduce the fragmentation caused by
// having to mmap it at the start of every test.
struct TestAllocatorStorage {
  static constexpr size_t kMaxSize = std::max({
    sizeof(scudo::Allocator<scudo::DefaultConfig>),
#if SCUDO_CAN_USE_PRIMARY64
        sizeof(scudo::Allocator<scudo::FuchsiaConfig>),
#endif
        sizeof(scudo::Allocator<scudo::AndroidConfig>)
  });

  // The mutex is acquired in get() and released in release(), a pairing the
  // thread-safety analysis cannot model, so skip the analysis here.
  static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
    CHECK(size <= kMaxSize &&
          "Allocation size doesn't fit in the allocator storage");
    M.lock();
    return AllocatorStorage;
  }

  static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS {
    M.assertHeld();
    M.unlock();
    ASSERT_EQ(ptr, AllocatorStorage);
  }

  static scudo::HybridMutex M;
  static uint8_t AllocatorStorage[kMaxSize];
};
scudo::HybridMutex TestAllocatorStorage::M;
alignas(kMaxAlign) uint8_t TestAllocatorStorage::AllocatorStorage[kMaxSize];
#else
struct TestAllocatorStorage {
  static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, kMaxAlign, size));
    return p;
  }
  static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS { free(ptr); }
};
#endif

template <typename Config>
void *TestAllocator<Config>::operator new(size_t size) {
  return TestAllocatorStorage::get(size);
}

template <typename Config>
void TestAllocator<Config>::operator delete(void *ptr) {
  TestAllocatorStorage::release(ptr);
}

template <class TypeParam> struct ScudoCombinedTest : public Test {
  ScudoCombinedTest() {
    UseQuarantine = std::is_same<TypeParam, scudo::AndroidConfig>::value;
    Allocator = std::make_unique<AllocatorT>();
  }
  ~ScudoCombinedTest() {
    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
    UseQuarantine = true;
  }

  void RunTest();

  void BasicTest(scudo::uptr SizeLog);

  using AllocatorT = TestAllocator<TypeParam>;
  std::unique_ptr<AllocatorT> Allocator;
};

template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;

namespace scudo {
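// A config used to exercise the condition-variable code path: it selects
// ConditionVariableLinux on Linux and the dummy implementation elsewhere, on
// top of a shared TSD registry.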
struct TestConditionVariableConfig {
  static const bool MaySupportMemoryTagging = true;
  template <class A>
  using TSDRegistryT =
      scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.

  struct Primary {
    using SizeClassMap = scudo::AndroidSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
    static const scudo::uptr RegionSizeLog = 28U;
    typedef scudo::u32 CompactPtrT;
    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const scudo::uptr GroupSizeLog = 20U;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
#else
    static const scudo::uptr RegionSizeLog = 18U;
    static const scudo::uptr GroupSizeLog = 18U;
    typedef scudo::uptr CompactPtrT;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
    static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
#if SCUDO_LINUX
    using ConditionVariableT = scudo::ConditionVariableLinux;
#else
    using ConditionVariableT = scudo::ConditionVariableDummy;
#endif
  };
#if SCUDO_CAN_USE_PRIMARY64
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator64<Config>;
#else
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator32<Config>;
#endif

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };
  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};
} // namespace scudo

#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<scudo::TYPE>;                   \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<scudo::TYPE>::Run(); }

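// SCUDO_TYPED_TEST(FIXTURE, NAME) declares a per-config test body and, via
// SCUDO_TYPED_TEST_ALL_TYPES, instantiates it once for every allocator config
// under test.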
#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    using BaseT = FIXTURE<TypeParam>;                                          \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()

// Accessing `TSD->getCache()` requires `TSD::Mutex` which isn't easy to test
// using thread-safety analysis. Alternatively, we verify the thread safety
// through a runtime check in ScopedTSD and mark the test body with
// NO_THREAD_SAFETY_ANALYSIS.
#define SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(FIXTURE, NAME)                     \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    using BaseT = FIXTURE<TypeParam>;                                          \
    void Run() NO_THREAD_SAFETY_ANALYSIS;                                      \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()

SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
  auto *Allocator = this->Allocator.get();
  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
  EXPECT_FALSE(
      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));

  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    StackBuffer[I] = 0x42U;
  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    EXPECT_EQ(StackBuffer[I], 0x42U);
}

template <class Config>
void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
  auto *Allocator = this->Allocator.get();

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, with a focus on sizes that could trigger weird
  // behaviors (e.g., a power of two plus or minus a small delta).
  for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
    const scudo::uptr Align = 1U << AlignLog;
    for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
      if ((1LL << SizeLog) + Delta < 0)
        continue;
      const scudo::uptr Size =
          static_cast<scudo::uptr>((1LL << SizeLog) + Delta);
      void *P = Allocator->allocate(Size, Origin, Align);
      EXPECT_NE(P, nullptr);
      EXPECT_TRUE(Allocator->isOwned(P));
      EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
      EXPECT_LE(Size, Allocator->getUsableSize(P));
      memset(P, 0xaa, Size);
      checkMemoryTaggingMaybe(Allocator, P, Size, Align);
      Allocator->deallocate(P, Origin, Size);
    }
  }

  Allocator->printStats();
  Allocator->printFragmentationInfo();
}

#define SCUDO_MAKE_BASIC_TEST(SizeLog)                                         \
  SCUDO_TYPED_TEST(ScudoCombinedDeathTest, BasicCombined##SizeLog) {           \
    this->BasicTest(SizeLog);                                                  \
  }

SCUDO_MAKE_BASIC_TEST(0)
SCUDO_MAKE_BASIC_TEST(1)
SCUDO_MAKE_BASIC_TEST(2)
SCUDO_MAKE_BASIC_TEST(3)
SCUDO_MAKE_BASIC_TEST(4)
SCUDO_MAKE_BASIC_TEST(5)
SCUDO_MAKE_BASIC_TEST(6)
SCUDO_MAKE_BASIC_TEST(7)
SCUDO_MAKE_BASIC_TEST(8)
SCUDO_MAKE_BASIC_TEST(9)
SCUDO_MAKE_BASIC_TEST(10)
SCUDO_MAKE_BASIC_TEST(11)
SCUDO_MAKE_BASIC_TEST(12)
SCUDO_MAKE_BASIC_TEST(13)
SCUDO_MAKE_BASIC_TEST(14)
SCUDO_MAKE_BASIC_TEST(15)
SCUDO_MAKE_BASIC_TEST(16)
SCUDO_MAKE_BASIC_TEST(17)
SCUDO_MAKE_BASIC_TEST(18)
SCUDO_MAKE_BASIC_TEST(19)
SCUDO_MAKE_BASIC_TEST(20)

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroContents returns a zero'd out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroFill returns a zero'd out block.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying PatternOrZeroFill returns a pattern or zero filled
  // block. The primary allocator only produces pattern filled blocks if MTE
  // is disabled, so we only require pattern filled blocks in that case.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
                                                          1U << MinAlignLog) &&
            !Allocator->useMemoryTaggingTestOnly())
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, BlockReuse) {
  auto *Allocator = this->Allocator.get();

  // Verify that a chunk will end up being reused, at some point.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->getHeaderTaggedPointer(P) ==
        Allocator->getHeaderTaggedPointer(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a chunk all the way up to a secondary allocation, verifying
  // that we preserve the data in the process.
  scudo::uptr Size = 16;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 'A';
  memset(P, Marker, Size);
  while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
    void *NewP = Allocator->reallocate(P, Size * 2);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < Size; J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    memset(reinterpret_cast<char *>(NewP) + Size, Marker, Size);
    Size *= 2U;
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 'A';
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
  auto *Allocator = this->Allocator.get();

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default size class maps.
  constexpr scudo::uptr InitialSize =
      TypeParam::Primary::SizeClassMap::MaxSize - 64;
  const char Marker = 'A';
  Allocator->setFillContents(scudo::PatternOrZeroFill);

  void *P = Allocator->allocate(InitialSize, Origin);
  scudo::uptr CurrentSize = InitialSize;
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    memset(P, Marker, CurrentSize);
    const scudo::uptr NewSize =
        static_cast<scudo::uptr>(static_cast<scudo::sptr>(InitialSize) + Delta);
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);

    // Verify that existing contents have been preserved.
    for (scudo::uptr I = 0; I < scudo::Min(CurrentSize, NewSize); I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);

    // Verify that new bytes are set according to FillContentsMode.
    for (scudo::uptr I = CurrentSize; I < NewSize; I++) {
      unsigned char V = (reinterpret_cast<unsigned char *>(NewP))[I];
      EXPECT_TRUE(V == scudo::PatternFillByte || V == 0);
    }

    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
    CurrentSize = NewSize;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
  auto *Allocator = this->Allocator.get();
  // Allocate a bunch of chunks, then iterate over all of them, ensuring they
  // are the ones we allocated. This requires the allocator to have no other
  // allocated chunk at this point (e.g., it won't work with the Quarantine).
  // FIXME: Make it work with UseQuarantine and tagging enabled. Internally,
  // iterateOverChunks reads headers through both tagged and untagged pointers,
  // so one of them will fail.
  if (!UseQuarantine) {
    std::vector<void *> V;
    for (scudo::uptr I = 0; I < 64U; I++)
      V.push_back(Allocator->allocate(
          static_cast<scudo::uptr>(std::rand()) %
              (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
          Origin));
    Allocator->disable();
    Allocator->iterateOverChunks(
        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
        [](uintptr_t Base, UNUSED size_t Size, void *Arg) {
          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
          void *P = reinterpret_cast<void *>(Base);
          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
        },
        reinterpret_cast<void *>(&V));
    Allocator->enable();
    for (auto P : V)
      Allocator->deallocate(P, Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
  auto *Allocator = this->Allocator.get();

  // Check that use-after-free is detected.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!Allocator->useMemoryTaggingTestOnly())
      continue;
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[0] = 'A';
        },
        "");
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[Size - 1] = 'A';
        },
        "");
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DoubleFreeFromPrimary) {
  auto *Allocator = this->Allocator.get();

  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!isPrimaryAllocation<TestAllocator<TypeParam>>(Size, 0))
      break;

    // Verify that a double free results in a chunk state error.
    EXPECT_DEATH(
        {
          // Allocate from primary
          void *P = Allocator->allocate(Size, Origin);
          ASSERT_TRUE(P != nullptr);
          Allocator->deallocate(P, Origin);
          Allocator->deallocate(P, Origin);
        },
        "invalid chunk state");
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
  auto *Allocator = this->Allocator.get();

  if (Allocator->useMemoryTaggingTestOnly()) {
    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 'A', "");
    scudo::ScopedDisableMemoryTagChecks NoTagChecks;
    Allocator->disableMemoryTagging();
    reinterpret_cast<char *>(P)[2048] = 'A';
    Allocator->deallocate(P, Origin);

    P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(scudo::untagPointer(P), P);
    reinterpret_cast<char *>(P)[2048] = 'A';
    Allocator->deallocate(P, Origin);

    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
  auto *Allocator = this->Allocator.get();

  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allows
  // us to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, CacheDrain) {
  using AllocatorT = typename BaseT::AllocatorT;
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        static_cast<scudo::uptr>(std::rand()) %
            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
        Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
      *Allocator->getTSDRegistry());
  EXPECT_TRUE(!TSD->getCache().isEmpty());
  TSD->getCache().drain();
  EXPECT_TRUE(TSD->getCache().isEmpty());
}

SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, ForceCacheDrain) {
  using AllocatorT = typename BaseT::AllocatorT;
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        static_cast<scudo::uptr>(std::rand()) %
            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
        Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  // `ForceAll` will also drain the caches.
  Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll);

  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
      *Allocator->getTSDRegistry());
  EXPECT_TRUE(TSD->getCache().isEmpty());
  EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U);
  EXPECT_TRUE(Allocator->getQuarantine()->isEmpty());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  auto *Allocator = this->Allocator.get();
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) % 4096U;
        void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
        if (P)
          V.push_back(std::make_pair(P, Size));
      }

      // Try to interleave pushBlocks(), popBatch() and releaseToOS().
      Allocator->releaseToOS(scudo::ReleaseToOS::Force);

      while (!V.empty()) {
        auto Pair = V.back();
        Allocator->deallocate(Pair.first, Origin, Pair.second);
        V.pop_back();
      }
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
TEST(ScudoCombinedDeathTest, SKIP_ON_FUCHSIA(testSEGV)) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::ReservedMemoryT ReservedMemory;
  ASSERT_TRUE(ReservedMemory.create(/*Addr=*/0U, Size, "testSEGV"));
  void *P = reinterpret_cast<void *>(ReservedMemory.getBase());
  ASSERT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  ReservedMemory.release();
}

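// A minimal size class map: four size classes between 1 KiB and 8 KiB, used by
// DeathConfig below.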
struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u16 MaxNumCachedHint = 8;
  static const scudo::uptr MaxBytesCachedLog = 12;
  static const scudo::uptr SizeDelta = 0;
};

static const scudo::uptr DeathRegionSizeLog = 21U;
struct DeathConfig {
  static const bool MaySupportMemoryTagging = false;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;

  struct Primary {
    // Tiny allocator, its Primary only serves chunks of four sizes.
    using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
    static const scudo::uptr RegionSizeLog = DeathRegionSizeLog;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
    static const scudo::uptr GroupSizeLog = 18;
  };
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator64<Config>;

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };

  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};

TEST(ScudoCombinedDeathTest, DeathCombined) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::Primary::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        DeathConfig::Primary::SizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::Primary::SizeClassMap::LargestClassId - ClassId + 1);
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
SCUDO_TYPED_TEST(ScudoCombinedTest, ReleaseToOS) {
  auto *Allocator = this->Allocator.get();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

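// With memory tagging enabled and MemtagTuning set to BUFFER_OVERFLOW,
// neighboring chunks are expected to get tags of opposite parity so that
// linear buffer overflows are caught.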
SCUDO_TYPED_TEST(ScudoCombinedTest, OddEven) {
  auto *Allocator = this->Allocator.get();
  Allocator->setOption(scudo::Option::MemtagTuning,
                       M_MEMTAG_TUNING_BUFFER_OVERFLOW);

  if (!Allocator->useMemoryTaggingTestOnly())
    return;

  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
    EXPECT_NE(Tag1 % 2, Tag2 % 2);
  };

  using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);

    std::set<scudo::uptr> Ptrs;
    bool Found = false;
    for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
      if (Ptrs.count(P - Size)) {
        Found = true;
        CheckOddEven(P, P - Size);
        break;
      }
      if (Ptrs.count(P + Size)) {
        Found = true;
        CheckOddEven(P, P + Size);
        break;
      }
      Ptrs.insert(P);
    }
    EXPECT_TRUE(Found);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> Ptrs(65536);

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // Test that if mem-init is disabled on a thread, calloc should still work as
  // expected. This is tricky to ensure when MTE is enabled, so this test tries
  // to exercise the relevant code on our MTE path.
  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
    using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
    const scudo::uptr Size =
        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
    if (Size < 8)
      continue;
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin);
      memset(Ptrs[I], 0xaa, Size);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
      memset(Ptrs[I], 0xbb, Size - 8);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      for (scudo::uptr J = 0; J < Size; ++J)
        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], '\0');
    }
  }

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
  auto *Allocator = this->Allocator.get();

  // Regression test: make realloc-in-place happen at the very right end of a
  // mapped region.
  constexpr size_t nPtrs = 10000;
  for (scudo::uptr i = 1; i < 32; ++i) {
    scudo::uptr Size = 16 * i - 1;
    std::vector<void *> Ptrs;
    for (size_t i = 0; i < nPtrs; ++i) {
      void *P = Allocator->allocate(Size, Origin);
      P = Allocator->reallocate(P, Size + 1);
      Ptrs.push_back(P);
    }

    for (size_t i = 0; i < nPtrs; ++i)
      Allocator->deallocate(Ptrs[i], Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferDefaultDisabled) {
  // The RingBuffer is not initialized until tracking is enabled for the
  // first time.
  auto *Allocator = this->Allocator.get();
  EXPECT_EQ(0u, Allocator->getRingBufferSize());
  EXPECT_EQ(nullptr, Allocator->getRingBufferAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferInitOnce) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto RingBufferSize = Allocator->getRingBufferSize();
  ASSERT_GT(RingBufferSize, 0u);
  auto *RingBufferAddress = Allocator->getRingBufferAddress();
  EXPECT_NE(nullptr, RingBufferAddress);

  // Enable tracking again to verify that the initialization only happens once.
  Allocator->setTrackAllocationStacks(true);
  ASSERT_EQ(RingBufferSize, Allocator->getRingBufferSize());
  EXPECT_EQ(RingBufferAddress, Allocator->getRingBufferAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto RingBufferSize = Allocator->getRingBufferSize();
  ASSERT_GT(RingBufferSize, 0u);
  EXPECT_EQ(Allocator->getRingBufferAddress()[RingBufferSize - 1], '\0');
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto *RingBufferAddress = Allocator->getRingBufferAddress();
  EXPECT_NE(RingBufferAddress, nullptr);
  EXPECT_EQ(RingBufferAddress, Allocator->getRingBufferAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotDefaultDisabled) {
  // The StackDepot is not initialized until tracking is enabled for the
  // first time.
  auto *Allocator = this->Allocator.get();
  EXPECT_EQ(0u, Allocator->getStackDepotSize());
  EXPECT_EQ(nullptr, Allocator->getStackDepotAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotInitOnce) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto StackDepotSize = Allocator->getStackDepotSize();
  EXPECT_GT(StackDepotSize, 0u);
  auto *StackDepotAddress = Allocator->getStackDepotAddress();
  EXPECT_NE(nullptr, StackDepotAddress);

  // Enable tracking again to verify that the initialization only happens once.
  Allocator->setTrackAllocationStacks(true);
  EXPECT_EQ(StackDepotSize, Allocator->getStackDepotSize());
  EXPECT_EQ(StackDepotAddress, Allocator->getStackDepotAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotSize) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto StackDepotSize = Allocator->getStackDepotSize();
  EXPECT_GT(StackDepotSize, 0u);
  EXPECT_EQ(Allocator->getStackDepotAddress()[StackDepotSize - 1], '\0');
}

SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotAddress) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto *StackDepotAddress = Allocator->getStackDepotAddress();
  EXPECT_NE(StackDepotAddress, nullptr);
  EXPECT_EQ(StackDepotAddress, Allocator->getStackDepotAddress());
}

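// Exercise the StackDepot directly, backing it with a flat buffer sized for a
// 1024-entry ring and a 1024-entry table to match the init(1024, 1024) call
// below.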
SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepot) {
  alignas(scudo::StackDepot) char Buf[sizeof(scudo::StackDepot) +
                                      1024 * sizeof(scudo::atomic_u64) +
                                      1024 * sizeof(scudo::atomic_u32)] = {};
  auto *Depot = reinterpret_cast<scudo::StackDepot *>(Buf);
  Depot->init(1024, 1024);
  ASSERT_TRUE(Depot->isValid(sizeof(Buf)));
  ASSERT_FALSE(Depot->isValid(sizeof(Buf) - 1));
  scudo::uptr Stack[] = {1, 2, 3};
  scudo::u32 Elem = Depot->insert(&Stack[0], &Stack[3]);
  scudo::uptr RingPosPtr = 0;
  scudo::uptr SizePtr = 0;
  ASSERT_TRUE(Depot->find(Elem, &RingPosPtr, &SizePtr));
  ASSERT_EQ(SizePtr, 3u);
  EXPECT_EQ(Depot->at(RingPosPtr), 1u);
  EXPECT_EQ(Depot->at(RingPosPtr + 1), 2u);
  EXPECT_EQ(Depot->at(RingPosPtr + 2), 3u);
}

#if SCUDO_CAN_USE_PRIMARY64
#if SCUDO_TRUSTY

// TrustyConfig is designed for a domain-specific allocator. Add a basic test
// which covers only simple operations and ensure the configuration is able to
// compile.
TEST(ScudoCombinedTest, BasicTrustyConfig) {
  using AllocatorT = scudo::Allocator<scudo::TrustyConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  for (scudo::uptr ClassId = 1U;
       ClassId <= scudo::TrustyConfig::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        scudo::TrustyConfig::SizeClassMap::getSizeByClassId(ClassId);
    void *p = Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin);
    ASSERT_NE(p, nullptr);
    free(p);
  }

  bool UnlockRequired;
  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
      *Allocator->getTSDRegistry());
  TSD->getCache().drain();

  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

#endif
#endif