// xref: external/compiler-rt/lib/scudo/scudo_allocator.cpp
//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//
16*7c3d14c8STreehugger Robot 
17*7c3d14c8STreehugger Robot #include "scudo_allocator.h"
18*7c3d14c8STreehugger Robot #include "scudo_utils.h"
19*7c3d14c8STreehugger Robot 
20*7c3d14c8STreehugger Robot #include "sanitizer_common/sanitizer_allocator_interface.h"
21*7c3d14c8STreehugger Robot #include "sanitizer_common/sanitizer_quarantine.h"
22*7c3d14c8STreehugger Robot 
23*7c3d14c8STreehugger Robot #include <limits.h>
24*7c3d14c8STreehugger Robot #include <pthread.h>
25*7c3d14c8STreehugger Robot #include <smmintrin.h>
26*7c3d14c8STreehugger Robot 
27*7c3d14c8STreehugger Robot #include <atomic>
28*7c3d14c8STreehugger Robot #include <cstring>
29*7c3d14c8STreehugger Robot 
namespace __scudo {

// Parameters of the backend primary allocator: an AllocatorSpace of ~0ULL
// lets the backend pick the base of the region (presumably randomized by
// sanitizer_common — confirm against SizeClassAllocator64); the region spans
// 1TB of virtual address space.
const uptr AllocatorSpace = ~0ULL;
const uptr AllocatorSize  =  0x10000000000ULL;
const uptr MinAlignmentLog = 4; // 16 bytes for x64
const uptr MaxAlignmentLog = 24;

// The backend is the sanitizer_common combined allocator: a size-class based
// primary for small allocations, an mmap-based secondary for large ones,
// fronted by per-thread caches.
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<AllocatorSpace, AllocatorSize, 0, SizeClassMap>
  PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
  ScudoAllocator;

static ScudoAllocator &getAllocator();

// Per-thread PRNG, used for the per-chunk Salt and for the Cookie below.
static thread_local Xorshift128Plus Prng;
// Global static cookie, initialized at start-up.
static u64 Cookie;
50*7c3d14c8STreehugger Robot 
// Lifecycle state of a chunk, stored in the 2-bit State field of its header.
enum ChunkState : u8 {
  ChunkAvailable  = 0,
  ChunkAllocated  = 1,
  ChunkQuarantine = 2
};

// The header is packed into a single 128-bit integer so that it can be read
// and written with the platform's 16-byte atomic primitives.
typedef unsigned __int128 PackedHeader;
typedef std::atomic<PackedHeader> AtomicPackedHeader;

// Our header requires 128-bit of storage on x64 (the only platform supported
// as of now), which fits nicely with the alignment requirements.
// Having the offset saves us from using functions such as GetBlockBegin, that
// is fairly costly. Our first implementation used the MetaData as well, which
// offers the advantage of being stored away from the chunk itself, but
// accessing it was costly as well.
// The header will be atomically loaded and stored using the 16-byte primitives
// offered by the platform (likely requires cmpxchg16b support).
struct UnpackedHeader {
  // 1st 8 bytes
  u16 Checksum      : 16; // CRC32 of the chunk address and header contents,
                          // see ScudoChunk::Checksum().
  u64 RequestedSize : 40; // Needed for reallocation purposes.
  u8  State         : 2;  // available, allocated, or quarantined
  u8  AllocType     : 2;  // malloc, new, new[], or memalign
  u8  Unused_0_     : 4;  // Must stay zero; verified on every load.
  // 2nd 8 bytes
  u64 Offset        : 20; // Offset from the beginning of the backend
                          // allocation to the beginning chunk itself, in
                          // multiples of MinAlignment. See comment about its
                          // maximum value and test in Initialize.
  u64 Unused_1_     : 28; // Must stay zero; verified on every load.
  u16 Salt          : 16; // Random per-chunk value, covered by the checksum.
};

// Packed and unpacked representations must be the same size so bit_cast
// between them is well-defined.
COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));

const uptr ChunkHeaderSize = sizeof(PackedHeader);
87*7c3d14c8STreehugger Robot 
struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any warranty that it wouldn't have been tampered. To
  // prevent this, we work with a local copy of the header.
  // Returns the start of the backend allocation backing this chunk.
  void *AllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // CRC32 checksum of the Chunk pointer and its ChunkHeader.
  // It currently uses the Intel Nehalem SSE4.2 crc32 64-bit instruction.
  u16 Checksum(UnpackedHeader *Header) const {
    u64 HeaderHolder[2];
    memcpy(HeaderHolder, Header, sizeof(HeaderHolder));
    // Seed with the global Cookie and the chunk address, so identical headers
    // at different addresses (or under a different cookie) checksum
    // differently.
    u64 Crc = _mm_crc32_u64(Cookie, reinterpret_cast<uptr>(this));
    // This is somewhat of a shortcut. The checksum is stored in the 16 least
    // significant bits of the first 8 bytes of the header, hence zero-ing
    // those bits out. It would be more valid to zero the checksum field of the
    // UnpackedHeader, but would require holding an additional copy of it.
    Crc = _mm_crc32_u64(Crc, HeaderHolder[0] & 0xffffffffffff0000ULL);
    Crc = _mm_crc32_u64(Crc, HeaderHolder[1]);
    return static_cast<u16>(Crc);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  // Dies if the header is corrupted: bad checksum or non-zero unused bits.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    // Relaxed ordering: integrity comes from the checksum check below, not
    // from inter-thread ordering.
    PackedHeader NewPackedHeader =
        AtomicHeader->load(std::memory_order_relaxed);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if ((NewUnpackedHeader->Unused_0_ != 0) ||
        (NewUnpackedHeader->Unused_1_ != 0) ||
        (NewUnpackedHeader->Checksum != Checksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = Checksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    AtomicHeader->store(NewPackedHeader, std::memory_order_relaxed);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = Checksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (!AtomicHeader->compare_exchange_strong(OldPackedHeader,
                                               NewPackedHeader,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed)) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};
153*7c3d14c8STreehugger Robot 
// Set for the duration of initInternal() to catch recursive initialization.
static bool ScudoInitIsRunning = false;

// One-shot guard for process-wide init, and the TLS key whose destructor
// (teardownThread) drives per-thread tear-down.
static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
static pthread_key_t pkey;

// Per-thread state: init/tear-down markers and the backend allocator cache.
static thread_local bool ThreadInited = false;
static thread_local bool ThreadTornDown = false;
static thread_local AllocatorCache Cache;
162*7c3d14c8STreehugger Robot 
teardownThread(void * p)163*7c3d14c8STreehugger Robot static void teardownThread(void *p) {
164*7c3d14c8STreehugger Robot   uptr v = reinterpret_cast<uptr>(p);
165*7c3d14c8STreehugger Robot   // The glibc POSIX thread-local-storage deallocation routine calls user
166*7c3d14c8STreehugger Robot   // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
167*7c3d14c8STreehugger Robot   // We want to be called last since other destructors might call free and the
168*7c3d14c8STreehugger Robot   // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
169*7c3d14c8STreehugger Robot   // quarantine and swallowing the cache.
170*7c3d14c8STreehugger Robot   if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
171*7c3d14c8STreehugger Robot     pthread_setspecific(pkey, reinterpret_cast<void *>(v + 1));
172*7c3d14c8STreehugger Robot     return;
173*7c3d14c8STreehugger Robot   }
174*7c3d14c8STreehugger Robot   drainQuarantine();
175*7c3d14c8STreehugger Robot   getAllocator().DestroyCache(&Cache);
176*7c3d14c8STreehugger Robot   ThreadTornDown = true;
177*7c3d14c8STreehugger Robot }
178*7c3d14c8STreehugger Robot 
initInternal()179*7c3d14c8STreehugger Robot static void initInternal() {
180*7c3d14c8STreehugger Robot   SanitizerToolName = "Scudo";
181*7c3d14c8STreehugger Robot   CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
182*7c3d14c8STreehugger Robot   ScudoInitIsRunning = true;
183*7c3d14c8STreehugger Robot 
184*7c3d14c8STreehugger Robot   initFlags();
185*7c3d14c8STreehugger Robot 
186*7c3d14c8STreehugger Robot   AllocatorOptions Options;
187*7c3d14c8STreehugger Robot   Options.setFrom(getFlags(), common_flags());
188*7c3d14c8STreehugger Robot   initAllocator(Options);
189*7c3d14c8STreehugger Robot 
190*7c3d14c8STreehugger Robot   ScudoInitIsRunning = false;
191*7c3d14c8STreehugger Robot }
192*7c3d14c8STreehugger Robot 
initGlobal()193*7c3d14c8STreehugger Robot static void initGlobal() {
194*7c3d14c8STreehugger Robot   pthread_key_create(&pkey, teardownThread);
195*7c3d14c8STreehugger Robot   initInternal();
196*7c3d14c8STreehugger Robot }
197*7c3d14c8STreehugger Robot 
// Lazily initializes the calling thread: runs the global init exactly once
// process-wide, arms the TLS destructor, and sets up this thread's backend
// cache. The ordering is load-bearing: pkey must exist (via initGlobal)
// before pthread_setspecific, and the backend must be up before InitCache.
static void NOINLINE initThread() {
  pthread_once(&GlobalInited, initGlobal);
  // The value 1 is the starting destructor iteration count, see
  // teardownThread; any non-null value arms the destructor.
  pthread_setspecific(pkey, reinterpret_cast<void *>(1));
  getAllocator().InitCache(&Cache);
  ThreadInited = true;
}
204*7c3d14c8STreehugger Robot 
// Callbacks used by the quarantine to recycle chunks and to manage its own
// internal storage, all funneled through a given backend cache.
struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (Header.State != ChunkQuarantine) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    // The backend must be handed the start of its own allocation, not the
    // (possibly offset) chunk address.
    void *Ptr = Chunk->AllocBeg(&Header);
    getAllocator().Deallocate(Cache_, Ptr);
  }

  /// Internal quarantine allocation and deallocation functions.
  void *Allocate(uptr Size) {
    // The internal quarantine memory cannot be protected by us. But the only
    // structures allocated are QuarantineBatch, that are 8KB for x64. So we
    // will use mmap for those, and given that Deallocate doesn't pass a size
    // in, we enforce the size of the allocation to be sizeof(QuarantineBatch).
    // TODO(kostyak): switching to mmap impacts greatly performances, we have
    //                to find another solution
    // CHECK_EQ(Size, sizeof(QuarantineBatch));
    // return MmapOrDie(Size, "QuarantineBatch");
    return getAllocator().Allocate(Cache_, Size, 1, false);
  }

  void Deallocate(void *Ptr) {
    // UnmapOrDie(Ptr, sizeof(QuarantineBatch));
    getAllocator().Deallocate(Cache_, Ptr);
  }

  // Backend cache used for the (de)allocations above; not owned.
  AllocatorCache *Cache_;
};
241*7c3d14c8STreehugger Robot 
typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache QuarantineCache;
// Per-thread quarantine cache; drained on thread exit (see teardownThread).
static thread_local QuarantineCache ThreadQuarantineCache;
245*7c3d14c8STreehugger Robot 
setFrom(const Flags * f,const CommonFlags * cf)246*7c3d14c8STreehugger Robot void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
247*7c3d14c8STreehugger Robot   MayReturnNull = cf->allocator_may_return_null;
248*7c3d14c8STreehugger Robot   QuarantineSizeMb = f->QuarantineSizeMb;
249*7c3d14c8STreehugger Robot   ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
250*7c3d14c8STreehugger Robot   DeallocationTypeMismatch = f->DeallocationTypeMismatch;
251*7c3d14c8STreehugger Robot   DeleteSizeMismatch = f->DeleteSizeMismatch;
252*7c3d14c8STreehugger Robot   ZeroContents = f->ZeroContents;
253*7c3d14c8STreehugger Robot }
254*7c3d14c8STreehugger Robot 
copyTo(Flags * f,CommonFlags * cf) const255*7c3d14c8STreehugger Robot void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
256*7c3d14c8STreehugger Robot   cf->allocator_may_return_null = MayReturnNull;
257*7c3d14c8STreehugger Robot   f->QuarantineSizeMb = QuarantineSizeMb;
258*7c3d14c8STreehugger Robot   f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
259*7c3d14c8STreehugger Robot   f->DeallocationTypeMismatch = DeallocationTypeMismatch;
260*7c3d14c8STreehugger Robot   f->DeleteSizeMismatch = DeleteSizeMismatch;
261*7c3d14c8STreehugger Robot   f->ZeroContents = ZeroContents;
262*7c3d14c8STreehugger Robot }
263*7c3d14c8STreehugger Robot 
// The front-end allocator tying together the backend, the quarantine, and
// the chunk-header protocol. (Definition continues past this view.)
struct Allocator {
  static const uptr MaxAllowedMallocSize = 1ULL << 40;
  static const uptr MinAlignment = 1 << MinAlignmentLog;
  static const uptr MaxAlignment = 1 << MaxAlignmentLog; // 16 MB

  ScudoAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they can
  // be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  QuarantineCache FallbackQuarantineCache;

  // Behavior toggles, copied from AllocatorOptions in init().
  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  // Deferred (linker) initialization; the real setup happens in init().
  explicit Allocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}
286*7c3d14c8STreehugger Robot 
init__scudo::Allocator287*7c3d14c8STreehugger Robot   void init(const AllocatorOptions &Options) {
288*7c3d14c8STreehugger Robot     // Currently SSE 4.2 support is required. This might change later.
289*7c3d14c8STreehugger Robot     CHECK(testCPUFeature(SSE4_2)); // for crc32
290*7c3d14c8STreehugger Robot 
291*7c3d14c8STreehugger Robot     // Verify that the header offset field can hold the maximum offset. In the
292*7c3d14c8STreehugger Robot     // worst case scenario, the backend allocation is already aligned on
293*7c3d14c8STreehugger Robot     // MaxAlignment, so in order to store the header and still be aligned, we
294*7c3d14c8STreehugger Robot     // add an extra MaxAlignment. As a result, the offset from the beginning of
295*7c3d14c8STreehugger Robot     // the backend allocation to the chunk will be MaxAlignment -
296*7c3d14c8STreehugger Robot     // ChunkHeaderSize.
297*7c3d14c8STreehugger Robot     UnpackedHeader Header = {};
298*7c3d14c8STreehugger Robot     uptr MaximumOffset = (MaxAlignment - ChunkHeaderSize) >> MinAlignmentLog;
299*7c3d14c8STreehugger Robot     Header.Offset = MaximumOffset;
300*7c3d14c8STreehugger Robot     if (Header.Offset != MaximumOffset) {
301*7c3d14c8STreehugger Robot       dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
302*7c3d14c8STreehugger Robot                      "header\n");
303*7c3d14c8STreehugger Robot     }
304*7c3d14c8STreehugger Robot 
305*7c3d14c8STreehugger Robot     DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
306*7c3d14c8STreehugger Robot     DeleteSizeMismatch = Options.DeleteSizeMismatch;
307*7c3d14c8STreehugger Robot     ZeroContents = Options.ZeroContents;
308*7c3d14c8STreehugger Robot     BackendAllocator.Init(Options.MayReturnNull);
309*7c3d14c8STreehugger Robot     AllocatorQuarantine.Init(static_cast<uptr>(Options.QuarantineSizeMb) << 20,
310*7c3d14c8STreehugger Robot                              static_cast<uptr>(
311*7c3d14c8STreehugger Robot                                  Options.ThreadLocalQuarantineSizeKb) << 10);
312*7c3d14c8STreehugger Robot     BackendAllocator.InitCache(&FallbackAllocatorCache);
313*7c3d14c8STreehugger Robot     Cookie = Prng.Next();
314*7c3d14c8STreehugger Robot   }
315*7c3d14c8STreehugger Robot 
  // Allocates a chunk of at least Size bytes aligned to Alignment, tagged
  // with the given AllocType; returns the user pointer, or null/dies per
  // backend policy on failure.
  void *allocate(uptr Size, uptr Alignment, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!IsPowerOfTwo(Alignment)) {
      dieWithMessage("ERROR: malloc alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return BackendAllocator.ReturnNullOrDie();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    // A zero-sized request still yields a unique, dereferenceable pointer.
    if (Size == 0)
      Size = 1;
    if (Size >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDie();
    uptr RoundedSize = RoundUpTo(Size, MinAlignment);
    // Reserve room for the header, plus enough slack to realign the user
    // pointer when a stricter-than-minimum alignment was requested.
    uptr ExtraBytes = ChunkHeaderSize;
    if (Alignment > MinAlignment)
      ExtraBytes += Alignment;
    uptr NeededSize = RoundedSize + ExtraBytes;
    if (NeededSize >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDie();

    // Use the thread-local backend cache while the thread is alive; after
    // tear-down, fall back to the mutex-protected shared cache.
    void *Ptr;
    if (LIKELY(!ThreadTornDown)) {
      Ptr = BackendAllocator.Allocate(&Cache, NeededSize, MinAlignment);
    } else {
      SpinMutexLock l(&FallbackMutex);
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                               MinAlignment);
    }
    if (!Ptr)
      return BackendAllocator.ReturnNullOrDie();

    // If requested, we will zero out the entire contents of the returned chunk.
    if (ZeroContents && BackendAllocator.FromPrimary(Ptr))
       memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr));

    // Carve the chunk out of the backend allocation: header first, then the
    // user pointer, realigned upward if necessary. The distance back to the
    // backend beginning is recorded in the header in MinAlignment units.
    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    uptr ChunkBeg = AllocBeg + ChunkHeaderSize;
    if (!IsAligned(ChunkBeg, Alignment))
      ChunkBeg = RoundUpTo(ChunkBeg, Alignment);
    CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    UnpackedHeader Header = {};
    Header.State = ChunkAllocated;
    Header.Offset = (ChunkBeg - ChunkHeaderSize - AllocBeg) >> MinAlignmentLog;
    Header.AllocType = Type;
    Header.RequestedSize = Size;
    Header.Salt = static_cast<u16>(Prng.Next());
    Chunk->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(ChunkBeg);
    // TODO(kostyak): hooks sound like a terrible idea security wise but might
    //                be needed for things to work properly?
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }
374*7c3d14c8STreehugger Robot 
  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine). DeleteSize is the size passed to a sized operator delete
  // (0 when unknown); Type is checked against the allocation's AllocType.
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    // TODO(kostyak): see hook comment above
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (!UserPtr)
      return;
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", Chunk);
    }
    // Atomically flip the chunk to quarantined. A concurrent deallocation of
    // the same chunk would change the header between our load and this
    // exchange, failing the compare-exchange and aborting.
    UnpackedHeader NewHeader = OldHeader;
    NewHeader.State = ChunkQuarantine;
    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (NewHeader.AllocType != Type) {
        // With the exception of memalign'd Chunks, that can be still be free'd.
        if (NewHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         Chunk);
        }
      }
    }
    uptr Size = NewHeader.RequestedSize;
    if (DeleteSizeMismatch) {
      // A sized delete must pass the size that was originally requested.
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       Chunk);
      }
    }
    // Queue the chunk in the quarantine, via the fallback cache if this
    // thread's local caches were already torn down.
    if (LIKELY(!ThreadTornDown)) {
      AllocatorQuarantine.Put(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache), Chunk, Size);
    } else {
      SpinMutexLock l(&FallbackMutex);
      AllocatorQuarantine.Put(&FallbackQuarantineCache,
                              QuarantineCallback(&FallbackAllocatorCache),
                              Chunk, Size);
    }
  }
427*7c3d14c8STreehugger Robot 
428*7c3d14c8STreehugger Robot   // Returns the actual usable size of a chunk. Since this requires loading the
429*7c3d14c8STreehugger Robot   // header, we will return it in the second parameter, as it can be required
430*7c3d14c8STreehugger Robot   // by the caller to perform additional processing.
getUsableSize__scudo::Allocator431*7c3d14c8STreehugger Robot   uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
432*7c3d14c8STreehugger Robot     if (UNLIKELY(!ThreadInited))
433*7c3d14c8STreehugger Robot       initThread();
434*7c3d14c8STreehugger Robot     if (!Ptr)
435*7c3d14c8STreehugger Robot       return 0;
436*7c3d14c8STreehugger Robot     uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
437*7c3d14c8STreehugger Robot     ScudoChunk *Chunk =
438*7c3d14c8STreehugger Robot         reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
439*7c3d14c8STreehugger Robot     Chunk->loadHeader(Header);
440*7c3d14c8STreehugger Robot     // Getting the usable size of a chunk only makes sense if it's allocated.
441*7c3d14c8STreehugger Robot     if (Header->State != ChunkAllocated) {
442*7c3d14c8STreehugger Robot       dieWithMessage("ERROR: attempted to size a non-allocated chunk at "
443*7c3d14c8STreehugger Robot                      "address %p\n", Chunk);
444*7c3d14c8STreehugger Robot     }
445*7c3d14c8STreehugger Robot     uptr Size =
446*7c3d14c8STreehugger Robot         BackendAllocator.GetActuallyAllocatedSize(Chunk->AllocBeg(Header));
447*7c3d14c8STreehugger Robot     // UsableSize works as malloc_usable_size, which is also what (AFAIU)
448*7c3d14c8STreehugger Robot     // tcmalloc's MallocExtension::GetAllocatedSize aims at providing. This
449*7c3d14c8STreehugger Robot     // means we will return the size of the chunk from the user beginning to
450*7c3d14c8STreehugger Robot     // the end of the 'user' allocation, hence us subtracting the header size
451*7c3d14c8STreehugger Robot     // and the offset from the size.
452*7c3d14c8STreehugger Robot     if (Size == 0)
453*7c3d14c8STreehugger Robot       return Size;
454*7c3d14c8STreehugger Robot     return Size - ChunkHeaderSize - (Header->Offset << MinAlignmentLog);
455*7c3d14c8STreehugger Robot   }
456*7c3d14c8STreehugger Robot 
457*7c3d14c8STreehugger Robot   // Helper function that doesn't care about the header.
getUsableSize__scudo::Allocator458*7c3d14c8STreehugger Robot   uptr getUsableSize(const void *Ptr) {
459*7c3d14c8STreehugger Robot     UnpackedHeader Header;
460*7c3d14c8STreehugger Robot     return getUsableSize(Ptr, &Header);
461*7c3d14c8STreehugger Robot   }
462*7c3d14c8STreehugger Robot 
  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  //
  // Returns the old pointer on an in-place shrink/fit, the new pointer on a
  // grow, or null if the new allocation failed (the old chunk is untouched
  // in that case). Dies - via getUsableSize - if the chunk is not in the
  // allocated state, or here if it was not allocated with malloc (realloc
  // of memalign'd memory is not supported).
  void *reallocate(void *OldPtr, uptr NewSize) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    UnpackedHeader OldHeader;
    // Loads and validates the header as a side effect.
    uptr Size = getUsableSize(OldPtr, &OldHeader);
    uptr ChunkBeg = reinterpret_cast<uptr>(OldPtr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    if (OldHeader.AllocType != FromMalloc) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    // The new size still fits in the current chunk: record the new requested
    // size in the (checksum-guarded) header and keep the chunk in place.
    if (NewSize <= Size) {
      NewHeader.RequestedSize = NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.RequestedSize;
      // Only the originally requested bytes are meaningful to copy.
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      // Flip the old chunk to the quarantined state before putting it on the
      // delayed free list, so that a stale use trips the state check.
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      // Per-thread quarantine cache while the thread is alive; global
      // fallback cache (under a lock) once teardown has started.
      if (LIKELY(!ThreadTornDown)) {
        AllocatorQuarantine.Put(&ThreadQuarantineCache,
                                QuarantineCallback(&Cache), Chunk, OldSize);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, OldSize);
      }
    }
    return NewPtr;
  }
504*7c3d14c8STreehugger Robot 
calloc__scudo::Allocator505*7c3d14c8STreehugger Robot   void *calloc(uptr NMemB, uptr Size) {
506*7c3d14c8STreehugger Robot     if (UNLIKELY(!ThreadInited))
507*7c3d14c8STreehugger Robot       initThread();
508*7c3d14c8STreehugger Robot     uptr Total = NMemB * Size;
509*7c3d14c8STreehugger Robot     if (Size != 0 && Total / Size != NMemB) // Overflow check
510*7c3d14c8STreehugger Robot       return BackendAllocator.ReturnNullOrDie();
511*7c3d14c8STreehugger Robot     void *Ptr = allocate(Total, MinAlignment, FromMalloc);
512*7c3d14c8STreehugger Robot     // If ZeroContents, the content of the chunk has already been zero'd out.
513*7c3d14c8STreehugger Robot     if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
514*7c3d14c8STreehugger Robot       memset(Ptr, 0, getUsableSize(Ptr));
515*7c3d14c8STreehugger Robot     return Ptr;
516*7c3d14c8STreehugger Robot   }
517*7c3d14c8STreehugger Robot 
drainQuarantine__scudo::Allocator518*7c3d14c8STreehugger Robot   void drainQuarantine() {
519*7c3d14c8STreehugger Robot     AllocatorQuarantine.Drain(&ThreadQuarantineCache,
520*7c3d14c8STreehugger Robot                               QuarantineCallback(&Cache));
521*7c3d14c8STreehugger Robot   }
522*7c3d14c8STreehugger Robot };
523*7c3d14c8STreehugger Robot 
// The single global Scudo allocator instance. LINKER_INITIALIZED skips the
// constructor; actual initialization happens in initAllocator() below.
static Allocator Instance(LINKER_INITIALIZED);

// Accessor for the backend (sanitizer_common) allocator, used by the
// __sanitizer_* statistics interface at the bottom of this file.
static ScudoAllocator &getAllocator() {
  return Instance.BackendAllocator;
}
529*7c3d14c8STreehugger Robot 
initAllocator(const AllocatorOptions & Options)530*7c3d14c8STreehugger Robot void initAllocator(const AllocatorOptions &Options) {
531*7c3d14c8STreehugger Robot   Instance.init(Options);
532*7c3d14c8STreehugger Robot }
533*7c3d14c8STreehugger Robot 
drainQuarantine()534*7c3d14c8STreehugger Robot void drainQuarantine() {
535*7c3d14c8STreehugger Robot   Instance.drainQuarantine();
536*7c3d14c8STreehugger Robot }
537*7c3d14c8STreehugger Robot 
scudoMalloc(uptr Size,AllocType Type)538*7c3d14c8STreehugger Robot void *scudoMalloc(uptr Size, AllocType Type) {
539*7c3d14c8STreehugger Robot   return Instance.allocate(Size, Allocator::MinAlignment, Type);
540*7c3d14c8STreehugger Robot }
541*7c3d14c8STreehugger Robot 
scudoFree(void * Ptr,AllocType Type)542*7c3d14c8STreehugger Robot void scudoFree(void *Ptr, AllocType Type) {
543*7c3d14c8STreehugger Robot   Instance.deallocate(Ptr, 0, Type);
544*7c3d14c8STreehugger Robot }
545*7c3d14c8STreehugger Robot 
scudoSizedFree(void * Ptr,uptr Size,AllocType Type)546*7c3d14c8STreehugger Robot void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
547*7c3d14c8STreehugger Robot   Instance.deallocate(Ptr, Size, Type);
548*7c3d14c8STreehugger Robot }
549*7c3d14c8STreehugger Robot 
scudoRealloc(void * Ptr,uptr Size)550*7c3d14c8STreehugger Robot void *scudoRealloc(void *Ptr, uptr Size) {
551*7c3d14c8STreehugger Robot   if (!Ptr)
552*7c3d14c8STreehugger Robot     return Instance.allocate(Size, Allocator::MinAlignment, FromMalloc);
553*7c3d14c8STreehugger Robot   if (Size == 0) {
554*7c3d14c8STreehugger Robot     Instance.deallocate(Ptr, 0, FromMalloc);
555*7c3d14c8STreehugger Robot     return nullptr;
556*7c3d14c8STreehugger Robot   }
557*7c3d14c8STreehugger Robot   return Instance.reallocate(Ptr, Size);
558*7c3d14c8STreehugger Robot }
559*7c3d14c8STreehugger Robot 
scudoCalloc(uptr NMemB,uptr Size)560*7c3d14c8STreehugger Robot void *scudoCalloc(uptr NMemB, uptr Size) {
561*7c3d14c8STreehugger Robot   return Instance.calloc(NMemB, Size);
562*7c3d14c8STreehugger Robot }
563*7c3d14c8STreehugger Robot 
scudoValloc(uptr Size)564*7c3d14c8STreehugger Robot void *scudoValloc(uptr Size) {
565*7c3d14c8STreehugger Robot   return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
566*7c3d14c8STreehugger Robot }
567*7c3d14c8STreehugger Robot 
scudoMemalign(uptr Alignment,uptr Size)568*7c3d14c8STreehugger Robot void *scudoMemalign(uptr Alignment, uptr Size) {
569*7c3d14c8STreehugger Robot   return Instance.allocate(Size, Alignment, FromMemalign);
570*7c3d14c8STreehugger Robot }
571*7c3d14c8STreehugger Robot 
scudoPvalloc(uptr Size)572*7c3d14c8STreehugger Robot void *scudoPvalloc(uptr Size) {
573*7c3d14c8STreehugger Robot   uptr PageSize = GetPageSizeCached();
574*7c3d14c8STreehugger Robot   Size = RoundUpTo(Size, PageSize);
575*7c3d14c8STreehugger Robot   if (Size == 0) {
576*7c3d14c8STreehugger Robot     // pvalloc(0) should allocate one page.
577*7c3d14c8STreehugger Robot     Size = PageSize;
578*7c3d14c8STreehugger Robot   }
579*7c3d14c8STreehugger Robot   return Instance.allocate(Size, PageSize, FromMemalign);
580*7c3d14c8STreehugger Robot }
581*7c3d14c8STreehugger Robot 
scudoPosixMemalign(void ** MemPtr,uptr Alignment,uptr Size)582*7c3d14c8STreehugger Robot int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
583*7c3d14c8STreehugger Robot   *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
584*7c3d14c8STreehugger Robot   return 0;
585*7c3d14c8STreehugger Robot }
586*7c3d14c8STreehugger Robot 
scudoAlignedAlloc(uptr Alignment,uptr Size)587*7c3d14c8STreehugger Robot void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
588*7c3d14c8STreehugger Robot   // size must be a multiple of the alignment. To avoid a division, we first
589*7c3d14c8STreehugger Robot   // make sure that alignment is a power of 2.
590*7c3d14c8STreehugger Robot   CHECK(IsPowerOfTwo(Alignment));
591*7c3d14c8STreehugger Robot   CHECK_EQ((Size & (Alignment - 1)), 0);
592*7c3d14c8STreehugger Robot   return Instance.allocate(Size, Alignment, FromMalloc);
593*7c3d14c8STreehugger Robot }
594*7c3d14c8STreehugger Robot 
scudoMallocUsableSize(void * Ptr)595*7c3d14c8STreehugger Robot uptr scudoMallocUsableSize(void *Ptr) {
596*7c3d14c8STreehugger Robot   return Instance.getUsableSize(Ptr);
597*7c3d14c8STreehugger Robot }
598*7c3d14c8STreehugger Robot 
599*7c3d14c8STreehugger Robot } // namespace __scudo
600*7c3d14c8STreehugger Robot 
601*7c3d14c8STreehugger Robot using namespace __scudo;
602*7c3d14c8STreehugger Robot 
603*7c3d14c8STreehugger Robot // MallocExtension helper functions
604*7c3d14c8STreehugger Robot 
__sanitizer_get_current_allocated_bytes()605*7c3d14c8STreehugger Robot uptr __sanitizer_get_current_allocated_bytes() {
606*7c3d14c8STreehugger Robot   uptr stats[AllocatorStatCount];
607*7c3d14c8STreehugger Robot   getAllocator().GetStats(stats);
608*7c3d14c8STreehugger Robot   return stats[AllocatorStatAllocated];
609*7c3d14c8STreehugger Robot }
610*7c3d14c8STreehugger Robot 
__sanitizer_get_heap_size()611*7c3d14c8STreehugger Robot uptr __sanitizer_get_heap_size() {
612*7c3d14c8STreehugger Robot   uptr stats[AllocatorStatCount];
613*7c3d14c8STreehugger Robot   getAllocator().GetStats(stats);
614*7c3d14c8STreehugger Robot   return stats[AllocatorStatMapped];
615*7c3d14c8STreehugger Robot }
616*7c3d14c8STreehugger Robot 
__sanitizer_get_free_bytes()617*7c3d14c8STreehugger Robot uptr __sanitizer_get_free_bytes() {
618*7c3d14c8STreehugger Robot   return 1;
619*7c3d14c8STreehugger Robot }
620*7c3d14c8STreehugger Robot 
__sanitizer_get_unmapped_bytes()621*7c3d14c8STreehugger Robot uptr __sanitizer_get_unmapped_bytes() {
622*7c3d14c8STreehugger Robot   return 1;
623*7c3d14c8STreehugger Robot }
624*7c3d14c8STreehugger Robot 
__sanitizer_get_estimated_allocated_size(uptr size)625*7c3d14c8STreehugger Robot uptr __sanitizer_get_estimated_allocated_size(uptr size) {
626*7c3d14c8STreehugger Robot   return size;
627*7c3d14c8STreehugger Robot }
628*7c3d14c8STreehugger Robot 
__sanitizer_get_ownership(const void * p)629*7c3d14c8STreehugger Robot int __sanitizer_get_ownership(const void *p) {
630*7c3d14c8STreehugger Robot   return Instance.getUsableSize(p) != 0;
631*7c3d14c8STreehugger Robot }
632*7c3d14c8STreehugger Robot 
__sanitizer_get_allocated_size(const void * p)633*7c3d14c8STreehugger Robot uptr __sanitizer_get_allocated_size(const void *p) {
634*7c3d14c8STreehugger Robot   return Instance.getUsableSize(p);
635*7c3d14c8STreehugger Robot }
636