//===-- combined.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_COMBINED_H_
#define SCUDO_COMBINED_H_

#include "allocator_config_wrapper.h"
#include "atomic_helpers.h"
#include "chunk.h"
#include "common.h"
#include "flags.h"
#include "flags_parser.h"
#include "local_cache.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "quarantine.h"
#include "report.h"
#include "secondary.h"
#include "stack_depot.h"
#include "string_utils.h"
#include "tsd.h"

#include "scudo/interface.h"

#ifdef GWP_ASAN_HOOKS
#include "gwp_asan/guarded_pool_allocator.h"
#include "gwp_asan/optional/backtrace.h"
#include "gwp_asan/optional/segv_handler.h"
#endif // GWP_ASAN_HOOKS

extern "C" inline void EmptyCallback() {}

#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
// This function is not part of the NDK so it does not appear in any public
// header files. We only declare/use it when targeting the platform.
extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
                                                     size_t num_entries);
#endif

namespace scudo {

template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
class Allocator {
public:
  using AllocatorConfig = BaseConfig<Config>;
  using PrimaryT =
      typename AllocatorConfig::template PrimaryT<PrimaryConfig<Config>>;
  using SecondaryT =
      typename AllocatorConfig::template SecondaryT<SecondaryConfig<Config>>;
  using CacheT = typename PrimaryT::CacheT;
  typedef Allocator<Config, PostInitCallback> ThisT;
  typedef typename AllocatorConfig::template TSDRegistryT<ThisT> TSDRegistryT;

  void callPostInitCallback() {
    pthread_once(&PostInitNonce, PostInitCallback);
  }

  struct QuarantineCallback {
    explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
        : Allocator(Instance), Cache(LocalCache) {}

    // Chunk recycling function, returns a quarantined chunk to the backend,
    // first making sure it hasn't been tampered with.
    void recycle(void *Ptr) {
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
      if (UNLIKELY(Header.State != Chunk::State::Quarantined))
        reportInvalidChunkState(AllocatorAction::Recycling, Ptr);

      Header.State = Chunk::State::Available;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);

      if (allocatorSupportsMemoryTagging<AllocatorConfig>())
        Ptr = untagPointer(Ptr);
      void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
      Cache.deallocate(Header.ClassId, BlockBegin);
    }

    // We take a shortcut when allocating a quarantine batch by working with the
    // appropriate class ID instead of using Size. The compiler should optimize
    // the class ID computation and work with the associated cache directly.
    void *allocate(UNUSED uptr Size) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      void *Ptr = Cache.allocate(QuarantineClassId);
      // Quarantine batch allocation failure is fatal.
      if (UNLIKELY(!Ptr))
        reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));

      Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
                                     Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header = {};
      Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
      Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
      Header.State = Chunk::State::Allocated;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);

      // Reset tag to 0 as this chunk may have been previously used for a tagged
      // user allocation.
      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(
              Allocator.Primary.Options.load())))
        storeTags(reinterpret_cast<uptr>(Ptr),
                  reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));

      return Ptr;
    }

    void deallocate(void *Ptr) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);

      if (UNLIKELY(Header.State != Chunk::State::Allocated))
        reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
      DCHECK_EQ(Header.ClassId, QuarantineClassId);
      DCHECK_EQ(Header.Offset, 0);
      DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));

      Header.State = Chunk::State::Available;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
      Cache.deallocate(QuarantineClassId,
                       reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
                                                Chunk::getHeaderSize()));
    }

  private:
    ThisT &Allocator;
    CacheT &Cache;
  };

  typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
  typedef typename QuarantineT::CacheT QuarantineCacheT;

  void init() {
    // Make sure that the page size is initialized if it's not a constant.
    CHECK_NE(getPageSizeCached(), 0U);

    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform;
    // if so, opt for the CRC32 hardware version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      HashAlgorithm = Checksum::HardwareCRC32;

    if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
      Cookie = static_cast<u32>(getMonotonicTime() ^
                                (reinterpret_cast<uptr>(this) >> 4));

    initFlags();
    reportUnrecognizedFlags();

    // Store some flags locally.
    if (getFlags()->may_return_null)
      Primary.Options.set(OptionBit::MayReturnNull);
    if (getFlags()->zero_contents)
      Primary.Options.setFillContentsMode(ZeroFill);
    else if (getFlags()->pattern_fill_contents)
      Primary.Options.setFillContentsMode(PatternOrZeroFill);
    if (getFlags()->dealloc_type_mismatch)
      Primary.Options.set(OptionBit::DeallocTypeMismatch);
    if (getFlags()->delete_size_mismatch)
      Primary.Options.set(OptionBit::DeleteSizeMismatch);
    if (allocatorSupportsMemoryTagging<AllocatorConfig>() &&
        systemSupportsMemoryTagging())
      Primary.Options.set(OptionBit::UseMemoryTagging);

    QuarantineMaxChunkSize =
        static_cast<u32>(getFlags()->quarantine_max_chunk_size);

    Stats.init();
    // TODO(chiahungduan): Given that we support setting the default value in
    // the PrimaryConfig and CacheConfig, consider deprecating the
    // `release_to_os_interval_ms` flag.
    const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
    Primary.init(ReleaseToOsIntervalMs);
    Secondary.init(&Stats, ReleaseToOsIntervalMs);
    Quarantine.init(
        static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
        static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
  }
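
  // Usage sketch (illustrative): the flags consumed by init() are typically
  // supplied via the SCUDO_OPTIONS environment variable, parsed by
  // initFlags()/flags_parser.h. For example, assuming the flag names read
  // above (exact parsing rules live in the flags parser, not here):
  //   SCUDO_OPTIONS="quarantine_size_kb=256:quarantine_max_chunk_size=2048"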

  void enableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
    AllocationRingBuffer *RB = getRingBuffer();
    if (RB)
      RB->Depot->enable();
    RingBufferInitLock.unlock();
  }

  void disableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
    RingBufferInitLock.lock();
    AllocationRingBuffer *RB = getRingBuffer();
    if (RB)
      RB->Depot->disable();
  }

  // Initialize the embedded GWP-ASan instance. Requires the main allocator to
  // be functional, best called from PostInitCallback.
  void initGwpAsan() {
#ifdef GWP_ASAN_HOOKS
    gwp_asan::options::Options Opt;
    Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
    Opt.MaxSimultaneousAllocations =
        getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
    Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
    Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
    Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
    // Embedded GWP-ASan is locked through the Scudo atfork handler (via
    // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
    // handler.
    Opt.InstallForkHandlers = false;
    Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
    GuardedAlloc.init(Opt);

    if (Opt.InstallSignalHandlers)
      gwp_asan::segv_handler::installSignalHandlers(
          &GuardedAlloc, Printf,
          gwp_asan::backtrace::getPrintBacktraceFunction(),
          gwp_asan::backtrace::getSegvBacktraceFunction(),
          Opt.Recoverable);

    GuardedAllocSlotSize =
        GuardedAlloc.getAllocatorState()->maximumAllocationSize();
    Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
                            GuardedAllocSlotSize);
#endif // GWP_ASAN_HOOKS
  }

#ifdef GWP_ASAN_HOOKS
  const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
    return GuardedAlloc.getMetadataRegion();
  }

  const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
    return GuardedAlloc.getAllocatorState();
  }
#endif // GWP_ASAN_HOOKS

  ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
    TSDRegistry.initThreadMaybe(this, MinimalInit);
  }

  void unmapTestOnly() {
    unmapRingBuffer();
    TSDRegistry.unmapTestOnly(this);
    Primary.unmapTestOnly();
    Secondary.unmapTestOnly();
#ifdef GWP_ASAN_HOOKS
    if (getFlags()->GWP_ASAN_InstallSignalHandlers)
      gwp_asan::segv_handler::uninstallSignalHandlers();
    GuardedAlloc.uninitTestOnly();
#endif // GWP_ASAN_HOOKS
  }

  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  QuarantineT *getQuarantine() { return &Quarantine; }

  // The Cache must be provided zero-initialized.
  void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }

  // Release the resources used by a TSD, which involves:
  // - draining the local quarantine cache to the global quarantine;
  // - releasing the cached pointers back to the Primary;
  // - unlinking the local stats from the global ones (destroying the cache does
  //   the last two items).
  void commitBack(TSD<ThisT> *TSD) {
    TSD->assertLocked(/*BypassCheck=*/true);
    Quarantine.drain(&TSD->getQuarantineCache(),
                     QuarantineCallback(*this, TSD->getCache()));
    TSD->getCache().destroy(&Stats);
  }

  void drainCache(TSD<ThisT> *TSD) {
    TSD->assertLocked(/*BypassCheck=*/true);
    Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
                               QuarantineCallback(*this, TSD->getCache()));
    TSD->getCache().drain();
  }
  void drainCaches() { TSDRegistry.drainCaches(this); }

  ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
    if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
      return Ptr;
    auto UntaggedPtr = untagPointer(Ptr);
    if (UntaggedPtr != Ptr)
      return UntaggedPtr;
    // Secondary, or pointer allocated while memory tagging is unsupported or
    // disabled. The tag mismatch is okay in the latter case because tags will
    // not be checked.
    return addHeaderTag(Ptr);
  }

  ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
    if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
      return Ptr;
    return addFixedTag(Ptr, 2);
  }

  ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
    return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
  }

  NOINLINE u32 collectStackTrace(UNUSED StackDepot *Depot) {
#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
    // Discard collectStackTrace() frame and allocator function frame.
    constexpr uptr DiscardFrames = 2;
    uptr Stack[MaxTraceSize + DiscardFrames];
    uptr Size =
        android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
    Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
    return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
#else
    return 0;
#endif
  }

  uptr computeOddEvenMaskForPointerMaybe(const Options &Options, uptr Ptr,
                                         uptr ClassId) {
    if (!Options.get(OptionBit::UseOddEvenTags))
      return 0;

    // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
    // even, and vice versa. Blocks are laid out Size bytes apart, and adding
    // Size to Ptr will flip the least significant set bit of Size in Ptr, so
    // that bit will have the pattern 010101... for consecutive blocks, which we
    // can use to determine which tag mask to use.
    return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
  }
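
  // A worked example (illustrative): for a size class whose blocks are laid
  // out 32 bytes apart, getSizeLSBByClassId() yields 5, and bit 5 of Ptr
  // alternates between consecutive blocks. The function therefore alternately
  // returns 0x5555 (binary 0101...) and 0xAAAA (binary 1010...); used as an
  // exclude mask when selecting a random tag (see the prepareTaggedChunk()
  // call sites below), it confines neighboring blocks to disjoint odd/even
  // tag sets, so a linear overflow into an adjacent block always produces a
  // tag mismatch.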

  NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
                          uptr Alignment = MinAlignment,
                          bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
    initThreadMaybe();

    const Options Options = Primary.Options.load();
    if (UNLIKELY(Alignment > MaxAlignment)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAlignmentTooBig(Alignment, MaxAlignment);
    }
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.shouldSample())) {
      if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
        Stats.lock();
        Stats.add(StatAllocated, GuardedAllocSlotSize);
        Stats.sub(StatFree, GuardedAllocSlotSize);
        Stats.unlock();
        return Ptr;
      }
    }
#endif // GWP_ASAN_HOOKS

    const FillContentsMode FillContents = ZeroContents ? ZeroFill
                                          : TSDRegistry.getDisableMemInit()
                                              ? NoFill
                                              : Options.getFillContentsMode();

    // If the requested size happens to be 0 (more common than you might think),
    // allocate MinAlignment bytes on top of the header. Then add the extra
    // bytes required to fulfill the alignment requirements: we allocate enough
    // to be sure that there will be an address in the block that will satisfy
    // the alignment.
    const uptr NeededSize =
        roundUp(Size, MinAlignment) +
        ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
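    // A worked example (illustrative, assuming an 8-byte header and a
    // MinAlignment of 16): for Size = 24 and Alignment = 64, NeededSize is
    // roundUp(24, 16) + 64 = 96, enough to guarantee a 64-aligned address
    // with header room before it somewhere in the block. When Alignment <=
    // MinAlignment, only Chunk::getHeaderSize() is added, since backend
    // blocks already satisfy MinAlignment.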

    // Takes care of extravagantly large sizes as well as integer overflows.
    static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
    if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
    }
    DCHECK_LE(Size, NeededSize);

    void *Block = nullptr;
    uptr ClassId = 0;
    uptr SecondaryBlockEnd = 0;
    if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
      ClassId = SizeClassMap::getClassIdBySize(NeededSize);
      DCHECK_NE(ClassId, 0U);
      typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
      Block = TSD->getCache().allocate(ClassId);
393       Block = TSD->getCache().allocate(ClassId);
394       // If the allocation failed, retry in each successively larger class until
395       // it fits. If it fails to fit in the largest class, fallback to the
396       // Secondary.
397       if (UNLIKELY(!Block)) {
398         while (ClassId < SizeClassMap::LargestClassId && !Block)
399           Block = TSD->getCache().allocate(++ClassId);
400         if (!Block)
401           ClassId = 0;
402       }
403     }
404     if (UNLIKELY(ClassId == 0)) {
405       Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
406                                  FillContents);
407     }
408 
409     if (UNLIKELY(!Block)) {
410       if (Options.get(OptionBit::MayReturnNull))
411         return nullptr;
412       printStats();
413       reportOutOfMemory(NeededSize);
414     }
415 
416     const uptr UserPtr = roundUp(
417         reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize(), Alignment);
418     const uptr SizeOrUnusedBytes =
419         ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size);
420 
421     if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
422       return initChunk(ClassId, Origin, Block, UserPtr, SizeOrUnusedBytes,
423                        FillContents);
424     }
425 
426     return initChunkWithMemoryTagging(ClassId, Origin, Block, UserPtr, Size,
427                                       SizeOrUnusedBytes, FillContents);
428   }
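
  // Usage sketch (illustrative; DefaultConfig stands in for whatever concrete
  // configuration the embedder selects in allocator_config.h):
  //   static scudo::Allocator<scudo::DefaultConfig> A;
  //   void *P = A.allocate(128, scudo::Chunk::Origin::Malloc);
  //   A.deallocate(P, scudo::Chunk::Origin::Malloc);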

  NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
                           UNUSED uptr Alignment = MinAlignment) {
    if (UNLIKELY(!Ptr))
      return;

    // For a deallocation, we only ensure minimal initialization, meaning thread
    // local data will be left uninitialized for now (when using ELF TLS). The
    // fallback cache will be used instead. This is a workaround for a situation
    // where the only heap operation performed in a thread would be a free past
    // the TLS destructors, which would leave initialized thread-specific data
    // never properly destroyed. Any other heap operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
      GuardedAlloc.deallocate(Ptr);
      Stats.lock();
      Stats.add(StatFree, GuardedAllocSlotSize);
      Stats.sub(StatAllocated, GuardedAllocSlotSize);
      Stats.unlock();
      return;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);

    void *TaggedPtr = Ptr;
    Ptr = getHeaderTaggedPointer(Ptr);

    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);

    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);

    const Options Options = Primary.Options.load();
    if (Options.get(OptionBit::DeallocTypeMismatch)) {
      if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
            Origin != Chunk::Origin::Malloc)
          reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
                                    Header.OriginOrWasZeroed, Origin);
      }
    }

    const uptr Size = getSize(Ptr, &Header);
    if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
      if (UNLIKELY(DeleteSize != Size))
        reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
    }

    quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
  }

  void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
    initThreadMaybe();

    const Options Options = Primary.Options.load();
    if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
    }

    // The following cases are handled by the C wrappers.
    DCHECK_NE(OldPtr, nullptr);
    DCHECK_NE(NewSize, 0);

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
      uptr OldSize = GuardedAlloc.getSize(OldPtr);
      void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
      if (NewPtr)
        memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
      GuardedAlloc.deallocate(OldPtr);
      Stats.lock();
      Stats.add(StatFree, GuardedAllocSlotSize);
      Stats.sub(StatAllocated, GuardedAllocSlotSize);
      Stats.unlock();
      return NewPtr;
    }
#endif // GWP_ASAN_HOOKS

    void *OldTaggedPtr = OldPtr;
    OldPtr = getHeaderTaggedPointer(OldPtr);

    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);

    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, OldPtr, &Header);

    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);

    // Pointer has to be allocated with a malloc-type function. Some
    // applications think that it is OK to realloc a memalign'ed pointer, which
    // will trigger this check. It really isn't.
    if (Options.get(OptionBit::DeallocTypeMismatch)) {
      if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Malloc))
        reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
                                  Header.OriginOrWasZeroed,
                                  Chunk::Origin::Malloc);
    }

    void *BlockBegin = getBlockBegin(OldTaggedPtr, &Header);
    uptr BlockEnd;
    uptr OldSize;
    const uptr ClassId = Header.ClassId;
    if (LIKELY(ClassId)) {
      BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
                 SizeClassMap::getSizeByClassId(ClassId);
      OldSize = Header.SizeOrUnusedBytes;
    } else {
      BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
      OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
                            Header.SizeOrUnusedBytes);
    }
    // If the new chunk still fits in the previously allocated block (with a
    // reasonable delta), we just keep the old block, and update the chunk
    // header to reflect the size change.
    if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
      if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
        // If we have reduced the size, set the extra bytes to the fill value
        // so that we are ready to grow it again in the future.
        if (NewSize < OldSize) {
          const FillContentsMode FillContents =
              TSDRegistry.getDisableMemInit() ? NoFill
                                              : Options.getFillContentsMode();
          if (FillContents != NoFill) {
            memset(reinterpret_cast<char *>(OldTaggedPtr) + NewSize,
                   FillContents == ZeroFill ? 0 : PatternFillByte,
                   OldSize - NewSize);
          }
        }

        Header.SizeOrUnusedBytes =
            (ClassId ? NewSize
                     : BlockEnd -
                           (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
            Chunk::SizeOrUnusedBytesMask;
        Chunk::storeHeader(Cookie, OldPtr, &Header);
        if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options))) {
          if (ClassId) {
            resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
                              reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
                              NewSize, untagPointer(BlockEnd));
            storePrimaryAllocationStackMaybe(Options, OldPtr);
          } else {
            storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
          }
        }
        return OldTaggedPtr;
      }
    }

    // Otherwise we allocate a new one, and deallocate the old one. Some
    // allocators will allocate an even larger chunk (by a fixed factor) to
    // allow for potential further in-place realloc. The gains of such a trick
    // are currently unclear.
    void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
    if (LIKELY(NewPtr)) {
      memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(Options, OldTaggedPtr, &Header, OldSize);
    }
    return NewPtr;
  }

  // TODO(kostyak): disable() is currently best-effort. There are some small
  //                windows of time when an allocation could still succeed after
  //                this function finishes. We will revisit that later.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    initThreadMaybe();
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.disable();
#endif
    TSDRegistry.disable();
    Stats.disable();
    Quarantine.disable();
    Primary.disable();
    Secondary.disable();
    disableRingBuffer();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    initThreadMaybe();
    enableRingBuffer();
    Secondary.enable();
    Primary.enable();
    Quarantine.enable();
    Stats.enable();
    TSDRegistry.enable();
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.enable();
#endif
  }

  // The function returns the number of bytes required to store the statistics,
  // which might be larger than the number of bytes provided. Note that the
  // statistics buffer is not necessarily constant between calls to this
  // function. This can be called with a null buffer or zero size for buffer
  // sizing purposes.
  uptr getStats(char *Buffer, uptr Size) {
    ScopedString Str;
    const uptr Length = getStats(&Str) + 1;
    if (Length < Size)
      Size = Length;
    if (Buffer && Size) {
      memcpy(Buffer, Str.data(), Size);
      Buffer[Size - 1] = '\0';
    }
    return Length;
  }
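
  // Typical use of the sizing contract above (illustrative):
  //   uptr Needed = A.getStats(nullptr, 0); // query the required length
  //   char *Buf = static_cast<char *>(malloc(Needed));
  //   A.getStats(Buf, Needed);              // copies and NUL-terminates
  // Because the statistics can change between the two calls, the second call
  // may truncate; the return value is always the length that was needed.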

  void printStats() {
    ScopedString Str;
    getStats(&Str);
    Str.output();
  }

  void printFragmentationInfo() {
    ScopedString Str;
    Primary.getFragmentationInfo(&Str);
    // Secondary allocator dumps the fragmentation data in getStats().
    Str.output();
  }

  void releaseToOS(ReleaseToOS ReleaseType) {
    initThreadMaybe();
    if (ReleaseType == ReleaseToOS::ForceAll)
      drainCaches();
    Primary.releaseToOS(ReleaseType);
    Secondary.releaseToOS();
  }

  // Iterate over all chunks and call a callback for all busy chunks located
  // within the provided memory range. Said callback must not use this allocator
  // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
  void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
                         void *Arg) {
    initThreadMaybe();
    if (archSupportsMemoryTagging())
      Base = untagPointer(Base);
    const uptr From = Base;
    const uptr To = Base + Size;
    bool MayHaveTaggedPrimary =
        allocatorSupportsMemoryTagging<AllocatorConfig>() &&
        systemSupportsMemoryTagging();
    auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
                   Arg](uptr Block) {
      if (Block < From || Block >= To)
        return;
      uptr Chunk;
      Chunk::UnpackedHeader Header;
      if (MayHaveTaggedPrimary) {
        // A chunk header can either have a zero tag (tagged primary) or the
        // header tag (secondary, or untagged primary). We don't know which so
        // try both.
        ScopedDisableMemoryTagChecks x;
        if (!getChunkFromBlock(Block, &Chunk, &Header) &&
            !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
          return;
      } else {
        if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
          return;
      }
      if (Header.State == Chunk::State::Allocated) {
        uptr TaggedChunk = Chunk;
        if (allocatorSupportsMemoryTagging<AllocatorConfig>())
          TaggedChunk = untagPointer(TaggedChunk);
        if (useMemoryTagging<AllocatorConfig>(Primary.Options.load()))
          TaggedChunk = loadTag(Chunk);
        Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
                 Arg);
      }
    };
    Primary.iterateOverBlocks(Lambda);
    Secondary.iterateOverBlocks(Lambda);
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
#endif
  }

  bool canReturnNull() {
    initThreadMaybe();
    return Primary.Options.load().get(OptionBit::MayReturnNull);
  }

  bool setOption(Option O, sptr Value) {
    initThreadMaybe();
    if (O == Option::MemtagTuning) {
      // Enabling odd/even tags involves a tradeoff between use-after-free
      // detection and buffer overflow detection. Odd/even tags make it more
      // likely for buffer overflows to be detected by increasing the size of
      // the guaranteed "red zone" around the allocation, but on the other hand
      // use-after-free is less likely to be detected because the tag space for
      // any particular chunk is cut in half. Therefore we use this tuning
      // setting to control whether odd/even tags are enabled.
      if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
        Primary.Options.set(OptionBit::UseOddEvenTags);
      else if (Value == M_MEMTAG_TUNING_UAF)
        Primary.Options.clear(OptionBit::UseOddEvenTags);
      return true;
    } else {
      // We leave it to the various sub-components to decide whether or not they
      // want to handle the option, but we do not want to short-circuit
      // execution if one of the setOption calls were to return false.
      const bool PrimaryResult = Primary.setOption(O, Value);
      const bool SecondaryResult = Secondary.setOption(O, Value);
      const bool RegistryResult = TSDRegistry.setOption(O, Value);
      return PrimaryResult && SecondaryResult && RegistryResult;
    }
    return false;
  }
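
  // Illustrative example of the MemtagTuning path above (on Android this is
  // typically reached via mallopt()):
  //   A.setOption(scudo::Option::MemtagTuning, M_MEMTAG_TUNING_BUFFER_OVERFLOW);
  // enables odd/even tags for better overflow detection, while
  // M_MEMTAG_TUNING_UAF clears them to keep the full tag space for
  // use-after-free detection.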

  // Return the usable size for a given chunk. Technically we lie, as we just
  // report the actual size of a chunk. This is done to counteract code actively
  // writing past the end of a chunk (like sqlite3) when the usable size allows
  // for it, which then forces realloc to copy the usable size of a chunk as
  // opposed to its actual size.
  uptr getUsableSize(const void *Ptr) {
    if (UNLIKELY(!Ptr))
      return 0;

    return getAllocSize(Ptr);
  }

  uptr getAllocSize(const void *Ptr) {
    initThreadMaybe();

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
      return GuardedAlloc.getSize(Ptr);
#endif // GWP_ASAN_HOOKS

    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);

    // Getting the alloc size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));

    return getSize(Ptr, &Header);
  }

  void getStats(StatCounters S) {
    initThreadMaybe();
    Stats.get(S);
  }

  // Returns true if the pointer provided was allocated by the current
  // allocator instance, which is compliant with tcmalloc's ownership concept.
  // A corrupted chunk will not be reported as owned, which is WAI.
  bool isOwned(const void *Ptr) {
    initThreadMaybe();
    // If the allocation is not owned, the tags could be wrong.
    ScopedDisableMemoryTagChecks x(
        useMemoryTagging<AllocatorConfig>(Primary.Options.load()));
#ifdef GWP_ASAN_HOOKS
    if (GuardedAlloc.pointerIsMine(Ptr))
      return true;
#endif // GWP_ASAN_HOOKS
    if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
      return false;
    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
    Chunk::UnpackedHeader Header;
    return Chunk::isValid(Cookie, Ptr, &Header) &&
           Header.State == Chunk::State::Allocated;
  }

  bool useMemoryTaggingTestOnly() const {
    return useMemoryTagging<AllocatorConfig>(Primary.Options.load());
  }
  void disableMemoryTagging() {
    // If we haven't been initialized yet, we need to initialize now in order to
    // prevent a future call to initThreadMaybe() from enabling memory tagging
    // based on feature detection. But don't call initThreadMaybe() because it
    // may end up calling the allocator (via pthread_atfork, via the post-init
    // callback), which may cause mappings to be created with memory tagging
    // enabled.
    TSDRegistry.initOnceMaybe(this);
    if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
      Secondary.disableMemoryTagging();
      Primary.Options.clear(OptionBit::UseMemoryTagging);
    }
  }

  void setTrackAllocationStacks(bool Track) {
    initThreadMaybe();
    if (getFlags()->allocation_ring_buffer_size <= 0) {
      DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
      return;
    }

    if (Track) {
      initRingBufferMaybe();
      Primary.Options.set(OptionBit::TrackAllocationStacks);
    } else
      Primary.Options.clear(OptionBit::TrackAllocationStacks);
  }

  void setFillContents(FillContentsMode FillContents) {
    initThreadMaybe();
    Primary.Options.setFillContentsMode(FillContents);
  }

  void setAddLargeAllocationSlack(bool AddSlack) {
    initThreadMaybe();
    if (AddSlack)
      Primary.Options.set(OptionBit::AddLargeAllocationSlack);
    else
      Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
  }

  const char *getStackDepotAddress() {
    initThreadMaybe();
    AllocationRingBuffer *RB = getRingBuffer();
    return RB ? reinterpret_cast<char *>(RB->Depot) : nullptr;
  }

  uptr getStackDepotSize() {
    initThreadMaybe();
    AllocationRingBuffer *RB = getRingBuffer();
    return RB ? RB->StackDepotSize : 0;
  }

  const char *getRegionInfoArrayAddress() const {
    return Primary.getRegionInfoArrayAddress();
  }

  static uptr getRegionInfoArraySize() {
    return PrimaryT::getRegionInfoArraySize();
  }

  const char *getRingBufferAddress() {
    initThreadMaybe();
    return reinterpret_cast<char *>(getRingBuffer());
  }

  uptr getRingBufferSize() {
    initThreadMaybe();
    AllocationRingBuffer *RB = getRingBuffer();
    return RB && RB->RingBufferElements
               ? ringBufferSizeInBytes(RB->RingBufferElements)
               : 0;
  }

  static const uptr MaxTraceSize = 64;

  static void collectTraceMaybe(const StackDepot *Depot,
                                uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
    uptr RingPos, Size;
    if (!Depot->find(Hash, &RingPos, &Size))
      return;
    for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
      Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
  }

  static void getErrorInfo(struct scudo_error_info *ErrorInfo,
                           uintptr_t FaultAddr, const char *DepotPtr,
                           size_t DepotSize, const char *RegionInfoPtr,
                           const char *RingBufferPtr, size_t RingBufferSize,
                           const char *Memory, const char *MemoryTags,
                           uintptr_t MemoryAddr, size_t MemorySize) {
    // N.B. we need to support corrupted data in any of the buffers here. We get
    // this information from an external process (the crashing process) that
    // should not be able to crash the crash dumper (crash_dump on Android).
    // See also the get_error_info_fuzzer.
    *ErrorInfo = {};
    if (!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
        MemoryAddr + MemorySize < MemoryAddr)
      return;

    const StackDepot *Depot = nullptr;
    if (DepotPtr) {
      // Check for a corrupted StackDepot: first verify that we can read the
      // metadata, then that the metadata matches the size.
      if (DepotSize < sizeof(*Depot))
        return;
      Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
      if (!Depot->isValid(DepotSize))
        return;
    }

    size_t NextErrorReport = 0;

    // Check for OOB in the current block and the two surrounding blocks. Beyond
    // that, UAF is more likely.
    if (extractTag(FaultAddr) != 0)
      getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
                         RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
                         MemorySize, 0, 2);

    // Check the ring buffer. For primary allocations this will only find UAF;
    // for secondary allocations we can find either UAF or OOB.
    getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
                           RingBufferPtr, RingBufferSize);

    // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
    // Beyond that we are likely to hit false positives.
    if (extractTag(FaultAddr) != 0)
      getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
                         RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
                         MemorySize, 2, 16);
  }

private:
  typedef typename PrimaryT::SizeClassMap SizeClassMap;

  static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
  static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
  static const uptr MinAlignment = 1UL << MinAlignmentLog;
  static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);

  static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
                "Minimal alignment must at least cover a chunk header.");
  static_assert(!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
                    MinAlignment >= archMemoryTagGranuleSize(),
                "");

  static const u32 BlockMarker = 0x44554353U;

  // These are indexes into an "array" of 32-bit values that store information
  // inline with a chunk that is relevant to diagnosing memory tag faults, where
  // 0 corresponds to the address of the user memory. This means that only
  // negative indexes may be used. The smallest index that may be used is -2,
  // which corresponds to 8 bytes before the user memory, because the chunk
  // header size is 8 bytes and in allocators that support memory tagging the
  // minimum alignment is at least the tag granule size (16 on aarch64).
  static const sptr MemTagAllocationTraceIndex = -2;
  static const sptr MemTagAllocationTidIndex = -1;

  u32 Cookie = 0;
  u32 QuarantineMaxChunkSize = 0;

  GlobalStats Stats;
  PrimaryT Primary;
  SecondaryT Secondary;
  QuarantineT Quarantine;
  TSDRegistryT TSDRegistry;
  pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;

#ifdef GWP_ASAN_HOOKS
  gwp_asan::GuardedPoolAllocator GuardedAlloc;
  uptr GuardedAllocSlotSize = 0;
#endif // GWP_ASAN_HOOKS

  struct AllocationRingBuffer {
    struct Entry {
      atomic_uptr Ptr;
      atomic_uptr AllocationSize;
      atomic_u32 AllocationTrace;
      atomic_u32 AllocationTid;
      atomic_u32 DeallocationTrace;
      atomic_u32 DeallocationTid;
    };
    StackDepot *Depot = nullptr;
    uptr StackDepotSize = 0;
    MemMapT RawRingBufferMap;
    MemMapT RawStackDepotMap;
    u32 RingBufferElements = 0;
    atomic_uptr Pos;
    // An array of Size (at least one) elements of type Entry immediately
    // follows this struct.
  };
  static_assert(sizeof(AllocationRingBuffer) %
                        alignof(typename AllocationRingBuffer::Entry) ==
                    0,
                "invalid alignment");
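
  // Layout sketch (illustrative): a single mapping holds the header struct
  // immediately followed by the entries, so entry I can be addressed as
  //   reinterpret_cast<AllocationRingBuffer::Entry *>(RB + 1)[I]
  // for 0 <= I < RB->RingBufferElements; the static_assert above ensures
  // that RB + 1 is suitably aligned for Entry.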

  // Lock to initialize the RingBuffer
  HybridMutex RingBufferInitLock;

  // Pointer to memory mapped area starting with AllocationRingBuffer struct,
  // and immediately followed by Size elements of type Entry.
  atomic_uptr RingBufferAddress = {};

  AllocationRingBuffer *getRingBuffer() {
    return reinterpret_cast<AllocationRingBuffer *>(
        atomic_load(&RingBufferAddress, memory_order_acquire));
  }

  // The following might get optimized out by the compiler.
  NOINLINE void performSanityChecks() {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be small. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    Chunk::UnpackedHeader Header = {};
    const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
                                         SizeClassMap::MaxSize - MinAlignment);
    const uptr MaxOffset =
        (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
    Header.Offset = MaxOffset & Chunk::OffsetMask;
    if (UNLIKELY(Header.Offset != MaxOffset))
      reportSanityCheckError("offset");

    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
      reportSanityCheckError("size (or unused bytes)");

    const uptr LargestClassId = SizeClassMap::LargestClassId;
    Header.ClassId = LargestClassId;
    if (UNLIKELY(Header.ClassId != LargestClassId))
      reportSanityCheckError("class ID");
  }

  static inline void *getBlockBegin(const void *Ptr,
                                    Chunk::UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
        (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
  }
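
  // A worked example (illustrative, with an 8-byte header and a
  // MinAlignmentLog of 4): for an aligned allocation whose user pointer was
  // pushed 64 bytes past the default position, initChunk() stored
  // 64 >> 4 = 4 in Header->Offset, so the block begins at
  // Ptr - 8 - (4 << 4) = Ptr - 72.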

  // Return the size of a chunk as requested during its allocation.
  inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
    if (LIKELY(Header->ClassId))
      return SizeOrUnusedBytes;
    if (allocatorSupportsMemoryTagging<AllocatorConfig>())
      Ptr = untagPointer(const_cast<void *>(Ptr));
    return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
           reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
  }
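
  // Illustrative: a Secondary-backed chunk stores the bytes left unused at
  // the end of the block rather than the size. For a block ending at 0x1000,
  // a user pointer at 0xe00 and SizeOrUnusedBytes == 0x80, the requested
  // size comes out as 0x1000 - 0xe00 - 0x80 = 0x180.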
1069 
initChunk(const uptr ClassId,const Chunk::Origin Origin,void * Block,const uptr UserPtr,const uptr SizeOrUnusedBytes,const FillContentsMode FillContents)1070   ALWAYS_INLINE void *initChunk(const uptr ClassId, const Chunk::Origin Origin,
1071                                 void *Block, const uptr UserPtr,
1072                                 const uptr SizeOrUnusedBytes,
1073                                 const FillContentsMode FillContents) {
1074     // Compute the default pointer before adding the header tag
1075     const uptr DefaultAlignedPtr =
1076         reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
1077 
1078     Block = addHeaderTag(Block);
    // Only fill the contents for Primary-backed allocations; the Secondary
    // has already filled the contents.
1081     if (ClassId != 0 && UNLIKELY(FillContents != NoFill)) {
1082       // This condition is not necessarily unlikely, but since memset is
1083       // costly, we might as well mark it as such.
1084       memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
1085              PrimaryT::getSizeByClassId(ClassId));
1086     }
1087 
1088     Chunk::UnpackedHeader Header = {};
1089 
1090     if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
1091       const uptr Offset = UserPtr - DefaultAlignedPtr;
1092       DCHECK_GE(Offset, 2 * sizeof(u32));
1093       // The BlockMarker has no security purpose, but is specifically meant for
1094       // the chunk iteration function that can be used in debugging situations.
1095       // It is the only situation where we have to locate the start of a chunk
1096       // based on its block address.
1097       reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
1098       reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
1099       Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
1100     }
1101 
1102     Header.ClassId = ClassId & Chunk::ClassIdMask;
1103     Header.State = Chunk::State::Allocated;
1104     Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
1105     Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
1106     Chunk::storeHeader(Cookie, reinterpret_cast<void *>(addHeaderTag(UserPtr)),
1107                        &Header);
1108 
1109     return reinterpret_cast<void *>(UserPtr);
1110   }
1111 
1112   NOINLINE void *
initChunkWithMemoryTagging(const uptr ClassId,const Chunk::Origin Origin,void * Block,const uptr UserPtr,const uptr Size,const uptr SizeOrUnusedBytes,const FillContentsMode FillContents)1113   initChunkWithMemoryTagging(const uptr ClassId, const Chunk::Origin Origin,
1114                              void *Block, const uptr UserPtr, const uptr Size,
1115                              const uptr SizeOrUnusedBytes,
1116                              const FillContentsMode FillContents) {
1117     const Options Options = Primary.Options.load();
1118     DCHECK(useMemoryTagging<AllocatorConfig>(Options));
1119 
1120     // Compute the default pointer before adding the header tag
1121     const uptr DefaultAlignedPtr =
1122         reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
1123 
1124     void *Ptr = reinterpret_cast<void *>(UserPtr);
1125     void *TaggedPtr = Ptr;
1126 
1127     if (LIKELY(ClassId)) {
1128       // Init the primary chunk.
1129       //
1130       // We only need to zero or tag the contents for Primary backed
1131       // allocations. We only set tags for primary allocations in order to avoid
1132       // faulting potentially large numbers of pages for large secondary
1133       // allocations. We assume that guard pages are enough to protect these
1134       // allocations.
1135       //
1136       // FIXME: When the kernel provides a way to set the background tag of a
1137       // mapping, we should be able to tag secondary allocations as well.
1138       //
1139       // When memory tagging is enabled, zeroing the contents is done as part of
1140       // setting the tag.
1141 
1142       Chunk::UnpackedHeader Header;
1143       const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
1144       const uptr BlockUptr = reinterpret_cast<uptr>(Block);
1145       const uptr BlockEnd = BlockUptr + BlockSize;
1146       // If possible, try to reuse the UAF tag that was set by deallocate().
1147       // For simplicity, only reuse tags if we have the same start address as
1148       // the previous allocation. This handles the majority of cases since
1149       // most allocations will not be more aligned than the minimum alignment.
1150       //
1151       // We need to handle situations involving reclaimed chunks, and retag
1152       // the reclaimed portions if necessary. In the case where the chunk is
1153       // fully reclaimed, the chunk's header will be zero, which will trigger
1154       // the code path for new mappings and invalid chunks that prepares the
1155       // chunk from scratch. There are three possibilities for partial
1156       // reclaiming:
1157       //
1158       // (1) Header was reclaimed, data was partially reclaimed.
1159       // (2) Header was not reclaimed, all data was reclaimed (e.g. because
1160       //     data started on a page boundary).
1161       // (3) Header was not reclaimed, data was partially reclaimed.
1162       //
1163       // Case (1) will be handled in the same way as for full reclaiming,
1164       // since the header will be zero.
1165       //
1166       // We can detect case (2) by loading the tag from the start
1167       // of the chunk. If it is zero, it means that either all data was
1168       // reclaimed (since we never use zero as the chunk tag), or that the
1169       // previous allocation was of size zero. Either way, we need to prepare
1170       // a new chunk from scratch.
1171       //
1172       // We can detect case (3) by moving to the next page (if covered by the
1173       // chunk) and loading the tag of its first granule. If it is zero, it
1174       // means that all following pages may need to be retagged. On the other
1175       // hand, if it is nonzero, we can assume that all following pages are
1176       // still tagged, according to the logic that if any of the pages
1177       // following the next page were reclaimed, the next page would have been
1178       // reclaimed as well.
1179       uptr TaggedUserPtr;
1180       uptr PrevUserPtr;
1181       if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
1182           PrevUserPtr == UserPtr &&
1183           (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
1184         uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
1185         const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
1186         if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
1187           PrevEnd = NextPage;
1188         TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
1189         resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
1190         if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
1191           // If an allocation needs to be zeroed (i.e. calloc) we can normally
1192           // avoid zeroing the memory now since we can rely on memory having
1193           // been zeroed on free, as this is normally done while setting the
1194           // UAF tag. But if tagging was disabled per-thread when the memory
1195           // was freed, it would not have been retagged and thus zeroed, and
1196           // therefore it needs to be zeroed now.
1197           memset(TaggedPtr, 0,
1198                  Min(Size, roundUp(PrevEnd - TaggedUserPtr,
1199                                    archMemoryTagGranuleSize())));
1200         } else if (Size) {
1201           // Clear any stack metadata that may have previously been stored in
1202           // the chunk data.
1203           memset(TaggedPtr, 0, archMemoryTagGranuleSize());
1204         }
1205       } else {
1206         const uptr OddEvenMask =
1207             computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
1208         TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
1209       }
1210       storePrimaryAllocationStackMaybe(Options, Ptr);
1211     } else {
1212       // Init the secondary chunk.
1213 
1214       Block = addHeaderTag(Block);
1215       Ptr = addHeaderTag(Ptr);
1216       storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
1217       storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
1218     }
1219 
1220     Chunk::UnpackedHeader Header = {};
1221 
1222     if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
1223       const uptr Offset = UserPtr - DefaultAlignedPtr;
1224       DCHECK_GE(Offset, 2 * sizeof(u32));
1225       // The BlockMarker has no security purpose, but is specifically meant for
1226       // the chunk iteration function that can be used in debugging situations.
1227       // It is the only situation where we have to locate the start of a chunk
1228       // based on its block address.
1229       reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
1230       reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
    }

    Header.ClassId = ClassId & Chunk::ClassIdMask;
    Header.State = Chunk::State::Allocated;
    Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
    Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
    Chunk::storeHeader(Cookie, Ptr, &Header);

    return TaggedPtr;
  }

  void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
                                   Chunk::UnpackedHeader *Header,
                                   uptr Size) NO_THREAD_SAFETY_ANALYSIS {
    void *Ptr = getHeaderTaggedPointer(TaggedPtr);
    // If the quarantine is disabled, or the actual size of the chunk is 0 or
    // larger than the maximum allowed, we return the chunk directly to the
    // backend. This purposefully underflows for Size == 0.
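    // (Unsigned arithmetic: with Size == 0, Size - 1 wraps around to the
    // maximum uptr value, which always compares >= QuarantineMaxChunkSize,
    // so zero-sized chunks take the bypass path.)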
    const bool BypassQuarantine = !Quarantine.getCacheSize() ||
                                  ((Size - 1) >= QuarantineMaxChunkSize) ||
                                  !Header->ClassId;
    if (BypassQuarantine)
      Header->State = Chunk::State::Available;
    else
      Header->State = Chunk::State::Quarantined;

    if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options)))
      Header->OriginOrWasZeroed = 0U;
    else {
      Header->OriginOrWasZeroed =
          Header->ClassId && !TSDRegistry.getDisableMemInit();
    }

    Chunk::storeHeader(Cookie, Ptr, Header);

    if (BypassQuarantine) {
      void *BlockBegin;
      if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
        // Must do this after storeHeader because loadHeader uses a tagged ptr.
        if (allocatorSupportsMemoryTagging<AllocatorConfig>())
          Ptr = untagPointer(Ptr);
        BlockBegin = getBlockBegin(Ptr, Header);
      } else {
        BlockBegin = retagBlock(Options, TaggedPtr, Ptr, Header, Size, true);
      }

      const uptr ClassId = Header->ClassId;
      if (LIKELY(ClassId)) {
        bool CacheDrained;
        {
          typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
          CacheDrained = TSD->getCache().deallocate(ClassId, BlockBegin);
        }
        // When we have drained some blocks back to the Primary from the TSD,
        // we may have the chance to release some pages as well. Note that in
        // order not to block other threads' access to the TSD, release the
        // TSD first and then try the page release.
        if (CacheDrained)
          Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
      } else {
        Secondary.deallocate(Options, BlockBegin);
      }
    } else {
      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options)))
        retagBlock(Options, TaggedPtr, Ptr, Header, Size, false);
      typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
      Quarantine.put(&TSD->getQuarantineCache(),
                     QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
    }
  }

  NOINLINE void *retagBlock(const Options &Options, void *TaggedPtr, void *&Ptr,
                            Chunk::UnpackedHeader *Header, const uptr Size,
                            bool BypassQuarantine) {
    DCHECK(useMemoryTagging<AllocatorConfig>(Options));

    const u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
    storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
    if (Header->ClassId && !TSDRegistry.getDisableMemInit()) {
      uptr TaggedBegin, TaggedEnd;
      const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
          Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
          Header->ClassId);
      // Exclude the previous tag so that immediate use after free is
      // detected 100% of the time.
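      // (For instance, if PrevTag were 5, adding bit 5 to the exclude mask
      // prevents setRandomTag() from picking tag 5 again, so a dangling
      // pointer still carrying the old tag can never match the retagged
      // memory.)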
      setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
                   &TaggedEnd);
    }

    Ptr = untagPointer(Ptr);
    void *BlockBegin = getBlockBegin(Ptr, Header);
    if (BypassQuarantine && !Header->ClassId) {
      storeTags(reinterpret_cast<uptr>(BlockBegin),
                reinterpret_cast<uptr>(Ptr));
    }

    return BlockBegin;
  }

  bool getChunkFromBlock(uptr Block, uptr *Chunk,
                         Chunk::UnpackedHeader *Header) {
    *Chunk =
        Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
    return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
  }

  static uptr getChunkOffsetFromBlock(const char *Block) {
    u32 Offset = 0;
    if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
      Offset = reinterpret_cast<const u32 *>(Block)[1];
    return Offset + Chunk::getHeaderSize();
  }
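  // (Sketch of the layout implied by the two functions above: when an aligned
  // allocation left a gap at the start of the block, the block begins with
  // [BlockMarker:u32][Offset:u32] followed by the chunk header and user data;
  // otherwise the chunk header sits directly at the block start.)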

  // Set the tag of the granule past the end of the allocation to 0, to catch
  // linear overflows even if a previous larger allocation used the same block
  // and tag. Only do this if the granule past the end is in our block, because
  // this would otherwise lead to a SEGV if the allocation covers the entire
  // block and our block is at the end of a mapping. The tag of the next block's
  // header granule will be set to 0, so it will serve the purpose of catching
  // linear overflows in this case.
  //
  // For allocations of size 0 we do not end up storing the address tag to the
  // memory tag space, which getInlineErrorInfo() normally relies on to match
  // address tags against chunks. To allow matching in this case we store the
  // address tag in the first byte of the chunk.
  void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
    DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
    uptr UntaggedEnd = untagPointer(End);
    if (UntaggedEnd != BlockEnd) {
      storeTag(UntaggedEnd);
      if (Size == 0)
        *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
    }
  }

  void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
                           uptr BlockEnd) {
    // Prepare the granule before the chunk to store the chunk header by setting
    // its tag to 0. Normally its tag will already be 0, but in the case where a
    // chunk holding a low alignment allocation is reused for a higher alignment
    // allocation, the chunk may already have a non-zero tag from the previous
    // allocation.
    storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());

    uptr TaggedBegin, TaggedEnd;
    setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);

    storeEndMarker(TaggedEnd, Size, BlockEnd);
    return reinterpret_cast<void *>(TaggedBegin);
  }

  void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
                         uptr BlockEnd) {
    uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
    uptr RoundNewPtr;
    if (RoundOldPtr >= NewPtr) {
      // If the allocation is shrinking we just need to set the tag past the end
      // of the allocation to 0. See explanation in storeEndMarker() above.
      RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
    } else {
      // Set the memory tag of the region
      // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
      // to the pointer tag stored in OldPtr.
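      // (Illustrative numbers, assuming a 16-byte tag granule: growing an
      // allocation from 20 to 40 bytes gives RoundOldPtr = Begin + 32 and
      // NewPtr = Begin + 40, so storeTags() retags [Begin + 32, Begin + 48)
      // and returns Begin + 48, where the end marker is then placed.)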
      RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
    }
    storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
  }

  void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    AllocationRingBuffer *RB = getRingBuffer();
    if (!RB)
      return;
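    // The first two u32 slots of the user data double as metadata; the same
    // indices are read back by getInlineErrorInfo() when matching a fault
    // against this chunk.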
    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(RB->Depot);
    Ptr32[MemTagAllocationTidIndex] = getThreadID();
  }

  void storeRingBufferEntry(AllocationRingBuffer *RB, void *Ptr,
                            u32 AllocationTrace, u32 AllocationTid,
                            uptr AllocationSize, u32 DeallocationTrace,
                            u32 DeallocationTid) {
    uptr Pos = atomic_fetch_add(&RB->Pos, 1, memory_order_relaxed);
    typename AllocationRingBuffer::Entry *Entry =
        getRingBufferEntry(RB, Pos % RB->RingBufferElements);

    // First invalidate our entry so that we don't attempt to interpret a
    // partially written state in getRingBufferErrorInfo(). The fences below
    // ensure that the compiler does not move the stores to Ptr in between the
    // stores to the other fields.
    atomic_store_relaxed(&Entry->Ptr, 0);

    __atomic_signal_fence(__ATOMIC_SEQ_CST);
    atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
    atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
    atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
    atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
    atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
    __atomic_signal_fence(__ATOMIC_SEQ_CST);

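    // Publish the entry last: readers treat a zero Ptr as unused (see
    // getRingBufferErrorInfo()), so a concurrently observed half-written
    // entry is simply skipped.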
    atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
  }

  void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
                                          uptr Size) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    AllocationRingBuffer *RB = getRingBuffer();
    if (!RB)
      return;
    u32 Trace = collectStackTrace(RB->Depot);
    u32 Tid = getThreadID();

    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    Ptr32[MemTagAllocationTraceIndex] = Trace;
    Ptr32[MemTagAllocationTidIndex] = Tid;

    storeRingBufferEntry(RB, untagPointer(Ptr), Trace, Tid, Size, 0, 0);
  }

  void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
                                   u8 PrevTag, uptr Size) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    AllocationRingBuffer *RB = getRingBuffer();
    if (!RB)
      return;
    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
    u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];

    u32 DeallocationTrace = collectStackTrace(RB->Depot);
    u32 DeallocationTid = getThreadID();

    storeRingBufferEntry(RB, addFixedTag(untagPointer(Ptr), PrevTag),
                         AllocationTrace, AllocationTid, Size,
                         DeallocationTrace, DeallocationTid);
  }

  static const size_t NumErrorReports =
      sizeof(((scudo_error_info *)nullptr)->reports) /
      sizeof(((scudo_error_info *)nullptr)->reports[0]);

  static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
                                 size_t &NextErrorReport, uintptr_t FaultAddr,
                                 const StackDepot *Depot,
                                 const char *RegionInfoPtr, const char *Memory,
                                 const char *MemoryTags, uintptr_t MemoryAddr,
                                 size_t MemorySize, size_t MinDistance,
                                 size_t MaxDistance) {
    uptr UntaggedFaultAddr = untagPointer(FaultAddr);
    u8 FaultAddrTag = extractTag(FaultAddr);
    BlockInfo Info =
        PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);

    auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
      if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
          Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
        return false;
      *Data = &Memory[Addr - MemoryAddr];
      *Tag = static_cast<u8>(
          MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
      return true;
    };

    auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
                         Chunk::UnpackedHeader *Header, const u32 **Data,
                         u8 *Tag) {
      const char *BlockBegin;
      u8 BlockBeginTag;
      if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
        return false;
      uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
      *ChunkAddr = Addr + ChunkOffset;

      const char *ChunkBegin;
      if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
        return false;
      *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
          ChunkBegin - Chunk::getHeaderSize());
      *Data = reinterpret_cast<const u32 *>(ChunkBegin);

      // Allocations of size 0 will have stashed the tag in the first byte of
      // the chunk, see storeEndMarker().
      if (Header->SizeOrUnusedBytes == 0)
        *Tag = static_cast<u8>(*ChunkBegin);

      return true;
    };

    if (NextErrorReport == NumErrorReports)
      return;

    auto CheckOOB = [&](uptr BlockAddr) {
      if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
        return false;

      uptr ChunkAddr;
      Chunk::UnpackedHeader Header;
      const u32 *Data;
      uint8_t Tag;
      if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
          Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
        return false;

      auto *R = &ErrorInfo->reports[NextErrorReport++];
      R->error_type =
          UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
      R->allocation_address = ChunkAddr;
      R->allocation_size = Header.SizeOrUnusedBytes;
      if (Depot) {
        collectTraceMaybe(Depot, R->allocation_trace,
                          Data[MemTagAllocationTraceIndex]);
      }
      R->allocation_tid = Data[MemTagAllocationTidIndex];
      return NextErrorReport == NumErrorReports;
    };

    if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
      return;

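    // Scan candidate blocks outward from the faulting block, alternating
    // between higher and lower addresses, so closer (more plausible)
    // candidates are reported first.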
    for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
      if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
          CheckOOB(Info.BlockBegin - I * Info.BlockSize))
        return;
  }

  static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
                                     size_t &NextErrorReport,
                                     uintptr_t FaultAddr,
                                     const StackDepot *Depot,
                                     const char *RingBufferPtr,
                                     size_t RingBufferSize) {
    auto *RingBuffer =
        reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
    size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
    if (!RingBuffer || RingBufferElements == 0 || !Depot)
      return;
    uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);

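    // Walk backwards from the most recently written entry; the index
    // arithmetic deliberately relies on unsigned wraparound, and entries are
    // only ever addressed via I % RingBufferElements.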
    for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
                           NextErrorReport != NumErrorReports;
         --I) {
      auto *Entry = getRingBufferEntry(RingBuffer, I % RingBufferElements);
      uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
      if (!EntryPtr)
        continue;

      uptr UntaggedEntryPtr = untagPointer(EntryPtr);
      uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
      u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
      u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
      u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
      u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);

      if (DeallocationTid) {
        // For UAF we only consider in-bounds fault addresses because
        // out-of-bounds UAF is rare and attempting to detect it is very likely
        // to result in false positives.
        if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
          continue;
      } else {
        // Ring buffer OOB is only possible with secondary allocations. In this
        // case we are guaranteed a guard region of at least a page on either
        // side of the allocation (guard page on the right, guard page + tagged
        // region on the left), so ignore any faults outside of that range.
        if (FaultAddr < EntryPtr - getPageSizeCached() ||
            FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
          continue;

        // For UAF the ring buffer will contain two entries, one for the
        // allocation and another for the deallocation. Don't report buffer
        // overflow/underflow using the allocation entry if we have already
        // collected a report from the deallocation entry.
        bool Found = false;
        for (uptr J = 0; J != NextErrorReport; ++J) {
          if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
            Found = true;
            break;
          }
        }
        if (Found)
          continue;
      }

      auto *R = &ErrorInfo->reports[NextErrorReport++];
      if (DeallocationTid)
        R->error_type = USE_AFTER_FREE;
      else if (FaultAddr < EntryPtr)
        R->error_type = BUFFER_UNDERFLOW;
      else
        R->error_type = BUFFER_OVERFLOW;

      R->allocation_address = UntaggedEntryPtr;
      R->allocation_size = EntrySize;
      collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
      R->allocation_tid = AllocationTid;
      collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
      R->deallocation_tid = DeallocationTid;
    }
  }

  uptr getStats(ScopedString *Str) {
    Primary.getStats(Str);
    Secondary.getStats(Str);
    Quarantine.getStats(Str);
    TSDRegistry.getStats(Str);
    return Str->length();
  }

  static typename AllocationRingBuffer::Entry *
  getRingBufferEntry(AllocationRingBuffer *RB, uptr N) {
    char *RBEntryStart =
        &reinterpret_cast<char *>(RB)[sizeof(AllocationRingBuffer)];
    return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
        RBEntryStart)[N];
  }
  static const typename AllocationRingBuffer::Entry *
  getRingBufferEntry(const AllocationRingBuffer *RB, uptr N) {
    const char *RBEntryStart =
        &reinterpret_cast<const char *>(RB)[sizeof(AllocationRingBuffer)];
    return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
        RBEntryStart)[N];
  }

  void initRingBufferMaybe() {
    ScopedLock L(RingBufferInitLock);
    if (getRingBuffer() != nullptr)
      return;

    int ring_buffer_size = getFlags()->allocation_ring_buffer_size;
    if (ring_buffer_size <= 0)
      return;

    u32 AllocationRingBufferSize = static_cast<u32>(ring_buffer_size);

    // We store alloc and free stacks for each entry.
    constexpr u32 kStacksPerRingBufferEntry = 2;
    constexpr u32 kMaxU32Pow2 = ~(UINT32_MAX >> 1);
    static_assert(isPowerOfTwo(kMaxU32Pow2));
    // On Android we always have 3 frames at the bottom: __start_main,
    // __libc_init, main, and 3 at the top: malloc, scudo_malloc and
    // Allocator::allocate. This leaves 10 frames for the user app. The next
    // smallest power of two (8) would only leave 2, which is clearly too
    // little.
    constexpr u32 kFramesPerStack = 16;
    static_assert(isPowerOfTwo(kFramesPerStack));

    if (AllocationRingBufferSize > kMaxU32Pow2 / kStacksPerRingBufferEntry)
      return;
    u32 TabSize = static_cast<u32>(roundUpPowerOfTwo(kStacksPerRingBufferEntry *
                                                     AllocationRingBufferSize));
    if (TabSize > UINT32_MAX / kFramesPerStack)
      return;
    u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);

    uptr StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
                          sizeof(atomic_u32) * TabSize;
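    // (Worked example, for illustration only: allocation_ring_buffer_size =
    // 1000 gives TabSize = roundUpPowerOfTwo(2000) = 2048 and RingSize =
    // 2048 * 16 = 32768, so the depot needs sizeof(StackDepot) + 8 * 32768 +
    // 4 * 2048 bytes, rounded up to a page below.)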
    MemMapT DepotMap;
    DepotMap.map(
        /*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
        "scudo:stack_depot");
    auto *Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
    Depot->init(RingSize, TabSize);

    MemMapT MemMap;
    MemMap.map(
        /*Addr=*/0U,
        roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
                getPageSizeCached()),
        "scudo:ring_buffer");
    auto *RB = reinterpret_cast<AllocationRingBuffer *>(MemMap.getBase());
    RB->RawRingBufferMap = MemMap;
    RB->RingBufferElements = AllocationRingBufferSize;
    RB->Depot = Depot;
    RB->StackDepotSize = StackDepotSize;
    RB->RawStackDepotMap = DepotMap;

    atomic_store(&RingBufferAddress, reinterpret_cast<uptr>(RB),
                 memory_order_release);
  }

  void unmapRingBuffer() {
    AllocationRingBuffer *RB = getRingBuffer();
    if (RB == nullptr)
      return;
    // N.B.: because the RawStackDepotMap object lives on pages owned by
    // RawRingBufferMap, the order of the two unmaps below is very important.
    RB->RawStackDepotMap.unmap();
    // Note that the `RB->RawRingBufferMap` is stored on the pages managed by
    // itself. Take over the ownership before calling unmap() so that any
    // operation along with unmap() won't touch inaccessible pages.
    MemMapT RawRingBufferMap = RB->RawRingBufferMap;
    RawRingBufferMap.unmap();
    atomic_store(&RingBufferAddress, 0, memory_order_release);
  }

  static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
    return sizeof(AllocationRingBuffer) +
           RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
  }

  static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
    if (Bytes < sizeof(AllocationRingBuffer)) {
      return 0;
    }
    return (Bytes - sizeof(AllocationRingBuffer)) /
           sizeof(typename AllocationRingBuffer::Entry);
  }
};

} // namespace scudo

#endif // SCUDO_COMBINED_H_