/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkArenaAlloc_DEFINED
#define SkArenaAlloc_DEFINED

#include "include/private/base/SkASAN.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkSpan_impl.h"
#include "include/private/base/SkTFitsIn.h"
#include "include/private/base/SkTo.h"

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <new>
#include <type_traits>  // IWYU pragma: keep
#include <utility>

// We found allocating strictly doubling amounts of memory from the heap left too
// much unused slop, particularly on Android.  Instead we'll follow a Fibonacci-like
// progression.

// SkFibonacci47 is the first 47 Fibonacci numbers. Fib(47) is the largest value less than 2 ^ 32.
extern std::array<const uint32_t, 47> SkFibonacci47;
template<uint32_t kMaxSize>
class SkFibBlockSizes {
public:
    // staticBlockSize and firstAllocationSize are parameters describing the initial memory
    // layout. staticBlockSize describes the size of the inlined memory, and firstAllocationSize
    // describes the size of the first block to be allocated if the static block is exhausted. By
    // convention, firstAllocationSize is the first choice for the block unit size, followed by
    // staticBlockSize, followed by the default of 1024 bytes.
    SkFibBlockSizes(uint32_t staticBlockSize, uint32_t firstAllocationSize) : fIndex{0} {
        fBlockUnitSize = firstAllocationSize > 0 ? firstAllocationSize :
                         staticBlockSize     > 0 ? staticBlockSize     : 1024;

        SkASSERT_RELEASE(0 < fBlockUnitSize);
        SkASSERT_RELEASE(fBlockUnitSize < std::min(kMaxSize, (1u << 26) - 1));
    }

    uint32_t nextBlockSize() {
        uint32_t result = SkFibonacci47[fIndex] * fBlockUnitSize;

        if (SkTo<size_t>(fIndex + 1) < SkFibonacci47.size() &&
            SkFibonacci47[fIndex + 1] < kMaxSize / fBlockUnitSize)
        {
            fIndex += 1;
        }

        return result;
    }

private:
    uint32_t fIndex : 6;
    uint32_t fBlockUnitSize : 26;
};
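
// An illustrative sketch of how the sizes progress, assuming SkFibonacci47 (defined out-of-line)
// begins 1, 1, 2, 3, 5, 8, ...: with a 1024-byte block unit, successive calls to nextBlockSize()
// return roughly 1024, 1024, 2048, 3072, 5120, 8192, ... bytes, and the index stops advancing
// once the next Fibonacci multiple of the unit would reach kMaxSize.
//
//   SkFibBlockSizes<std::numeric_limits<uint32_t>::max()> sizes{/*staticBlockSize=*/0,
//                                                               /*firstAllocationSize=*/1024};
//   uint32_t b0 = sizes.nextBlockSize();  // 1 * 1024
//   uint32_t b1 = sizes.nextBlockSize();  // 1 * 1024
//   uint32_t b2 = sizes.nextBlockSize();  // 2 * 1024
//   uint32_t b3 = sizes.nextBlockSize();  // 3 * 1024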

// SkArenaAlloc allocates objects and destroys the allocated objects when it is destroyed. It's
// designed to minimize the number of underlying block allocations. SkArenaAlloc allocates first
// out of an (optional) user-provided block of memory, and when that's exhausted it allocates on
// the heap, starting with an allocation of firstHeapAllocation bytes.  If your data (plus a
// small overhead) fits in the user-provided block, SkArenaAlloc never uses the heap, and if it
// fits in firstHeapAllocation bytes, it'll use the heap only once. If 0 is specified for
// firstHeapAllocation, then blockSize is used unless that too is 0, in which case 1024 is used.
//
// Examples:
//
//   char block[mostCasesSize];
//   SkArenaAlloc arena(block, mostCasesSize, 0);
//
// If mostCasesSize is too large for the stack, you can use the following pattern.
//
//   std::unique_ptr<char[]> block{new char[mostCasesSize]};
//   SkArenaAlloc arena(block.get(), mostCasesSize, almostAllCasesSize);
//
// If the program only sometimes allocates memory, use the following pattern.
//
//   SkArenaAlloc arena(nullptr, 0, almostAllCasesSize);
//
// The storage does not necessarily need to be on the stack. Embedding the storage in a class also
// works.
//
//   class Foo {
//       char storage[mostCasesSize];
//       SkArenaAlloc arena{storage, mostCasesSize, 0};
//   };
//
// In addition, the system is optimized to handle POD data, including arrays of PODs (where
// POD really means data with no destructors). For POD data it has zero overhead per item, and a
// typical per-block overhead of 8 bytes. For non-POD objects there is a per-item overhead of 4
// bytes. For arrays of non-POD objects there is a per-array overhead of typically 8 bytes. There
// is an additional overhead of typically 8 bytes when switching from POD data to non-POD data.
//
// If additional blocks are needed, they are increased exponentially. This strategy bounds the
// recursion depth of RunDtorsOnBlock to O(log size-of-memory). Block sizes grow using the
// Fibonacci sequence, which means that for 2^32 bytes of memory there are 48 allocations, and
// for 2^48 there are 71 allocations.
class SkArenaAlloc {
public:
    SkArenaAlloc(char* block, size_t blockSize, size_t firstHeapAllocation);

    explicit SkArenaAlloc(size_t firstHeapAllocation)
        : SkArenaAlloc(nullptr, 0, firstHeapAllocation) {}

    SkArenaAlloc(const SkArenaAlloc&) = delete;
    SkArenaAlloc& operator=(const SkArenaAlloc&) = delete;
    SkArenaAlloc(SkArenaAlloc&&) = delete;
    SkArenaAlloc& operator=(SkArenaAlloc&&) = delete;

    ~SkArenaAlloc();

    template <typename Ctor>
    auto make(Ctor&& ctor) -> decltype(ctor(nullptr)) {
        using T = std::remove_pointer_t<decltype(ctor(nullptr))>;

        uint32_t size      = SkToU32(sizeof(T));
        uint32_t alignment = SkToU32(alignof(T));
        char* objStart;
        if (std::is_trivially_destructible<T>::value) {
            objStart = this->allocObject(size, alignment);
            fCursor = objStart + size;
            sk_asan_unpoison_memory_region(objStart, size);
        } else {
            objStart = this->allocObjectWithFooter(size + sizeof(Footer), alignment);
            // Can never be UB because max value is alignof(T).
            uint32_t padding = SkToU32(objStart - fCursor);

            // Advance to end of object to install footer.
            fCursor = objStart + size;
            sk_asan_unpoison_memory_region(objStart, size);
            FooterAction* releaser = [](char* objEnd) {
                char* objStart = objEnd - (sizeof(T) + sizeof(Footer));
                ((T*)objStart)->~T();
                return objStart;
            };
            this->installFooter(releaser, padding);
        }

        // This must be last to make objects with nested use of this allocator work.
        return ctor(objStart);
    }

    template <typename T, typename... Args>
    T* make(Args&&... args) {
        return this->make([&](void* objStart) {
            return new(objStart) T(std::forward<Args>(args)...);
        });
    }

    template <typename T>
    T* make() {
        if constexpr (std::is_standard_layout<T>::value && std::is_trivial<T>::value) {
            // Just allocate some aligned bytes. This generates smaller code.
            return (T*)this->makeBytesAlignedTo(sizeof(T), alignof(T));
        } else {
            // This isn't a POD type, so construct the object properly.
            return this->make([&](void* objStart) {
                return new(objStart) T();
            });
        }
    }

    template <typename T>
    T* makeArrayDefault(size_t count) {
        T* array = this->allocUninitializedArray<T>(count);
        for (size_t i = 0; i < count; i++) {
            // Default initialization: if T is primitive then the value is left uninitialized.
            new (&array[i]) T;
        }
        return array;
    }

    template <typename T>
    T* makeArray(size_t count) {
        T* array = this->allocUninitializedArray<T>(count);
        for (size_t i = 0; i < count; i++) {
            // Value initialization: if T is primitive then the value is zero-initialized.
            new (&array[i]) T();
        }
        return array;
    }

    template <typename T, typename Initializer>
    T* makeInitializedArray(size_t count, Initializer initializer) {
        T* array = this->allocUninitializedArray<T>(count);
        for (size_t i = 0; i < count; i++) {
            new (&array[i]) T(initializer(i));
        }
        return array;
    }

    template <typename T>
    T* makeArrayCopy(SkSpan<const T> toCopy) {
        T* array = this->allocUninitializedArray<T>(toCopy.size());
        if constexpr (std::is_trivially_copyable<T>::value) {
            memcpy(array, toCopy.data(), toCopy.size_bytes());
        } else {
            for (size_t i = 0; i < toCopy.size(); ++i) {
                new (&array[i]) T(toCopy[i]);
            }
        }
        return array;
    }

    // Only use makeBytesAlignedTo if none of the typed variants are practical to use.
    void* makeBytesAlignedTo(size_t size, size_t align) {
        AssertRelease(SkTFitsIn<uint32_t>(size));
        auto objStart = this->allocObject(SkToU32(size), SkToU32(align));
        fCursor = objStart + size;
        sk_asan_unpoison_memory_region(objStart, size);
        return objStart;
    }

protected:
    using FooterAction = char* (char*);
    struct Footer {
        uint8_t unaligned_action[sizeof(FooterAction*)];
        uint8_t padding;
    };

    char* cursor() { return fCursor; }
    char* end() { return fEnd; }

private:
    static void AssertRelease(bool cond) { if (!cond) { ::abort(); } }

    static char* SkipPod(char* footerEnd);
    static void RunDtorsOnBlock(char* footerEnd);
    static char* NextBlock(char* footerEnd);

    template <typename T>
    void installRaw(const T& val) {
        sk_asan_unpoison_memory_region(fCursor, sizeof(val));
        memcpy(fCursor, &val, sizeof(val));
        fCursor += sizeof(val);
    }
    void installFooter(FooterAction* releaser, uint32_t padding);

    void ensureSpace(uint32_t size, uint32_t alignment);

    char* allocObject(uint32_t size, uint32_t alignment) {
        uintptr_t mask = alignment - 1;
        uintptr_t alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
        uintptr_t totalSize = size + alignedOffset;
        AssertRelease(totalSize >= size);
        if (totalSize > static_cast<uintptr_t>(fEnd - fCursor)) {
            this->ensureSpace(size, alignment);
            alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
        }

        char* object = fCursor + alignedOffset;

        SkASSERT((reinterpret_cast<uintptr_t>(object) & (alignment - 1)) == 0);
        SkASSERT(object + size <= fEnd);

        return object;
    }

    char* allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment);

    template <typename T>
    T* allocUninitializedArray(size_t countZ) {
        AssertRelease(SkTFitsIn<uint32_t>(countZ));
        uint32_t count = SkToU32(countZ);

        char* objStart;
        AssertRelease(count <= std::numeric_limits<uint32_t>::max() / sizeof(T));
        uint32_t arraySize = SkToU32(count * sizeof(T));
        uint32_t alignment = SkToU32(alignof(T));

        if (std::is_trivially_destructible<T>::value) {
            objStart = this->allocObject(arraySize, alignment);
            fCursor = objStart + arraySize;
            sk_asan_unpoison_memory_region(objStart, arraySize);
        } else {
            constexpr uint32_t overhead = sizeof(Footer) + sizeof(uint32_t);
            AssertRelease(arraySize <= std::numeric_limits<uint32_t>::max() - overhead);
            uint32_t totalSize = arraySize + overhead;
            objStart = this->allocObjectWithFooter(totalSize, alignment);

            // Can never be UB because max value is alignof(T).
            uint32_t padding = SkToU32(objStart - fCursor);

            // Advance to end of array to install footer.
            fCursor = objStart + arraySize;
            sk_asan_unpoison_memory_region(objStart, arraySize);
            this->installRaw(SkToU32(count));
            this->installFooter(
                [](char* footerEnd) {
                    char* objEnd = footerEnd - (sizeof(Footer) + sizeof(uint32_t));
                    uint32_t count;
                    memmove(&count, objEnd, sizeof(uint32_t));
                    char* objStart = objEnd - count * sizeof(T);
                    T* array = (T*) objStart;
                    for (uint32_t i = 0; i < count; i++) {
                        array[i].~T();
                    }
                    return objStart;
                },
                padding);
        }

        return (T*)objStart;
    }

    char*          fDtorCursor;
    char*          fCursor;
    char*          fEnd;

    SkFibBlockSizes<std::numeric_limits<uint32_t>::max()> fFibonacciProgression;
};
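
// An illustrative sketch of typical SkArenaAlloc use; Widget, Node, and the sizes below are
// hypothetical, but the calls mirror the API above. Everything created through make() is
// destroyed when the arena itself is destroyed.
//
//   SkArenaAlloc arena(nullptr, 0, 4096);
//   Widget* w   = arena.make<Widget>(1, 2.0f);       // forwards args to Widget's constructor
//   int*    v   = arena.makeArray<int>(16);          // 16 zero-initialized ints
//   Node*   n   = arena.make([&](void* addr) {       // custom construction via a callable
//       return new (addr) Node(w);
//   });
//   void*   raw = arena.makeBytesAlignedTo(64, 16);  // untyped storage, 16-byte aligned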

class SkArenaAllocWithReset : public SkArenaAlloc {
public:
    SkArenaAllocWithReset(char* block, size_t blockSize, size_t firstHeapAllocation);

    explicit SkArenaAllocWithReset(size_t firstHeapAllocation)
            : SkArenaAllocWithReset(nullptr, 0, firstHeapAllocation) {}

    // Destroy all allocated objects, free any heap allocations.
    void reset();

    // Returns true if the alloc has never made any objects.
    bool isEmpty();

private:
    char* const    fFirstBlock;
    const uint32_t fFirstSize;
    const uint32_t fFirstHeapAllocationSize;
};
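
// A sketch of reusing one arena across iterations; processScanline, kScratchSize, and height are
// hypothetical. reset() destroys the objects made so far and frees any heap allocations.
//
//   SkArenaAllocWithReset arena(nullptr, 0, kScratchSize);
//   for (int y = 0; y < height; y++) {
//       processScanline(y, &arena);
//       arena.reset();
//   }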

// Helper for defining allocators with inline/reserved storage.
// For argument declarations, stick to the base type (SkArenaAlloc).
// Note: Inheriting from the storage first means the storage will outlive the
// SkArenaAlloc, letting ~SkArenaAlloc read it as it calls destructors.
// (This is mostly only relevant for strict tools like MSAN.)
template <size_t InlineStorageSize>
class SkSTArenaAlloc : private std::array<char, InlineStorageSize>, public SkArenaAlloc {
public:
    explicit SkSTArenaAlloc(size_t firstHeapAllocation = InlineStorageSize)
        : SkArenaAlloc{this->data(), this->size(), firstHeapAllocation} {}

    ~SkSTArenaAlloc() {
        // Be sure to unpoison the memory that is probably on the stack.
        sk_asan_unpoison_memory_region(this->data(), this->size());
    }
};
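
// A sketch of an arena with inline storage; the 128-byte size and Widget are hypothetical.
// Allocations that fit (plus bookkeeping) live in the inline buffer; the rest spill to the heap.
// As noted above, prefer the base type (SkArenaAlloc) in argument declarations.
//
//   SkSTArenaAlloc<128> arena;
//   Widget* w = arena.make<Widget>();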

template <size_t InlineStorageSize>
class SkSTArenaAllocWithReset
        : private std::array<char, InlineStorageSize>, public SkArenaAllocWithReset {
public:
    explicit SkSTArenaAllocWithReset(size_t firstHeapAllocation = InlineStorageSize)
            : SkArenaAllocWithReset{this->data(), this->size(), firstHeapAllocation} {}

    ~SkSTArenaAllocWithReset() {
        // Be sure to unpoison the memory that is probably on the stack.
        sk_asan_unpoison_memory_region(this->data(), this->size());
    }
};

#endif  // SkArenaAlloc_DEFINED