/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/base/SkBlockAllocator.h"

#include "include/private/base/SkDebug.h"
#include "include/private/base/SkTo.h"

#ifdef SK_DEBUG
#include <vector>
#endif

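// A rough usage sketch (the full API lives in SkBlockAllocator.h; SkSBlockAllocator and
// allocate<>() below are the header's names, shown here only for orientation):
//
//     SkSBlockAllocator<256> pool;                 // inline head block + 256 prealloc bytes
//     auto br = pool->allocate<4>(100);            // reserve 100 bytes, 4-byte aligned
//     void* p = br.fBlock->ptr(br.fAlignedOffset); // resolve the reservation to a pointer
//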
SkBlockAllocator::SkBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
                                   size_t additionalPreallocBytes)
        : fTail(&fHead)
        // Round up to the nearest max-aligned value, and then divide so that fBlockIncrement
        // can effectively fit higher byte counts in its 16 bits of storage
        , fBlockIncrement(SkTo<uint16_t>(
                std::min(SkAlignTo(blockIncrementBytes, kAddressAlign) / kAddressAlign,
                         (size_t) std::numeric_limits<uint16_t>::max())))
        , fGrowthPolicy(static_cast<uint64_t>(policy))
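        // Seed the growth sequence used by addBlock(): (fN0, fN1) starts at (1, 1) for the
        // linear and exponential policies, at (0, 1) for Fibonacci, and for kFixed fN0 stays 0
        // so every new block uses a single increment.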
        , fN0((policy == GrowthPolicy::kLinear || policy == GrowthPolicy::kExponential) ? 1 : 0)
        , fN1(1)
        // Since the head block is inline, it always fills the remaining space inside
        // SkBlockAllocator itself, and it can also take over the specified number of bytes
        // placed immediately after the allocator.
        , fHead(/*prev=*/nullptr, additionalPreallocBytes + BaseHeadBlockSize()) {
    SkASSERT(fBlockIncrement >= 1);
    SkASSERT(additionalPreallocBytes <= kMaxAllocationSize);
}

SkBlockAllocator::Block::Block(Block* prev, int allocationSize)
        : fNext(nullptr)
        , fPrev(prev)
        , fSize(allocationSize)
        , fCursor(kDataStart)
        , fMetadata(0)
        , fAllocatorMetadata(0) {
    SkASSERT(allocationSize >= (int) sizeof(Block));
    SkDEBUGCODE(fSentinel = kAssignedMarker;)

    this->poisonRange(kDataStart, fSize);
}

SkBlockAllocator::Block::~Block() {
    this->unpoisonRange(kDataStart, fSize);

    SkASSERT(fSentinel == kAssignedMarker);
    SkDEBUGCODE(fSentinel = kFreedMarker;) // FWIW
}

size_t SkBlockAllocator::totalSize() const {
    // Use size_t since the sum across all blocks could exceed 'int', even though each block won't
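    // (offsetof(SkBlockAllocator, fHead) counts the allocator's bookkeeping fields; fHead is the
    // final member, so everything from it onward is accounted for by the blocks' fSize values.)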
    size_t size = offsetof(SkBlockAllocator, fHead) + this->scratchBlockSize();
    for (const Block* b : this->blocks()) {
        size += b->fSize;
    }
    SkASSERT(size >= this->preallocSize());
    return size;
}

size_t SkBlockAllocator::totalUsableSpace() const {
    size_t size = this->scratchBlockSize();
    if (size > 0) {
        size -= kDataStart; // scratchBlockSize reports total block size, not usable size
    }
    for (const Block* b : this->blocks()) {
        size += (b->fSize - kDataStart);
    }
    SkASSERT(size >= this->preallocUsableSpace());
    return size;
}

size_t SkBlockAllocator::totalSpaceInUse() const {
    size_t size = 0;
    for (const Block* b : this->blocks()) {
        size += (b->fCursor - kDataStart);
    }
    SkASSERT(size <= this->totalUsableSpace());
    return size;
}

SkBlockAllocator::Block* SkBlockAllocator::findOwningBlock(const void* p) {
    // When in doubt, search in reverse to find an overlapping block.
    uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
    for (Block* b : this->rblocks()) {
        uintptr_t lowerBound = reinterpret_cast<uintptr_t>(b) + kDataStart;
        uintptr_t upperBound = reinterpret_cast<uintptr_t>(b) + b->fSize;
        if (lowerBound <= ptr && ptr < upperBound) {
            SkASSERT(b->fSentinel == kAssignedMarker);
            return b;
        }
    }
    return nullptr;
}

void SkBlockAllocator::releaseBlock(Block* block) {
    if (block == &fHead) {
        // Reset the cursor of the head block so that it can be reused if it becomes the new tail
        block->fCursor = kDataStart;
        block->fMetadata = 0;
        block->poisonRange(kDataStart, block->fSize);
        // Unlike in reset(), we don't set the head's next block to null because there are
        // potentially heap-allocated blocks that are still connected to it.
    } else {
        SkASSERT(block->fPrev);
        block->fPrev->fNext = block->fNext;
        if (block->fNext) {
            SkASSERT(fTail != block);
            block->fNext->fPrev = block->fPrev;
        } else {
            SkASSERT(fTail == block);
            fTail = block->fPrev;
        }

        // The released block becomes the new scratch block if it's bigger than the current one;
        // otherwise it is deleted.
        if (this->scratchBlockSize() < block->fSize) {
            SkASSERT(block != fHead.fPrev); // shouldn't already be the scratch block
            if (fHead.fPrev) {
                delete fHead.fPrev;
            }
            block->markAsScratch();
            fHead.fPrev = block;
        } else {
            delete block;
        }
    }

    // Decrement growth policy (opposite of addBlock()'s increment operations)
    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    if (fN0 > 0 && (fN1 > 1 || gp == GrowthPolicy::kFibonacci)) {
        SkASSERT(gp != GrowthPolicy::kFixed); // fixed never needs undoing; fN0 is always 0
        if (gp == GrowthPolicy::kLinear) {
            fN1 = fN1 - fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            // Subtract n0 from n1 to get the prior 2 terms in the fibonacci sequence
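            // (e.g. undoing (fN0, fN1) = (3, 5): temp = 5 - 3 = 2, fN1 = 5 - 2 = 3, fN0 = 2,
            // which restores the prior pair (2, 3))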
            int temp = fN1 - fN0; // yields prior fN0
            fN1 = fN1 - temp;     // yields prior fN1
            fN0 = temp;
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            // Divide by 2 to undo the 2N update from addBlock
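            // (e.g. (fN0, fN1) = (4, 4) becomes (2, 2))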
            fN1 = fN1 >> 1;
            fN0 = fN1;
        }
    }

    SkASSERT(fN1 >= 1 && fN0 >= 0);
}

void SkBlockAllocator::stealHeapBlocks(SkBlockAllocator* other) {
    Block* toSteal = other->fHead.fNext;
    if (toSteal) {
        // The stolen blocks connect back to this allocator's current tail, and this allocator's
        // new tail becomes the end of the other's block linked list.
        SkASSERT(other->fTail != &other->fHead);
        toSteal->fPrev = fTail;
        fTail->fNext = toSteal;
        fTail = other->fTail;
        // The other allocator becomes just its inline head block
        other->fTail = &other->fHead;
        other->fHead.fNext = nullptr;
    } // else no block to steal
}

void SkBlockAllocator::reset() {
    for (Block* b : this->rblocks()) {
        if (b == &fHead) {
            // Reset metadata and cursor; tail points to the head block again
            fTail = b;
            b->fNext = nullptr;
            b->fCursor = kDataStart;
            b->fMetadata = 0;
            // For reset(), but NOT releaseBlock(), the head's allocatorMetadata and the scratch
            // block are reset/destroyed.
            b->fAllocatorMetadata = 0;
            b->poisonRange(kDataStart, b->fSize);
            this->resetScratchSpace();
        } else {
            delete b;
        }
    }
    SkASSERT(fTail == &fHead && fHead.fNext == nullptr && fHead.fPrev == nullptr &&
             fHead.metadata() == 0 && fHead.fCursor == kDataStart);

    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    fN0 = (gp == GrowthPolicy::kLinear || gp == GrowthPolicy::kExponential) ? 1 : 0;
    fN1 = 1;
}

void SkBlockAllocator::resetScratchSpace() {
    if (fHead.fPrev) {
        delete fHead.fPrev;
        fHead.fPrev = nullptr;
    }
}

void SkBlockAllocator::addBlock(int minSize, int maxSize) {
    SkASSERT(minSize > (int) sizeof(Block) && minSize <= maxSize);

    // Max positive value for uint:23 storage (decltype(fN0) picks up uint64_t, not uint:23).
    static constexpr int kMaxN = (1 << 23) - 1;
    static_assert(2 * kMaxN <= std::numeric_limits<int32_t>::max()); // Growth policy won't overflow

    auto alignAllocSize = [](int size) {
        // Round to a nice boundary since the block isn't maxing out: if allocSize > 32K, align on
        // a 4K boundary; otherwise align on max_align_t, to play nicely with jemalloc (as in
        // SkArenaAlloc).
        int mask = size > (1 << 15) ? ((1 << 12) - 1) : (kAddressAlign - 1);
        return (size + mask) & ~mask;
    };
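    // For example, assuming kAddressAlign is 16: alignAllocSize(100) returns 112 (next 16-byte
    // boundary) and alignAllocSize(40000) returns 40960 (next 4K boundary, since 40000 > 32K).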

    int allocSize;
    void* mem = nullptr;
    if (this->scratchBlockSize() >= minSize) {
        // Activate the scratch block instead of making a new block
        SkASSERT(fHead.fPrev->isScratch());
        allocSize = fHead.fPrev->fSize;
        mem = fHead.fPrev;
        fHead.fPrev = nullptr;
    } else if (minSize < maxSize) {
        // Calculate the 'next' size per growth policy sequence
        GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
        int nextN1 = fN0 + fN1;
        int nextN0;
        if (gp == GrowthPolicy::kFixed || gp == GrowthPolicy::kLinear) {
            nextN0 = fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            nextN0 = fN1;
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            nextN0 = nextN1;
        }
        fN0 = std::min(kMaxN, nextN0);
        fN1 = std::min(kMaxN, nextN1);
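        // With sizeIncrement I, the successive heap block sizes (before alignment and clamping
        // against maxSize) are:
        //   kFixed:       I, I, I, ...
        //   kLinear:      2I, 3I, 4I, ...
        //   kFibonacci:   I, 2I, 3I, 5I, 8I, ...
        //   kExponential: 2I, 4I, 8I, ...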

        // However, we must guard against overflow here: the size-based asserts only prevented
        // alignment/addition overflows, while multiplication requires 2x bits instead of x+1.
        int sizeIncrement = fBlockIncrement * kAddressAlign;
        if (maxSize / sizeIncrement < nextN1) {
            // The growth policy would overflow, so use the max. We've already confirmed that
            // maxSize will be sufficient for the requested minSize
            allocSize = maxSize;
        } else {
            allocSize = std::min(alignAllocSize(std::max(minSize, sizeIncrement * nextN1)),
                                 maxSize);
        }
    } else {
        SkASSERT(minSize == maxSize);
        // Still align on a nice boundary; no max clamping since that would just undo the alignment
        allocSize = alignAllocSize(minSize);
    }

    // Create a new block and append it to the linked list of blocks in this allocator
    if (!mem) {
        mem = operator new(allocSize);
    }
    fTail->fNext = new (mem) Block(fTail, allocSize);
    fTail = fTail->fNext;
}

#ifdef SK_DEBUG
void SkBlockAllocator::validate() const {
    std::vector<const Block*> blocks;
    const Block* prev = nullptr;
    for (const Block* block : this->blocks()) {
        blocks.push_back(block);

        SkASSERT(kAssignedMarker == block->fSentinel);
        if (block == &fHead) {
            // The head block's fPrev may be non-null if it holds a scratch block, but that's not
            // considered part of the linked list
            SkASSERT(!prev && (!fHead.fPrev || fHead.fPrev->isScratch()));
        } else {
            SkASSERT(prev == block->fPrev);
        }
        if (prev) {
            SkASSERT(prev->fNext == block);
        }

        SkASSERT(block->fSize >= (int) sizeof(Block));
        SkASSERT(block->fCursor >= kDataStart);
        SkASSERT(block->fCursor <= block->fSize);

        prev = block;
    }
    SkASSERT(prev == fTail);
    SkASSERT(!blocks.empty());
    SkASSERT(blocks[0] == &fHead);

    // Confirm reverse iteration matches forward iteration
    size_t j = blocks.size();
    for (const Block* b : this->rblocks()) {
        SkASSERT(b == blocks[j - 1]);
        j--;
    }
    SkASSERT(j == 0);
}
#endif