/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkTypes.h"
#include "include/private/base/SkDebug.h"
#include "src/base/SkBlockAllocator.h"
#include "tests/Test.h"

#include <cstdint>
#include <cstring>
#include <new>
#include <vector>

using Block = SkBlockAllocator::Block;
using GrowthPolicy = SkBlockAllocator::GrowthPolicy;

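// Test-only helper used to read the allocator's scratch block size for validation below.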
class BlockAllocatorTestAccess {
public:
    template<size_t N>
    static size_t ScratchBlockSize(SkSBlockAllocator<N>& pool) {
        return (size_t) pool->scratchBlockSize();
    }
};

// Helper functions for modifying the allocator in a controlled manner
template<size_t N>
static int block_count(const SkSBlockAllocator<N>& pool) {
    int ct = 0;
    for (const Block* b : pool->blocks()) {
        (void) b;
        ct++;
    }
    return ct;
}

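// Returns the block at 'blockIndex' within the pool's block list, asserting that it exists.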
template<size_t N>
static Block* get_block(SkSBlockAllocator<N>& pool, int blockIndex) {
    Block* found = nullptr;
    int i = 0;
    for (Block* b: pool->blocks()) {
        if (i == blockIndex) {
            found = b;
            break;
        }
        i++;
    }

    SkASSERT(found != nullptr);
    return found;
}

// SkBlockAllocator holds on to the largest last-released block to reuse for new allocations,
// and this is still counted in its totalSize(). However, it's easier to reason about
// totalSize() minus the scratch block size in many of these tests.
template<size_t N>
static size_t total_size(SkSBlockAllocator<N>& pool) {
    return pool->totalSize() - BlockAllocatorTestAccess::ScratchBlockSize(pool);
}

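// Allocates from the pool until a new block is appended, and returns how much the pool grew.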
template<size_t N>
static size_t add_block(SkSBlockAllocator<N>& pool) {
    size_t currentSize = total_size(pool);
    SkBlockAllocator::Block* current = pool->currentBlock();
    while(pool->currentBlock() == current) {
        pool->template allocate<4>(pool->preallocSize() / 2);
    }
    return total_size(pool) - currentSize;
}

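// Allocates a single byte and returns its address.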
template<size_t N>
static void* alloc_byte(SkSBlockAllocator<N>& pool) {
    auto br = pool->template allocate<1>(1);
    return br.fBlock->ptr(br.fAlignedOffset);
}

DEF_TEST(SkBlockAllocatorPreallocSize, r) {
    // Tests stack/member initialization, option #1 described in doc
    SkBlockAllocator stack{GrowthPolicy::kFixed, 2048};
    SkDEBUGCODE(stack.validate();)

    REPORTER_ASSERT(r, stack.preallocSize() == sizeof(SkBlockAllocator));
    REPORTER_ASSERT(r, stack.preallocUsableSpace() == (size_t) stack.currentBlock()->avail());

    // Tests placement new initialization to increase head block size, option #2
    void* mem = operator new(1024);
    SkBlockAllocator* placement = new (mem) SkBlockAllocator(GrowthPolicy::kLinear, 1024,
                                                             1024 - sizeof(SkBlockAllocator));
    REPORTER_ASSERT(r, placement->preallocSize() == 1024);
    REPORTER_ASSERT(r, placement->preallocUsableSpace() < 1024 &&
                       placement->preallocUsableSpace() >= (1024 - sizeof(SkBlockAllocator)));
    placement->~SkBlockAllocator();
    operator delete(mem);

    // Tests inline increased preallocation, option #3
    SkSBlockAllocator<2048> inlined{};
    SkDEBUGCODE(inlined->validate();)
    REPORTER_ASSERT(r, inlined->preallocSize() == 2048);
    REPORTER_ASSERT(r, inlined->preallocUsableSpace() < 2048 &&
                       inlined->preallocUsableSpace() >= (2048 - sizeof(SkBlockAllocator)));
}

DEF_TEST(SkBlockAllocatorAlloc, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Assumes the previous pointer was in the same block
    auto validate_ptr = [&](int align, int size,
                            SkBlockAllocator::ByteRange br,
                            SkBlockAllocator::ByteRange* prevBR) {
        uintptr_t pt = reinterpret_cast<uintptr_t>(br.fBlock->ptr(br.fAlignedOffset));
        // Matches the requested align
        REPORTER_ASSERT(r, pt % align == 0);
        // And large enough
        REPORTER_ASSERT(r, br.fEnd - br.fAlignedOffset >= size);
        // And has enough padding for alignment
        REPORTER_ASSERT(r, br.fAlignedOffset - br.fStart >= 0);
        REPORTER_ASSERT(r, br.fAlignedOffset - br.fStart <= align - 1);
        // And block of the returned struct is the current block of the allocator
        REPORTER_ASSERT(r, pool->currentBlock() == br.fBlock);

        // And make sure that we're past the required end of the previous allocation
        if (prevBR) {
            uintptr_t prevEnd =
                    reinterpret_cast<uintptr_t>(prevBR->fBlock->ptr(prevBR->fEnd - 1));
            REPORTER_ASSERT(r, pt > prevEnd);
        }

        // And make sure that the entire byte range is safe to write into (excluding the dead space
        // between "start" and "aligned offset," which is just padding and is left poisoned)
        std::memset(br.fBlock->ptr(br.fAlignedOffset), 0xFF, br.fEnd - br.fAlignedOffset);
    };

    auto p1 = pool->allocate<1>(14);
    validate_ptr(1, 14, p1, nullptr);

    auto p2 = pool->allocate<2>(24);
    validate_ptr(2, 24, p2, &p1);

    auto p4 = pool->allocate<4>(28);
    validate_ptr(4, 28, p4, &p2);

    auto p8 = pool->allocate<8>(40);
    validate_ptr(8, 40, p8, &p4);

    auto p16 = pool->allocate<16>(64);
    validate_ptr(16, 64, p16, &p8);

    auto p32 = pool->allocate<32>(96);
    validate_ptr(32, 96, p32, &p16);

    // All of these allocations should be in the head block
    REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
    SkDEBUGCODE(pool->validate();)

    // Requesting an allocation of avail() should not make a new block
    size_t avail = pool->currentBlock()->avail<4>();
    auto pAvail = pool->allocate<4>(avail);
    validate_ptr(4, avail, pAvail, &p32);

    // Remaining should be less than the alignment that was requested, and then
    // the next allocation will make a new block
    REPORTER_ASSERT(r, pool->currentBlock()->avail<4>() < 4);
    auto pNextBlock = pool->allocate<4>(4);
    validate_ptr(4, 4, pNextBlock, nullptr);
    REPORTER_ASSERT(r, total_size(pool) > pool->preallocSize());

    // Allocating more than avail() makes another block
    size_t currentSize = total_size(pool);
    size_t bigRequest = pool->currentBlock()->avail<4>() * 2;
    auto pTooBig = pool->allocate<4>(bigRequest);
    validate_ptr(4, bigRequest, pTooBig, nullptr);
    REPORTER_ASSERT(r, total_size(pool) > currentSize);

    // Allocating more than the default growth policy (1024 in this case) will fulfill the request
    REPORTER_ASSERT(r, total_size(pool) - currentSize < 4096);
    currentSize = total_size(pool);
    auto pReallyTooBig = pool->allocate<4>(4096);
    validate_ptr(4, 4096, pReallyTooBig, nullptr);
    REPORTER_ASSERT(r, total_size(pool) >= currentSize + 4096);
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(SkBlockAllocatorResize, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Fixed resize from 16 to 32
    SkBlockAllocator::ByteRange p = pool->allocate<4>(16);
    REPORTER_ASSERT(r, p.fBlock->avail<4>() > 16);
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, 16));
    p.fEnd += 16;

    std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x11, p.fEnd - p.fAlignedOffset);

    // Subsequent allocation is 32 bytes ahead of 'p' now, and 'p' cannot be resized further.
    auto pNext = pool->allocate<4>(16);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
                       reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
    REPORTER_ASSERT(r, p.fBlock == pNext.fBlock);
    REPORTER_ASSERT(r, !p.fBlock->resize(p.fStart, p.fEnd, 48));

    // Confirm that releasing pNext allows 'p' to be resized, and that it can be resized up to avail
    REPORTER_ASSERT(r, p.fBlock->release(pNext.fStart, pNext.fEnd));
    int fillBlock = p.fBlock->avail<4>();
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, fillBlock));
    p.fEnd += fillBlock;

    std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x22, p.fEnd - p.fAlignedOffset);

    // Confirm that resizing when there's not enough room fails
    REPORTER_ASSERT(r, p.fBlock->avail<4>() < fillBlock);
    REPORTER_ASSERT(r, !p.fBlock->resize(p.fStart, p.fEnd, fillBlock));

    // Confirm that we can shrink 'p' back to 32 bytes and then further allocate again
    int shrinkTo32 = p.fStart - p.fEnd + 32;
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, shrinkTo32));
    p.fEnd += shrinkTo32;
    REPORTER_ASSERT(r, p.fEnd - p.fStart == 32);

    std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x33, p.fEnd - p.fAlignedOffset);

    pNext = pool->allocate<4>(16);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
                       reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
    SkDEBUGCODE(pool->validate();)

    // Confirm that we can't shrink past the start of the allocation, but we can shrink it to 0
    int shrinkTo0 = pNext.fStart - pNext.fEnd;
#ifndef SK_DEBUG
    // Only test for false on release builds; a negative size should assert on debug builds
    REPORTER_ASSERT(r, !pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0 - 1));
#endif
    REPORTER_ASSERT(r, pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0));
}

DEF_TEST(SkBlockAllocatorRelease, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Successful allocate and release
    auto p = pool->allocate<8>(32);
    REPORTER_ASSERT(r, pool->currentBlock()->release(p.fStart, p.fEnd));
    // Ensure the above release actually means the next allocation reuses the same space
    auto p2 = pool->allocate<8>(32);
    REPORTER_ASSERT(r, p.fStart == p2.fStart);

    // Confirm that 'p2' cannot be released if another allocation came after it
    auto p3 = pool->allocate<8>(64);
    (void) p3;
    REPORTER_ASSERT(r, !p2.fBlock->release(p2.fStart, p2.fEnd));

    // Confirm that 'p4' can be released if 'p5' is released first, and confirm that 'p2' and 'p3'
    // can be released simultaneously (equivalent to 'p3' then 'p2').
    auto p4 = pool->allocate<8>(16);
    auto p5 = pool->allocate<8>(96);
    REPORTER_ASSERT(r, p5.fBlock->release(p5.fStart, p5.fEnd));
    REPORTER_ASSERT(r, p4.fBlock->release(p4.fStart, p4.fEnd));
    REPORTER_ASSERT(r, p2.fBlock->release(p2.fStart, p3.fEnd));

    // And confirm that passing in the wrong size for the allocation fails
    p = pool->allocate<8>(32);
    REPORTER_ASSERT(r, !p.fBlock->release(p.fStart, p.fEnd - 16));
    REPORTER_ASSERT(r, !p.fBlock->release(p.fStart, p.fEnd + 16));
    REPORTER_ASSERT(r, p.fBlock->release(p.fStart, p.fEnd));
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(SkBlockAllocatorRewind, r) {
    // Confirm that a bunch of allocations and then releases in stack order fully goes back to the
    // start of the block (i.e. unwinds the entire stack, and not just the last cursor position)
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    std::vector<SkBlockAllocator::ByteRange> ptrs;
    ptrs.reserve(32); // silence clang-tidy performance warning
    for (int i = 0; i < 32; ++i) {
        ptrs.push_back(pool->allocate<4>(16));
    }

    // Release everything in reverse order
    SkDEBUGCODE(pool->validate();)
    for (int i = 31; i >= 0; --i) {
        auto br = ptrs[i];
        REPORTER_ASSERT(r, br.fBlock->release(br.fStart, br.fEnd));
    }

    // If correct, we've rewound all the way back to the start of the block, so a new allocation
    // will have the same location as ptrs[0]
    SkDEBUGCODE(pool->validate();)
    REPORTER_ASSERT(r, pool->allocate<4>(16).fStart == ptrs[0].fStart);
}

DEF_TEST(SkBlockAllocatorGrowthPolicy, r) {
    static constexpr int kInitSize = 128;
    static constexpr int kBlockCount = 5;
    static constexpr size_t kExpectedSizes[SkBlockAllocator::kGrowthPolicyCount][kBlockCount] = {
        // kFixed -> kInitSize per block
        { kInitSize, kInitSize, kInitSize, kInitSize, kInitSize },
        // kLinear -> (block ct + 1) * kInitSize for next block
        { kInitSize, 2 * kInitSize, 3 * kInitSize, 4 * kInitSize, 5 * kInitSize },
        // kFibonacci -> 1, 1, 2, 3, 5 * kInitSize for the blocks
        { kInitSize, kInitSize, 2 * kInitSize, 3 * kInitSize, 5 * kInitSize },
        // kExponential -> 1, 2, 4, 8, 16 * kInitSize for the blocks
        { kInitSize, 2 * kInitSize, 4 * kInitSize, 8 * kInitSize, 16 * kInitSize },
    };

    for (int gp = 0; gp < SkBlockAllocator::kGrowthPolicyCount; ++gp) {
        SkSBlockAllocator<kInitSize> pool{(GrowthPolicy) gp};
        SkDEBUGCODE(pool->validate();)

        REPORTER_ASSERT(r, kExpectedSizes[gp][0] == total_size(pool));
        for (int i = 1; i < kBlockCount; ++i) {
            REPORTER_ASSERT(r, kExpectedSizes[gp][i] == add_block(pool));
        }

        SkDEBUGCODE(pool->validate();)
    }
}

DEF_TEST(SkBlockAllocatorReset, r) {
    static constexpr int kBlockIncrement = 1024;

    SkSBlockAllocator<kBlockIncrement> pool{GrowthPolicy::kLinear};
    SkDEBUGCODE(pool->validate();)

    void* firstAlloc = alloc_byte(pool);

    // Add several blocks
    add_block(pool);
    add_block(pool);
    add_block(pool);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, block_count(pool) == 4); // 3 added plus the implicit head

    get_block(pool, 0)->setMetadata(2);

    // Reset and confirm that there's only one block, a new allocation matches 'firstAlloc' again,
    // and new blocks are sized based on a reset growth policy.
    pool->reset();
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, block_count(pool) == 1);
    REPORTER_ASSERT(r, pool->preallocSize() == pool->totalSize());
    REPORTER_ASSERT(r, get_block(pool, 0)->metadata() == 0);

    REPORTER_ASSERT(r, firstAlloc == alloc_byte(pool));
    REPORTER_ASSERT(r, 2 * kBlockIncrement == add_block(pool));
    REPORTER_ASSERT(r, 3 * kBlockIncrement == add_block(pool));
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(SkBlockAllocatorReleaseBlock, r) {
    // This loops over all growth policies to make sure that the incremental releases update the
    // sequence correctly for each policy.
    for (int gp = 0; gp < SkBlockAllocator::kGrowthPolicyCount; ++gp) {
        SkSBlockAllocator<1024> pool{(GrowthPolicy) gp};
        SkDEBUGCODE(pool->validate();)

        void* firstAlloc = alloc_byte(pool);

        size_t b1Size = total_size(pool);
        size_t b2Size = add_block(pool);
        size_t b3Size = add_block(pool);
        size_t b4Size = add_block(pool);
        SkDEBUGCODE(pool->validate();)

        get_block(pool, 0)->setMetadata(1);
        get_block(pool, 1)->setMetadata(2);
        get_block(pool, 2)->setMetadata(3);
        get_block(pool, 3)->setMetadata(4);

        // Remove the 3 added blocks, but always remove the block at index 1 to test intermediate
        // removal (and, on the last iteration, tail removal).
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b2Size + b3Size + b4Size);
        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 3);
        REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 3);
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b3Size + b4Size);

        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 2);
        REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 4);
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b4Size);

        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 1);
        REPORTER_ASSERT(r, total_size(pool) == b1Size);

        // Since we're back to just the head block, if we add a new block, the growth policy should
        // match the original sequence instead of continuing with 'b5Size'
        pool->resetScratchSpace();
        size_t size = add_block(pool);
        REPORTER_ASSERT(r, size == b2Size);
        pool->releaseBlock(get_block(pool, 1));

        // Explicitly release the head block and confirm it's reset
        pool->releaseBlock(get_block(pool, 0));
        REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
        REPORTER_ASSERT(r, block_count(pool) == 1);
        REPORTER_ASSERT(r, firstAlloc == alloc_byte(pool));
        REPORTER_ASSERT(r, get_block(pool, 0)->metadata() == 0); // metadata reset too

        // Confirm that if we have > 1 block, but release the head block we can still access the
        // others
        add_block(pool);
        add_block(pool);
        pool->releaseBlock(get_block(pool, 0));
        REPORTER_ASSERT(r, block_count(pool) == 3);
        SkDEBUGCODE(pool->validate();)
    }
}

DEF_TEST(SkBlockAllocatorIterateAndRelease, r) {
    SkSBlockAllocator<256> pool;

    pool->headBlock()->setMetadata(1);
    add_block(pool);
    add_block(pool);
    add_block(pool);

    // Loop forward and release the blocks
    int releaseCount = 0;
    for (auto* b : pool->blocks()) {
        pool->releaseBlock(b);
        releaseCount++;
    }
    REPORTER_ASSERT(r, releaseCount == 4);
    // pool should have just the head block, but was reset
    REPORTER_ASSERT(r, pool->headBlock()->metadata() == 0);
    REPORTER_ASSERT(r, block_count(pool) == 1);

    // Add more blocks
    pool->headBlock()->setMetadata(1);
    add_block(pool);
    add_block(pool);
    add_block(pool);

    // Loop in reverse and release the blocks
    releaseCount = 0;
    for (auto* b : pool->rblocks()) {
        pool->releaseBlock(b);
        releaseCount++;
    }
    REPORTER_ASSERT(r, releaseCount == 4);
    // pool should have just the head block, but was reset
    REPORTER_ASSERT(r, pool->headBlock()->metadata() == 0);
    REPORTER_ASSERT(r, block_count(pool) == 1);
}

DEF_TEST(SkBlockAllocatorScratchBlockReserve, r) {
    SkSBlockAllocator<256> pool;

    size_t added = add_block(pool);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);
    size_t total = pool->totalSize();
    pool->releaseBlock(pool->currentBlock());

    // Total size shouldn't have changed, the released block should become scratch
    REPORTER_ASSERT(r, pool->totalSize() == total);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == added);

    // But a reset definitely deletes any scratch block
    pool->reset();
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);

    // Reserving more than what's available adds a scratch block, and the current block's
    // available space is unchanged.
    size_t avail = pool->currentBlock()->avail();
    size_t reserve = avail + 1;
    pool->reserve(reserve);
    REPORTER_ASSERT(r, (size_t) pool->currentBlock()->avail() == avail);
    // And rounds up to the fixed size of this pool's growth policy
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) >= reserve &&
                       BlockAllocatorTestAccess::ScratchBlockSize(pool) % 256 == 0);

    // Allocating more than avail activates the scratch block (so totalSize doesn't change)
    size_t preAllocTotalSize = pool->totalSize();
    pool->allocate<1>(avail + 1);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);
    REPORTER_ASSERT(r, pool->totalSize() == preAllocTotalSize);

    // When reserving less than what's still available in the current block, no scratch block is
    // added.
    pool->reserve(pool->currentBlock()->avail());
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);

    // Unless checking available bytes is disabled
    pool->reserve(pool->currentBlock()->avail(), SkBlockAllocator::kIgnoreExistingBytes_Flag);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) > 0);

    // If kIgnoreGrowthPolicy is specified, the new scratch block should not have been updated to
    // follow the size (which in this case is a fixed 256 bytes per block).
    pool->resetScratchSpace();
    pool->reserve(32, SkBlockAllocator::kIgnoreGrowthPolicy_Flag);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) > 0 &&
                       BlockAllocatorTestAccess::ScratchBlockSize(pool) < 256);

    // When requesting an allocation larger than the current block and the scratch block, a new
    // block is added, and the scratch block remains scratch.
    SkBlockAllocator::Block* oldTail = pool->currentBlock();
    avail = oldTail->avail();
    size_t scratchAvail = 2 * avail;
    pool->reserve(scratchAvail);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) >= scratchAvail);

    // This allocation request is larger than both oldTail's available space and the scratch size,
    // so a new block should be added and the scratch size should stay the same.
    scratchAvail = BlockAllocatorTestAccess::ScratchBlockSize(pool);
    pool->allocate<1>(scratchAvail + 1);
    REPORTER_ASSERT(r, pool->currentBlock() != oldTail);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == scratchAvail);
}

DEF_TEST(SkBlockAllocatorStealBlocks, r) {
    SkSBlockAllocator<256> poolA;
    SkSBlockAllocator<128> poolB;

    add_block(poolA);
    add_block(poolA);
    add_block(poolA);

    add_block(poolB);
    add_block(poolB);

    char* bAlloc = (char*) alloc_byte(poolB);
    *bAlloc = 't';

    const SkBlockAllocator::Block* allocOwner = poolB->findOwningBlock(bAlloc);

    REPORTER_ASSERT(r, block_count(poolA) == 4);
    REPORTER_ASSERT(r, block_count(poolB) == 3);

    size_t aSize = poolA->totalSize();
    size_t bSize = poolB->totalSize();
    size_t theftSize = bSize - poolB->preallocSize();

    // This steal should move B's 2 heap blocks to A, bringing A to 6 and B to just its head
    poolA->stealHeapBlocks(poolB.allocator());
    REPORTER_ASSERT(r, block_count(poolA) == 6);
    REPORTER_ASSERT(r, block_count(poolB) == 1);
    REPORTER_ASSERT(r, poolB->preallocSize() == poolB->totalSize());
    REPORTER_ASSERT(r, poolA->totalSize() == aSize + theftSize);

    REPORTER_ASSERT(r, *bAlloc == 't');
    REPORTER_ASSERT(r, (uintptr_t) poolA->findOwningBlock(bAlloc) == (uintptr_t) allocOwner);
    REPORTER_ASSERT(r, !poolB->findOwningBlock(bAlloc));

    // Redoing the steal now that B is just a head block should be a no-op
    poolA->stealHeapBlocks(poolB.allocator());
    REPORTER_ASSERT(r, block_count(poolA) == 6);
    REPORTER_ASSERT(r, block_count(poolB) == 1);
}

// These tests ensure that the allocation padding mechanism works as intended
struct TestMeta {
    int fX1;
    int fX2;
};
struct alignas(32) TestMetaBig {
    int fX1;
    int fX2;
};

DEF_TEST(SkBlockAllocatorMetadata, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Allocation where alignment of user data > alignment of metadata
    SkASSERT(alignof(TestMeta) < 16);
    auto p1 = pool->allocate<16, sizeof(TestMeta)>(16);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, p1.fAlignedOffset - p1.fStart >= (int) sizeof(TestMeta));
    TestMeta* meta = static_cast<TestMeta*>(p1.fBlock->ptr(p1.fAlignedOffset - sizeof(TestMeta)));
    // Confirm alignment for both pointers
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(meta) % alignof(TestMeta) == 0);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p1.fBlock->ptr(p1.fAlignedOffset)) % 16 == 0);
    // Access fields to make sure 'meta' matches the compiler's expectations...
    meta->fX1 = 2;
    meta->fX2 = 5;

    // Repeat, but for metadata that has a larger alignment than the allocation
    SkASSERT(alignof(TestMetaBig) == 32);
    auto p2 = pool->allocate<alignof(TestMetaBig), sizeof(TestMetaBig)>(16);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, p2.fAlignedOffset - p2.fStart >= (int) sizeof(TestMetaBig));
    TestMetaBig* metaBig = static_cast<TestMetaBig*>(
            p2.fBlock->ptr(p2.fAlignedOffset - sizeof(TestMetaBig)));
    // Confirm alignment for both pointers
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(metaBig) % alignof(TestMetaBig) == 0);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p2.fBlock->ptr(p2.fAlignedOffset)) % 16 == 0);
    // Access fields
    metaBig->fX1 = 3;
    metaBig->fX2 = 6;

    // Ensure metadata values persist after allocations
    REPORTER_ASSERT(r, meta->fX1 == 2 && meta->fX2 == 5);
    REPORTER_ASSERT(r, metaBig->fX1 == 3 && metaBig->fX2 == 6);
}

DEF_TEST(SkBlockAllocatorAllocatorMetadata, r) {
    SkSBlockAllocator<256> pool{};
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, pool->metadata() == 0); // initial value

    pool->setMetadata(4);
    REPORTER_ASSERT(r, pool->metadata() == 4);

    // Releasing the head block doesn't change the allocator's metadata (even though that's where
    // it is stored).
    pool->releaseBlock(pool->headBlock());
    REPORTER_ASSERT(r, pool->metadata() == 4);

    // But resetting the whole allocator brings things back to as if it were newly constructed
    pool->reset();
    REPORTER_ASSERT(r, pool->metadata() == 0);
}

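// Verifies that owningBlock() recovers the fulfilling block from both the user pointer and the
// metadata pointer of an allocation made with the given alignment and padding.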
template<size_t Align, size_t Padding>
static void run_owning_block_test(skiatest::Reporter* r, SkBlockAllocator* pool) {
    auto br = pool->allocate<Align, Padding>(1);

    void* userPtr = br.fBlock->ptr(br.fAlignedOffset);
    void* metaPtr = br.fBlock->ptr(br.fAlignedOffset - Padding);

    Block* block = pool->owningBlock<Align, Padding>(userPtr, br.fStart);
    REPORTER_ASSERT(r, block == br.fBlock);

    block = pool->owningBlock<Align>(metaPtr, br.fStart);
    REPORTER_ASSERT(r, block == br.fBlock);

    block = reinterpret_cast<Block*>(reinterpret_cast<uintptr_t>(userPtr) - br.fAlignedOffset);
    REPORTER_ASSERT(r, block == br.fBlock);
}

template<size_t Padding>
static void run_owning_block_tests(skiatest::Reporter* r, SkBlockAllocator* pool) {
    run_owning_block_test<1, Padding>(r, pool);
    run_owning_block_test<2, Padding>(r, pool);
    run_owning_block_test<4, Padding>(r, pool);
    run_owning_block_test<8, Padding>(r, pool);
    run_owning_block_test<16, Padding>(r, pool);
    run_owning_block_test<32, Padding>(r, pool);
    run_owning_block_test<64, Padding>(r, pool);
    run_owning_block_test<128, Padding>(r, pool);
}

DEF_TEST(SkBlockAllocatorOwningBlock, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    run_owning_block_tests<1>(r, pool.allocator());
    run_owning_block_tests<2>(r, pool.allocator());
    run_owning_block_tests<4>(r, pool.allocator());
    run_owning_block_tests<8>(r, pool.allocator());
    run_owning_block_tests<16>(r, pool.allocator());
    run_owning_block_tests<32>(r, pool.allocator());

    // And some weird numbers
    run_owning_block_tests<3>(r, pool.allocator());
    run_owning_block_tests<9>(r, pool.allocator());
    run_owning_block_tests<17>(r, pool.allocator());
}
670