//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PoolAlloc.cpp:
//    Implements the class methods for PoolAllocator and Allocation classes.
//

#include "common/PoolAlloc.h"

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#include "common/angleutils.h"
#include "common/debug.h"
#include "common/mathutil.h"
#include "common/platform.h"
#include "common/tls.h"

#if defined(ANGLE_WITH_ASAN)
#    include <sanitizer/asan_interface.h>
#endif

namespace angle
{
// If we are using guard blocks, we must track each individual allocation. If we aren't using guard
// blocks, these never get instantiated, so won't have any impact.

class Allocation
{
  public:
    Allocation(size_t size, unsigned char *mem, Allocation *prev = 0)
        : mSize(size), mMem(mem), mPrevAlloc(prev)
    {
        // Allocations are bracketed:
        //
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        //
        // This would be cleaner with if (kGuardBlockSize)..., but that makes the compiler print
        // warnings about 0 length memsets, even with the if() protecting them.
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        memset(preGuard(), kGuardBlockBeginVal, kGuardBlockSize);
        memset(data(), kUserDataFill, mSize);
        memset(postGuard(), kGuardBlockEndVal, kGuardBlockSize);
#endif
    }

    void checkAllocList() const;

    static size_t AlignedHeaderSize(uint8_t *allocationBasePtr, size_t alignment)
    {
        // Make sure that the data offset after the header is aligned to the given alignment.
        size_t base = reinterpret_cast<size_t>(allocationBasePtr);
        return rx::roundUpPow2(base + kGuardBlockSize + HeaderSize(), alignment) - base;
    }
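
    // Worked example for AlignedHeaderSize() above (illustrative only; assumes a build without
    // guard blocks, so kGuardBlockSize == 0 and HeaderSize() == 0): with allocationBasePtr at
    // address 0x1003 and alignment == 16, roundUpPow2(0x1003, 16) == 0x1010, so the function
    // returns 0x1010 - 0x1003 == 13 bytes of offset before the user data lands on a 16-byte
    // boundary.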

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data and any necessary alignments.
    static size_t AllocationSize(uint8_t *allocationBasePtr,
                                 size_t size,
                                 size_t alignment,
                                 size_t *preAllocationPaddingOut)
    {
        // The allocation will be laid out as such:
        //
        //                                Aligned to |alignment|
        //                                ^
        //   preAllocationPaddingOut      |
        //        ___^___                 |
        //       /       \                |
        //       <padding>[header][guard][data][guard]
        //       \___________ __________/
        //                   V
        //               dataOffset
        //
        // Note that alignment is at least as much as a pointer alignment, so the pointers in the
        // header are also necessarily aligned appropriately.
        //
        size_t dataOffset        = AlignedHeaderSize(allocationBasePtr, alignment);
        *preAllocationPaddingOut = dataOffset - HeaderSize() - kGuardBlockSize;

        return dataOffset + size + kGuardBlockSize;
    }
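
    // Continuing the illustrative example above (no guard blocks, allocationBasePtr == 0x1003,
    // alignment == 16, size == 100): dataOffset == 13, *preAllocationPaddingOut ==
    // 13 - 0 - 0 == 13, and the returned total is 13 + 100 + 0 == 113 bytes measured from
    // allocationBasePtr.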

    // Given memory pointing to |header|, returns |data|.
    static uint8_t *GetDataPointer(uint8_t *memory, size_t alignment)
    {
        uint8_t *alignedPtr = memory + kGuardBlockSize + HeaderSize();

        // |memory| must be aligned already such that user data is aligned to |alignment|.
        ASSERT((reinterpret_cast<uintptr_t>(alignedPtr) & (alignment - 1)) == 0);

        return alignedPtr;
    }

  private:
    void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;

    void checkAlloc() const
    {
        checkGuardBlock(preGuard(), kGuardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), kGuardBlockEndVal, "after");
    }

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char *preGuard() const { return mMem + HeaderSize(); }
    unsigned char *data() const { return preGuard() + kGuardBlockSize; }
    unsigned char *postGuard() const { return data() + mSize; }

    size_t mSize;            // size of the user data area
    unsigned char *mMem;     // beginning of our allocation (points to header)
    Allocation *mPrevAlloc;  // prior allocation in the chain

    static constexpr unsigned char kGuardBlockBeginVal = 0xfb;
    static constexpr unsigned char kGuardBlockEndVal   = 0xfe;
    static constexpr unsigned char kUserDataFill       = 0xcd;
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    static constexpr size_t kGuardBlockSize = 16;
    static constexpr size_t HeaderSize() { return sizeof(Allocation); }
#else
    static constexpr size_t kGuardBlockSize = 0;
    static constexpr size_t HeaderSize() { return 0; }
#endif
};

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
class PageHeader
{
  public:
    PageHeader(PageHeader *nextPage, size_t pageCount)
        : nextPage(nextPage),
          pageCount(pageCount)
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
          ,
          lastAllocation(nullptr)
#    endif
    {}

    ~PageHeader()
    {
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        if (lastAllocation)
        {
            lastAllocation->checkAllocList();
        }
#    endif
    }

    PageHeader *nextPage;
    size_t pageCount;
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    Allocation *lastAllocation;
#    endif
};
#endif

//
// Implement the functionality of the PoolAllocator class, which
// is documented in PoolAlloc.h.
//
PoolAllocator::PoolAllocator(int growthIncrement, int allocationAlignment)
    : mAlignment(allocationAlignment),
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
      mPageSize(growthIncrement),
      mFreeList(nullptr),
      mInUseList(nullptr),
      mNumCalls(0),
      mTotalBytes(0),
#endif
      mLocked(false)
{
    initialize(growthIncrement, allocationAlignment);
}
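
// Typical usage, sketched here for illustration only (the sizes below are arbitrary examples,
// not values taken from any particular caller):
//
//   angle::PoolAllocator pool(16 * 1024, 16);  // 16 kB growth increment, 16-byte alignment
//   void *a = pool.allocate(128);              // carved out of the current page
//   pool.push();                               // open a new allocation scope
//   void *b = pool.allocate(4096);
//   pool.pop();                                // releases |b| and everything since push()
//                                              // |a| stays valid until the pool is destroyed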

void PoolAllocator::initialize(int pageSize, int alignment)
{
    mAlignment = alignment;
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    mPageSize       = pageSize;
    mPageHeaderSkip = sizeof(PageHeader);

    // Alignment == 1 is a special fast-path where fastAllocate() is enabled
    if (mAlignment != 1)
    {
#endif
        // Adjust mAlignment to be at least pointer aligned and
        // power of 2.
        //
        size_t minAlign = sizeof(void *);
        if (mAlignment < minAlign)
        {
            mAlignment = minAlign;
        }
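        // For example, a requested alignment of 12 already satisfies the sizeof(void *) minimum
        // above, and the ceilPow2() call below then rounds it up to 16.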
        mAlignment = gl::ceilPow2(static_cast<unsigned int>(mAlignment));
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    }
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (mPageSize < 4 * 1024)
    {
        mPageSize = 4 * 1024;
    }

    //
    // A large mCurrentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    mCurrentPageOffset = mPageSize;

#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    mStack.push_back({});
#endif
}

PoolAllocator::~PoolAllocator()
{
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    while (mInUseList)
    {
        PageHeader *next = mInUseList->nextPage;
        mInUseList->~PageHeader();
        delete[] reinterpret_cast<char *>(mInUseList);
        mInUseList = next;
    }
    // We should not check the guard blocks
    // here, because we did it already when the block was
    // placed into the free list.
    //
    while (mFreeList)
    {
        PageHeader *next = mFreeList->nextPage;
        delete[] reinterpret_cast<char *>(mFreeList);
        mFreeList = next;
    }
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    for (auto &allocs : mStack)
    {
        for (auto alloc : allocs)
        {
            free(alloc);
        }
    }
    mStack.clear();
#endif
}

//
// Check a single guard block for damage
//
void Allocation::checkGuardBlock(unsigned char *blockMem,
                                 unsigned char val,
                                 const char *locText) const
{
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    for (size_t x = 0; x < kGuardBlockSize; x++)
    {
        if (blockMem[x] != val)
        {
            char assertMsg[80];
            // We don't print the assert message. It's here just to be helpful.
            snprintf(assertMsg, sizeof(assertMsg),
                     "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", locText, mSize, data());
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}

void PoolAllocator::push()
{
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    AllocState state = {mCurrentPageOffset, mInUseList};

    mStack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    mCurrentPageOffset = mPageSize;
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    mStack.push_back({});
#endif
}

// Do a mass-deallocation of all the individual allocations that have occurred since the last
// push(), or since the last pop(), or since the object's creation.
//
// Single-page allocations are saved for future use unless the release strategy is All.
void PoolAllocator::pop(ReleaseStrategy releaseStrategy)
{
    if (mStack.size() < 1)
    {
        return;
    }

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    PageHeader *page   = mStack.back().page;
    mCurrentPageOffset = mStack.back().offset;

    while (mInUseList != page)
    {
        // Grab the pageCount before calling the destructor. While the destructor doesn't actually
        // touch this variable, it's confusing MSAN.
        const size_t pageCount = mInUseList->pageCount;
        PageHeader *nextInUse  = mInUseList->nextPage;

        // invoke destructor to free allocation list
        mInUseList->~PageHeader();

        if (pageCount > 1 || releaseStrategy == ReleaseStrategy::All)
        {
            delete[] reinterpret_cast<char *>(mInUseList);
        }
        else
        {
#    if defined(ANGLE_WITH_ASAN)
            // Clear any container annotations left over from when the memory
            // was last used. (crbug.com/1419798)
            __asan_unpoison_memory_region(mInUseList, mPageSize);
#    endif
            mInUseList->nextPage = mFreeList;
            mFreeList            = mInUseList;
        }
        mInUseList = nextInUse;
    }

    mStack.pop_back();
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    for (auto &alloc : mStack.back())
    {
        free(alloc);
    }
    mStack.pop_back();
#endif
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void PoolAllocator::popAll()
{
    while (mStack.size() > 0)
    {
        pop();
    }
}

void *PoolAllocator::allocate(size_t numBytes)
{
    ASSERT(!mLocked);

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    //
    // Just keep some interesting statistics.
    //
    ++mNumCalls;
    mTotalBytes += numBytes;

    uint8_t *currentPagePtr = reinterpret_cast<uint8_t *>(mInUseList) + mCurrentPageOffset;

    size_t preAllocationPadding = 0;
    size_t allocationSize =
        Allocation::AllocationSize(currentPagePtr, numBytes, mAlignment, &preAllocationPadding);

    // Integer overflow is unexpected.
    ASSERT(allocationSize >= numBytes);

    // Do the allocation, most likely case first, for efficiency.
    if (allocationSize <= mPageSize - mCurrentPageOffset)
    {
        // There is enough room to allocate from the current page at mCurrentPageOffset.
        uint8_t *memory = currentPagePtr + preAllocationPadding;
        mCurrentPageOffset += allocationSize;

        return initializeAllocation(memory, numBytes);
    }

    if (allocationSize > mPageSize - mPageHeaderSkip)
    {
        // If the allocation is larger than a whole page, do a multi-page allocation. These are not
        // mixed with the others. The OS is efficient in allocating and freeing multiple pages.

        // We don't know what the alignment of the new allocated memory will be, so conservatively
        // allocate enough memory for up to alignment extra bytes being needed.
        allocationSize = Allocation::AllocationSize(reinterpret_cast<uint8_t *>(mPageHeaderSkip),
                                                    numBytes, mAlignment, &preAllocationPadding);

        size_t numBytesToAlloc = allocationSize + mPageHeaderSkip + mAlignment;

        // Integer overflow is unexpected.
        ASSERT(numBytesToAlloc >= allocationSize);

        PageHeader *memory = reinterpret_cast<PageHeader *>(::new char[numBytesToAlloc]);
        if (memory == nullptr)
        {
            return nullptr;
        }

        // Use placement-new to initialize header
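        // The second constructor argument is the page count, computed with a ceiling division;
        // e.g. with 8 kB pages and numBytesToAlloc == 9000, (9000 + 8192 - 1) / 8192 == 2 pages.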
        new (memory) PageHeader(mInUseList, (numBytesToAlloc + mPageSize - 1) / mPageSize);
        mInUseList = memory;

        // Make next allocation come from a new page
        mCurrentPageOffset = mPageSize;

        // Now that we actually have the pointer, make sure the data pointer will be aligned.
        currentPagePtr = reinterpret_cast<uint8_t *>(memory) + mPageHeaderSkip;
        Allocation::AllocationSize(currentPagePtr, numBytes, mAlignment, &preAllocationPadding);

        return initializeAllocation(currentPagePtr + preAllocationPadding, numBytes);
    }

    uint8_t *newPageAddr = allocateNewPage(numBytes);
    return initializeAllocation(newPageAddr, numBytes);

#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)

    void *alloc = malloc(numBytes + mAlignment - 1);
    mStack.back().push_back(alloc);

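    // Bump the raw pointer up to the next multiple of mAlignment. Over-allocating mAlignment - 1
    // bytes above guarantees the aligned pointer still lies inside the block; e.g. with
    // mAlignment == 16 and malloc() returning 0x1004, the caller gets 0x1010.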
    intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
    intAlloc          = rx::roundUpPow2<intptr_t>(intAlloc, mAlignment);
    return reinterpret_cast<void *>(intAlloc);
#endif
}

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
uint8_t *PoolAllocator::allocateNewPage(size_t numBytes)
{
    // Need a simple page to allocate from. Pick a page from the free list, if any. Otherwise need
    // to make the allocation.
    PageHeader *memory;
    if (mFreeList)
    {
        memory    = mFreeList;
        mFreeList = mFreeList->nextPage;
    }
    else
    {
        memory = reinterpret_cast<PageHeader *>(::new char[mPageSize]);
        if (memory == nullptr)
        {
            return nullptr;
        }
    }
    // Use placement-new to initialize header
    new (memory) PageHeader(mInUseList, 1);
    mInUseList = memory;

    // Leave room for the page header.
    mCurrentPageOffset      = mPageHeaderSkip;
    uint8_t *currentPagePtr = reinterpret_cast<uint8_t *>(mInUseList) + mCurrentPageOffset;

    size_t preAllocationPadding = 0;
    size_t allocationSize =
        Allocation::AllocationSize(currentPagePtr, numBytes, mAlignment, &preAllocationPadding);

    mCurrentPageOffset += allocationSize;

    // The new allocation is made after the page header and any alignment required before it.
    return reinterpret_cast<uint8_t *>(mInUseList) + mPageHeaderSkip + preAllocationPadding;
}

void *PoolAllocator::initializeAllocation(uint8_t *memory, size_t numBytes)
{
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    new (memory) Allocation(numBytes, memory, mInUseList->lastAllocation);
    mInUseList->lastAllocation = reinterpret_cast<Allocation *>(memory);
#    endif

    return Allocation::GetDataPointer(memory, mAlignment);
}
#endif

void PoolAllocator::lock()
{
    ASSERT(!mLocked);
    mLocked = true;
}

void PoolAllocator::unlock()
{
    ASSERT(mLocked);
    mLocked = false;
}

//
// Check all allocations in a list for damage by calling check on each.
//
void Allocation::checkAllocList() const
{
    for (const Allocation *alloc = this; alloc != nullptr; alloc = alloc->mPrevAlloc)
    {
        alloc->checkAlloc();
    }
}

}  // namespace angle