1 /*
2 * Copyright 2020 Axel Davy <[email protected]>
3 * SPDX-License-Identifier: MIT
4 */
5
6 /*
7 * Memory utility functions to allocate the RAM backing of textures.
8 * DEFAULT textures are stored on the GPU.
9 * MANAGED textures have a RAM backing and upload their content to a GPU texture for use.
10 * SYSTEMMEM textures are stored in RAM and are meant to be uploaded to DEFAULT textures.
11 * Basically SYSTEMMEM + DEFAULT enables the app to do manually what MANAGED does automatically.
12 *
13 * Once the GPU texture is created, the RAM backing of MANAGED textures can be used on
14 * two occasions:
15 * . Recreating the GPU texture (for example on lod change, or GPU memory eviction)
16 * . Reading the texture content (some games do that to fill higher-res versions of the texture)
17 *
18 * When a lot of textures are used, the amount of addressing space (virtual memory) taken by MANAGED
19 * and SYSTEMMEM textures can be significant and cause virtual memory exhaustion for 32-bit programs.
20 *
21 * One way to reduce the virtual memory taken is to ignore lod and delete the RAM backing of
22 * MANAGED textures once it has been uploaded. If the texture is read, or evicted from GPU memory,
23 * the RAM backing would be recreated (note that mapping the GPU memory is not acceptable, as RAM
24 * is supposed to have smaller (fixed) stride constraints).
25 *
26 * Instead, the approach taken here is to keep the RAM backing alive, but free its addressing space.
27 * In other words virtual memory usage is reduced, but the RAM usage of the app stays the same.
28 * To do so, we use the memfd feature of the Linux kernel. It allows allocating a file
29 * stored in RAM and visible only to the app. We can map/unmap portions of the file as needed.
30 * When a portion is mapped, it takes virtual memory space; when it is not, it doesn't.
31 * The file is stored in RAM, so the access speed is the same as normal RAM. Using such a
32 * file to allocate data makes it possible to use more than 4GB of RAM on 32-bit systems.
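 *
 * A minimal sketch of the memfd pattern relied upon below (illustration only,
 * error handling omitted, sizes arbitrary):
 *
 *    int fd = memfd_create("example", 0);
 *    ftruncate(fd, 100 << 20);        // anonymous, RAM-backed 100MB file
 *    void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *    // ... read/write through p: data lives in RAM, mapping uses address space ...
 *    munmap(p, 1 << 20);              // frees the address space, the data stays in the file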
33 *
34 * This approach adds some overhead: when accessing mapped content for the first time, pages are allocated
35 * by the system. This has a lot of overhead (several times the time it takes to memset the area).
36 * Releasing these pages (when unmapping) has overhead too, though significantly less.
37 *
38 * This overhead however is much less significant than the overhead of downloading the GPU content.
39 * In addition, we significantly reduce the overhead spent in Gallium nine for new allocations by
40 * exploiting the fact that new contents of the file are zero-filled. By not calling memset in Gallium nine,
41 * the overhead of page allocation happens client side, thus outside the d3d mutex. This should give
42 * a performance boost for multithreaded applications. As malloc also has this overhead (at least for
43 * large enough allocations, which use mmap internally), allocating ends up faster than with the standard
44 * allocation path.
45 * By far the overhead induced by page allocation/deallocation is the biggest overhead involved in this
46 * code. It is reduced significantly with huge pages, but they are too complex for the user to configure
47 * (and they have some memory management downsides too). The memset trick moves most of
48 * the overhead outside Nine anyway.
49 *
50 * To prevent useless unmappings quickly followed by mapping again, we do not unmap allocations
51 * right away when they are no longer locked for access. Indeed it is likely the allocation will be
52 * accessed several times in a row, for example first to fill it, then to upload it.
53 * We keep everything mapped until we reach a threshold of allocated memory. Then we use hints to prioritize
54 * which regions to unmap first. Thus virtual memory usage is only reduced when the threshold is reached.
55 *
56 * Multiple memfd files are used, each of 100MB. Thus memory usage (but not virtual memory usage) increases
57 * in steps of 100MB. When not on 32-bit x86, we use the standard malloc.
58 *
59 * Finally, for simplicity, we do not pack several allocations inside one page-aligned region.
60 * Each allocation is given its own page-aligned region inside a memfd file.
61 * Allocations smaller than a page (4KB on x86) go through malloc.
62 * As texture sizes are usually multiples of powers of two, allocations above the page size are typically
63 * multiples of the page size, thus space is not wasted in practice.
64 *
65 */
66
67 #include <errno.h>
68 #include <fcntl.h>
69 #include <limits.h>
70 #include <linux/memfd.h>
71 #include <pthread.h>
72 #include <stdio.h>
73 #include <sys/mman.h>
74 #include <sys/types.h>
75 #include <sys/stat.h>
76 #include <ulimit.h>
77 #include <unistd.h>
78
79 #include "util/list.h"
80 #include "util/u_memory.h"
81 #include "util/slab.h"
82
83 #include "nine_debug.h"
84 #include "nine_memory_helper.h"
85 #include "nine_state.h"
86
87
88 #define DIVUP(a,b) (((a)+(b)-1)/(b))
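/* Worked example (illustration only): DIVUP(5000, 4096) == 2, so a 5000-byte
 * request ends up reserving two 4KB pages (8KB) inside a memfd file. */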
89
90 /* Required alignment for allocations */
91 #define NINE_ALLOCATION_ALIGNMENT 32
92
93 #define DBG_CHANNEL (DBG_BASETEXTURE|DBG_SURFACE|DBG_VOLUME|DBG_TEXTURE|DBG_CUBETEXTURE)
94
95 /* Use memfd only for 32 bits. Check for memfd_create support */
96 #if DETECT_ARCH_X86 && defined(HAVE_MEMFD_CREATE)
97 #define NINE_ENABLE_MEMFD
98 #endif
99
100 #ifdef NINE_ENABLE_MEMFD
101
102 struct nine_memfd_file_region {
103 unsigned offset;
104 unsigned size;
105 void *map; /* pointer to the mapped content of the file. Can be NULL */
106 int num_locks; /* Total number of locks blocking the munmap */
107 int num_weak_unlocks; /* Number of users which weakly block the munmap */
108 bool zero_filled;
109 struct list_head list;
110 };
111
112 struct nine_memfd_file {
113 int fd;
114 int filesize; /* Size of the file */
115 struct list_head free_regions; /* This list is sorted by the offset, and consecutive regions are merged */
116 struct list_head unmapped_allocated_regions; /* This list and the following ones are not sorted */
117 struct list_head locked_mapped_allocated_regions;
118 struct list_head weak_unlocked_mapped_allocated_regions;
119 struct list_head unlocked_mapped_allocated_regions;
120 };
121
122 /* The allocation is stored inside a memfd */
123 #define NINE_MEMFD_ALLOC 1
124 /* The allocation is part of another allocation, which is stored inside a memfd */
125 #define NINE_MEMFD_SUBALLOC 2
126 /* The allocation was allocated with malloc and will have to be freed */
127 #define NINE_MALLOC_ALLOC 3
128 /* The pointer doesn't need memory management */
129 #define NINE_EXTERNAL_ALLOC 4
130
131 struct nine_memfd_allocation {
132 struct nine_memfd_file *file; /* File in which the data is allocated */
133 struct nine_memfd_file_region *region; /* Corresponding file memory region. Max 1 allocation per region */
134 };
135
136 /* 'Suballocations' are used to represent subregions of an allocation.
137 * For example a given layer of a texture. These are not allocations,
138 * but can be accessed separately. To correctly handle accessing them,
139 * we encapsulate them into this structure. */
140 struct nine_memfd_suballocation {
141 struct nine_memfd_allocation *parent; /* Parent allocation */
142 int relative_offset; /* Offset relative to the parent */
143 };
144
145 /* A standard allocation with malloc */
146 struct nine_malloc_allocation {
147 void *buf;
148 unsigned allocation_size;
149 };
150
151 /* A pointer with no need of memory management.
152 * For example a pointer passed by the application,
153 * or a 'suballocation' inside a malloc-ed allocation. */
154 struct nine_external_allocation {
155 void *buf;
156 };
157
158 /* Encapsulates all allocations */
159 struct nine_allocation {
160 unsigned allocation_type; /* Type of allocation */
161 union {
162 struct nine_memfd_allocation memfd;
163 struct nine_memfd_suballocation submemfd;
164 struct nine_malloc_allocation malloc;
165 struct nine_external_allocation external;
166 } memory;
167 struct list_head list_free; /* for pending frees */
168 /* The fields below are only used for memfd/submemfd allocations */
169 struct list_head list_release; /* for pending releases */
170 /* Handling of the CSMT thread:
171 * API calls are single threaded (global mutex protection).
172 * However we use multithreading internally (CSMT worker thread).
173 * To handle this thread, we map/lock the allocation in the
174 * main thread and increase pending_counter. When the worker thread
175 * is done with the scheduled function, the pending_counter is decreased.
176 * If pending_counter is 0, locks_on_counter can be subtracted from
177 * the region's num_locks (in the main thread). */
178 unsigned locks_on_counter;
179 unsigned *pending_counter;
180 /* Hint from the last unlock indicating the data might be locked again soon */
181 bool weak_unlock;
182 };
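/* Illustrative sequence for the pending_counter mechanism described above
 * (a sketch, not lifted from any particular caller; "counter" stands for
 * whatever counter the CSMT code hands to nine_pointer_delayedstrongrelease):
 *
 *   main thread:   data = nine_get_pointer(allocator, alloc);            // region->num_locks++
 *                  ...schedule CSMT work using data...
 *                  nine_pointer_delayedstrongrelease(allocator, alloc, &counter);
 *                                                                        // locks_on_counter++
 *   worker thread: ...runs the scheduled function, then counter drops to 0...
 *   main thread:   nine_flush_pending_releases(allocator);               // num_locks -= locks_on_counter
 */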
183
184 struct nine_allocator {
185 struct NineDevice9 *device;
186 int page_size; /* Page size */
187 int num_fd_max; /* Max number of memfd files */
188 int min_file_size; /* Minimum memfd file size */
189 /* Tracking of all allocations */
190 long long total_allocations; /* Amount of memory allocated */
191 long long total_locked_memory; /* TODO */ /* Amount of memory blocked by a lock */
192 long long total_virtual_memory; /* Current virtual memory used by our allocations */
193 long long total_virtual_memory_limit; /* Target maximum virtual memory used. Above that, tries to unmap memfd files whenever possible. */
194
195 int num_fd; /* Number of memfd files */ /* TODO release unused memfd files */
196 struct slab_mempool allocation_pool;
197 struct slab_mempool region_pool;
198 struct nine_memfd_file *memfd_pool; /* Table (of size num_fd) of memfd files */
199 struct list_head pending_releases; /* List of allocations with unlocks depending on pending_counter */ /* TODO: Elements seem removed only on flush. Destruction ? */
200
201 pthread_mutex_t mutex_pending_frees;
202 struct list_head pending_frees;
203 };
204
205 #if MESA_DEBUG
206
207 static void
208 debug_dump_memfd_state(struct nine_memfd_file *memfd_file, bool details)
209 {
210 struct nine_memfd_file_region *region;
211
212 DBG("fd: %d, filesize: %d\n", memfd_file->fd, memfd_file->filesize);
213 if (!details)
214 return;
215 LIST_FOR_EACH_ENTRY(region, &memfd_file->free_regions, list) {
216 DBG("FREE block: offset %d, size %d, map=%p, locks=%d, weak=%d, z=%d\n",
217 region->offset, region->size, region->map,
218 region->num_locks, region->num_weak_unlocks, (int)region->zero_filled);
219 }
220 LIST_FOR_EACH_ENTRY(region, &memfd_file->unmapped_allocated_regions, list) {
221 DBG("UNMAPPED ALLOCATED block: offset %d, size %d, map=%p, locks=%d, weak=%d, z=%d\n",
222 region->offset, region->size, region->map,
223 region->num_locks, region->num_weak_unlocks, (int)region->zero_filled);
224 }
225 LIST_FOR_EACH_ENTRY(region, &memfd_file->locked_mapped_allocated_regions, list) {
226 DBG("LOCKED MAPPED ALLOCATED block: offset %d, size %d, map=%p, locks=%d, weak=%d, z=%d\n",
227 region->offset, region->size, region->map,
228 region->num_locks, region->num_weak_unlocks, (int)region->zero_filled);
229 }
230 LIST_FOR_EACH_ENTRY(region, &memfd_file->unlocked_mapped_allocated_regions, list) {
231 DBG("UNLOCKED MAPPED ALLOCATED block: offset %d, size %d, map=%p, locks=%d, weak=%d, z=%d\n",
232 region->offset, region->size, region->map,
233 region->num_locks, region->num_weak_unlocks, (int)region->zero_filled);
234 }
235 LIST_FOR_EACH_ENTRY(region, &memfd_file->weak_unlocked_mapped_allocated_regions, list) {
236 DBG("WEAK UNLOCKED MAPPED ALLOCATED block: offset %d, size %d, map=%p, locks=%d, weak=%d, z=%d\n",
237 region->offset, region->size, region->map,
238 region->num_locks, region->num_weak_unlocks, (int)region->zero_filled);
239 }
240 }
241
242 static void
243 debug_dump_allocation_state(struct nine_allocation *allocation)
244 {
245 switch(allocation->allocation_type) {
246 case NINE_MEMFD_ALLOC:
247 DBG("Allocation is stored in this memfd file:\n");
248 debug_dump_memfd_state(allocation->memory.memfd.file, true);
249 DBG("Allocation is offset: %d, size: %d\n",
250 allocation->memory.memfd.region->offset, allocation->memory.memfd.region->size);
251 break;
252 case NINE_MEMFD_SUBALLOC:
253 DBG("Allocation is suballocation at relative offset %d of this allocation:\n",
254 allocation->memory.submemfd.relative_offset);
255 DBG("Parent allocation is stored in this memfd file:\n");
256 debug_dump_memfd_state(allocation->memory.submemfd.parent->file, false);
257 DBG("Parent allocation is offset: %d, size: %d\n",
258 allocation->memory.submemfd.parent->region->offset,
259 allocation->memory.submemfd.parent->region->size);
260 break;
261 case NINE_MALLOC_ALLOC:
262 DBG("Allocation is a standard malloc\n");
263 break;
264 case NINE_EXTERNAL_ALLOC:
265 DBG("Allocation is a suballocation of a standard malloc or an external allocation\n");
266 break;
267 default:
268 assert(false);
269 }
270 }
271
272 #else
273
274 static void
275 debug_dump_memfd_state(struct nine_memfd_file *memfd_file, bool details)
276 {
277 (void)memfd_file;
278 (void)details;
279 }
280
281 static void
282 debug_dump_allocation_state(struct nine_allocation *allocation)
283 {
284 (void)allocation;
285 }
286
287 #endif
288
289 static void
290 debug_dump_allocator_state(struct nine_allocator *allocator)
291 {
292 DBG("SURFACE ALLOCATOR STATUS:\n");
293 DBG("Total allocated: %lld\n", allocator->total_allocations);
294 DBG("Total virtual memory locked: %lld\n", allocator->total_locked_memory);
295 DBG("Virtual memory used: %lld / %lld\n", allocator->total_virtual_memory, allocator->total_virtual_memory_limit);
296 DBG("Num memfd files: %d / %d\n", allocator->num_fd, allocator->num_fd_max);
297 }
298
299
300 /* Retrieve file used for the storage of the content of this allocation.
301 * NULL if not using memfd */
302 static struct nine_memfd_file *
303 nine_get_memfd_file_backing(struct nine_allocation *allocation)
304 {
305 if (allocation->allocation_type > NINE_MEMFD_SUBALLOC)
306 return NULL;
307 if (allocation->allocation_type == NINE_MEMFD_ALLOC)
308 return allocation->memory.memfd.file;
309 return allocation->memory.submemfd.parent->file;
310 }
311
312 /* Retrieve region used for the storage of the content of this allocation.
313 * NULL if not using memfd */
314 static struct nine_memfd_file_region *
315 nine_get_memfd_region_backing(struct nine_allocation *allocation)
316 {
317 if (allocation->allocation_type > NINE_MEMFD_SUBALLOC)
318 return NULL;
319 if (allocation->allocation_type == NINE_MEMFD_ALLOC)
320 return allocation->memory.memfd.region;
321 return allocation->memory.submemfd.parent->region;
322 }
323
324 static void move_region(struct list_head *tail, struct nine_memfd_file_region *region)
325 {
326 /* Remove from previous list (if any) */
327 list_delinit(&region->list);
328 /* Insert in new list (last) */
329 list_addtail(&region->list, tail);
330 }
331
332 #if 0
333 static void move_region_ordered(struct list_head *tail, struct nine_memfd_file_region *region)
334 {
335 struct nine_memfd_file_region *cur_region;
336 struct list_head *insertion_point = tail;
337
338 /* Remove from previous list (if any) */
339 list_delinit(&region->list);
340
341 LIST_FOR_EACH_ENTRY(cur_region, tail, list) {
342 if (cur_region->offset > region->offset)
343 break;
344 insertion_point = &cur_region->list;
345 }
346 /* Insert just before cur_region */
347 list_add(&region->list, insertion_point);
348 }
349 #endif
350
351 static void move_region_ordered_merge(struct nine_allocator *allocator, struct list_head *tail, struct nine_memfd_file_region *region)
352 {
353 struct nine_memfd_file_region *p, *cur_region = NULL, *prev_region = NULL;
354
355 /* Remove from previous list (if any) */
356 list_delinit(&region->list);
357
358 LIST_FOR_EACH_ENTRY(p, tail, list) {
359 cur_region = p;
360 if (cur_region->offset > region->offset)
361 break;
362 prev_region = cur_region;
363 }
364
365 /* Insert after prev_region and before cur_region. Try to merge */
366 if (prev_region && ((prev_region->offset + prev_region->size) == region->offset)) {
367 if (cur_region && (cur_region->offset == (region->offset + region->size))) {
368 /* Merge all three regions */
369 prev_region->size += region->size + cur_region->size;
370 prev_region->zero_filled = prev_region->zero_filled && region->zero_filled && cur_region->zero_filled;
371 list_del(&cur_region->list);
372 slab_free_st(&allocator->region_pool, region);
373 slab_free_st(&allocator->region_pool, cur_region);
374 } else {
375 prev_region->size += region->size;
376 prev_region->zero_filled = prev_region->zero_filled && region->zero_filled;
377 slab_free_st(&allocator->region_pool, region);
378 }
379 } else if (cur_region && (cur_region->offset == (region->offset + region->size))) {
380 cur_region->offset = region->offset;
381 cur_region->size += region->size;
382 cur_region->zero_filled = region->zero_filled && cur_region->zero_filled;
383 slab_free_st(&allocator->region_pool, region);
384 } else {
385 list_add(&region->list, prev_region ? &prev_region->list : tail);
386 }
387 }
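/* Worked example of the merge above (offsets in KB, illustration only):
 * with a free list of [0,4) and [8,12), inserting the region [4,8) collapses
 * all three into a single [0,12) region; if only one neighbour is adjacent the
 * region is folded into that neighbour, and otherwise it is simply linked in
 * at its sorted position. */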
388
389 static struct nine_memfd_file_region *allocate_region(struct nine_allocator *allocator, unsigned offset, unsigned size) {
390 struct nine_memfd_file_region *region = slab_alloc_st(&allocator->allocation_pool);
391 if (!region)
392 return NULL;
393 region->offset = offset;
394 region->size = size;
395 region->num_locks = 0;
396 region->num_weak_unlocks = 0;
397 region->map = NULL;
398 region->zero_filled = false;
399 list_inithead(&region->list);
400 return region;
401 }
402
403 /* Go through memfd allocated files, and try to use unused memory for the requested allocation.
404 * Returns whether it succeeded */
405 static bool
406 insert_new_allocation(struct nine_allocator *allocator, struct nine_allocation *new_allocation, unsigned allocation_size)
407 {
408 int memfd_index;
409 struct nine_memfd_file *memfd_file, *best_memfd_file;
410 struct nine_memfd_file_region *region, *best_region, *new_region;
411
412
413 /* Find the smallest free region that is at least as large as the requested
414 * size inside the memfd files. */
415 int min_blocksize = INT_MAX;
416
417 for (memfd_index = 0; memfd_index < allocator->num_fd; memfd_index++) {
418 memfd_file = (void*)allocator->memfd_pool + memfd_index*sizeof(struct nine_memfd_file);
419
420 LIST_FOR_EACH_ENTRY(region, &memfd_file->free_regions, list) {
421 if (region->size <= min_blocksize && region->size >= allocation_size) {
422 min_blocksize = region->size;
423 best_region = region;
424 best_memfd_file = memfd_file;
425 }
426 }
427 if (min_blocksize == allocation_size)
428 break;
429 }
430
431 /* The allocation doesn't fit in any memfd file */
432 if (min_blocksize == INT_MAX)
433 return false;
434
435 /* Target region found */
436 /* Move from free to unmapped allocated */
437 best_region->size = DIVUP(allocation_size, allocator->page_size) * allocator->page_size;
438 assert(min_blocksize >= best_region->size);
439 move_region(&best_memfd_file->unmapped_allocated_regions, best_region);
440 new_allocation->memory.memfd.region = best_region;
441 new_allocation->memory.memfd.file = best_memfd_file;
442
443 /* If the original region is bigger than needed, add new region with remaining space */
444 min_blocksize -= best_region->size;
445 if (min_blocksize > 0) {
446 new_region = allocate_region(allocator, best_region->offset + best_region->size, min_blocksize);
447 new_region->zero_filled = best_region->zero_filled;
448 move_region_ordered_merge(allocator, &best_memfd_file->free_regions, new_region);
449 }
450 allocator->total_allocations += best_region->size;
451 return true;
452 }
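/* Illustration of the best-fit search above (sizes arbitrary): with free regions
 * of 24KB, 64KB and 16KB spread across the memfd files, a 16KB request takes the
 * 16KB region exactly, while a 20KB request takes the 24KB region and returns
 * the trailing 4KB to that file's free list. */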
453
454 /* Go through allocations with unlocks waiting on pending_counter being 0.
455 * If 0 is indeed reached, update the allocation status */
456 static void
457 nine_flush_pending_releases(struct nine_allocator *allocator)
458 {
459 struct nine_allocation *allocation, *ptr;
460 LIST_FOR_EACH_ENTRY_SAFE(allocation, ptr, &allocator->pending_releases, list_release) {
461 assert(allocation->locks_on_counter > 0);
462 /* If pending_counter reached 0, remove from the list and update the status */
463 if (*allocation->pending_counter == 0) {
464 struct nine_memfd_file *memfd_file = nine_get_memfd_file_backing(allocation);
465 struct nine_memfd_file_region *region = nine_get_memfd_region_backing(allocation);
466 region->num_locks -= allocation->locks_on_counter;
467 allocation->locks_on_counter = 0;
468 list_delinit(&allocation->list_release);
469 if (region->num_locks == 0) {
470 /* Move to the correct list */
471 if (region->num_weak_unlocks)
472 move_region(&memfd_file->weak_unlocked_mapped_allocated_regions, region);
473 else
474 move_region(&memfd_file->unlocked_mapped_allocated_regions, region);
475 allocator->total_locked_memory -= region->size;
476 }
477 }
478 }
479 }
480
481 static void
482 nine_free_internal(struct nine_allocator *allocator, struct nine_allocation *allocation);
483
484 static void
485 nine_flush_pending_frees(struct nine_allocator *allocator)
486 {
487 struct nine_allocation *allocation, *ptr;
488
489 pthread_mutex_lock(&allocator->mutex_pending_frees);
490 /* The order of release matters as suballocations are supposed to be released first */
491 LIST_FOR_EACH_ENTRY_SAFE(allocation, ptr, &allocator->pending_frees, list_free) {
492 /* Set the allocation in an unlocked state, and then free it */
493 if (allocation->allocation_type == NINE_MEMFD_ALLOC ||
494 allocation->allocation_type == NINE_MEMFD_SUBALLOC) {
495 struct nine_memfd_file *memfd_file = nine_get_memfd_file_backing(allocation);
496 struct nine_memfd_file_region *region = nine_get_memfd_region_backing(allocation);
497 if (region->num_locks != 0) {
498 region->num_locks = 0;
499 allocator->total_locked_memory -= region->size;
500 /* Useless, but to keep consistency */
501 move_region(&memfd_file->unlocked_mapped_allocated_regions, region);
502 }
503 region->num_weak_unlocks = 0;
504 allocation->weak_unlock = false;
505 allocation->locks_on_counter = 0;
506 list_delinit(&allocation->list_release);
507 }
508 list_delinit(&allocation->list_free);
509 nine_free_internal(allocator, allocation);
510 }
511 pthread_mutex_unlock(&allocator->mutex_pending_frees);
512 }
513
514 /* Unmap a mapped region of a memfd file and move it
515 * to the unmapped allocated regions list */
516 static void
517 nine_memfd_unmap_region(struct nine_allocator *allocator,
518 struct nine_memfd_file *memfd_file,
519 struct nine_memfd_file_region *region)
520 {
521 DBG("Unmapping memfd mapped region at %d: size: %d, map=%p, locks=%d, weak=%d\n",
522 region->offset, region->size, region->map,
523 region->num_locks, region->num_weak_unlocks);
524 assert(region->map != NULL);
525
526 if (munmap(region->map, region->size) != 0)
527 fprintf(stderr, "Error on unmapping, errno=%d\n", (int)errno);
528
529 region->map = NULL;
530 /* Move from one of the mapped region list to the unmapped one */
531 move_region(&memfd_file->unmapped_allocated_regions, region);
532 allocator->total_virtual_memory -= region->size;
533 }
534
535 /* Deallocate a region of a memfd file */
536 static void
537 remove_allocation(struct nine_allocator *allocator, struct nine_memfd_file *memfd_file, struct nine_memfd_file_region *region)
538 {
539 assert(region->num_locks == 0);
540 region->num_weak_unlocks = 0;
541 /* Move from mapped region to unmapped region */
542 if (region->map) {
543 if (likely(!region->zero_filled)) {
544 /* As the region is mapped, it is likely the pages are allocated.
545 * Do the memset now for when we allocate again. It is much faster now,
546 * as the pages are allocated. */
547 DBG("memset on data=%p, size %d\n", region->map, region->size);
548 memset(region->map, 0, region->size);
549 region->zero_filled = true;
550 }
551 nine_memfd_unmap_region(allocator, memfd_file, region);
552 }
553 /* Move from unmapped region to free region */
554 allocator->total_allocations -= region->size;
555 move_region_ordered_merge(allocator, &memfd_file->free_regions, region);
556 }
557
558 /* Unmap the unlocked mapped regions of the memfd_index-th file.
559 * If weak is true, unmap the regions with weak unlocks instead */
560 static void
561 nine_memfd_try_unmap_file(struct nine_allocator *allocator,
562 int memfd_index,
563 bool weak)
564 {
565 struct nine_memfd_file *memfd_file = (void*)allocator->memfd_pool + memfd_index*sizeof(struct nine_memfd_file);
566 struct nine_memfd_file_region *region, *ptr;
567 DBG("memfd file at %d: fd: %d, filesize: %d\n",
568 memfd_index, memfd_file->fd, memfd_file->filesize);
569 debug_dump_memfd_state(memfd_file, true);
570 LIST_FOR_EACH_ENTRY_SAFE(region, ptr,
571 weak ?
572 &memfd_file->weak_unlocked_mapped_allocated_regions :
573 &memfd_file->unlocked_mapped_allocated_regions,
574 list) {
575 nine_memfd_unmap_region(allocator, memfd_file, region);
576 }
577 }
578
579 /* Unmap files until we are below the virtual memory target limit.
580 * If unmap_everything_possible is set, ignore the limit and unmap
581 * all that can be unmapped. */
582 static void
583 nine_memfd_files_unmap(struct nine_allocator *allocator,
584 bool unmap_everything_possible)
585 {
586 long long memory_limit = unmap_everything_possible ?
587 0 : allocator->total_virtual_memory_limit;
588 int i;
589
590 /* We are below the limit. Do nothing */
591 if (memory_limit >= allocator->total_virtual_memory)
592 return;
593
594 /* Update allocations with pending releases */
595 nine_flush_pending_releases(allocator);
596
597 DBG("Trying to unmap files with no weak unlock (%lld / %lld)\n",
598 allocator->total_virtual_memory, memory_limit);
599
600 /* Try to release everything with no weak releases.
601 * Those have data not needed for a long time (and
602 * possibly ever). */
603 for (i = 0; i < allocator->num_fd; i++) {
604 nine_memfd_try_unmap_file(allocator, i, false);
605 if (memory_limit >= allocator->total_virtual_memory)
606 return;
607 }
608
609 DBG("Trying to unmap files even with weak unlocks (%lld / %lld)\n",
610 allocator->total_virtual_memory, memory_limit);
611
612 /* This wasn't enough. Also release files with weak releases */
613 for (i = 0; i < allocator->num_fd; i++) {
614 nine_memfd_try_unmap_file(allocator, i, true);
615 /* Stop if the target is reached */
616 if (memory_limit >= allocator->total_virtual_memory)
617 return;
618 }
619
620 if (!unmap_everything_possible)
621 return;
622
623 /* If there are some pending uploads, execute them,
624 * and retry. */
625 if (list_is_empty(&allocator->pending_releases))
626 return;
627 nine_csmt_process(allocator->device);
628 nine_flush_pending_releases(allocator);
629
630 DBG("Retrying after flushing (%lld / %lld)\n",
631 allocator->total_virtual_memory, memory_limit);
632
633 for (i = 0; i < allocator->num_fd; i++) {
634 nine_memfd_try_unmap_file(allocator, i, false);
635 nine_memfd_try_unmap_file(allocator, i, true);
636 }
637 /* We have done all we could */
638 }
639
640 /* Map a given region of a memfd file */
641 static bool
642 nine_memfd_region_map(struct nine_allocator *allocator, struct nine_memfd_file *memfd_file, struct nine_memfd_file_region *region)
643 {
644 if (region->map != NULL)
645 return true;
646
647 debug_dump_memfd_state(memfd_file, true);
648 nine_memfd_files_unmap(allocator, false);
649
650 void *buf = mmap(NULL, region->size, PROT_READ | PROT_WRITE, MAP_SHARED, memfd_file->fd, region->offset);
651
652 if (buf == MAP_FAILED && errno == ENOMEM) {
653 DBG("Failed to mmap a memfd file - trying to unmap other files\n");
654 nine_memfd_files_unmap(allocator, true);
655 buf = mmap(NULL, region->size, PROT_READ | PROT_WRITE, MAP_SHARED, memfd_file->fd, region->offset);
656 }
657 if (buf == MAP_FAILED) {
658 DBG("Failed to mmap a memfd file, errno=%d\n", (int)errno);
659 return false;
660 }
661 region->map = buf;
662 /* no need to move to an unlocked mapped regions list, the caller will handle the list */
663 allocator->total_virtual_memory += region->size;
664 assert((uintptr_t)buf % NINE_ALLOCATION_ALIGNMENT == 0); /* mmap should be page_size aligned, so it should be fine */
665
666 return true;
667 }
668
669 /* Allocate some memory with memfd. Returns true if successful. */
670 static bool
671 nine_memfd_allocator(struct nine_allocator *allocator,
672 struct nine_allocation *new_allocation,
673 unsigned allocation_size)
674 {
675 struct nine_memfd_file *memfd_file;
676 struct nine_memfd_file_region *region;
677
678 allocation_size = DIVUP(allocation_size, allocator->page_size) * allocator->page_size;
679 new_allocation->allocation_type = NINE_MEMFD_ALLOC;
680 new_allocation->locks_on_counter = 0;
681 new_allocation->pending_counter = NULL;
682 new_allocation->weak_unlock = false;
683 list_inithead(&new_allocation->list_free);
684 list_inithead(&new_allocation->list_release);
685
686 /* Try to find free space in a file already allocated */
687 if (insert_new_allocation(allocator, new_allocation, allocation_size))
688 return true;
689
690 /* No - allocate new memfd file */
691
692 if (allocator->num_fd == allocator->num_fd_max)
693 return false; /* Too many memfd files */
694
695 allocator->num_fd++;
696 memfd_file = (void*)allocator->memfd_pool + (allocator->num_fd-1)*sizeof(struct nine_memfd_file);
697 /* If the allocation size is above the memfd file default size, use a bigger size */
698 memfd_file->filesize = MAX2(allocation_size, allocator->min_file_size);
699
700 memfd_file->fd = memfd_create("gallium_nine_ram", 0);
701 if (memfd_file->fd == -1) {
702 DBG("Failed to created a memfd file, errno=%d\n", (int)errno);
703 allocator->num_fd--;
704 return false;
705 }
706
707 if (ftruncate(memfd_file->fd, memfd_file->filesize) != 0) {
708 DBG("Failed to resize a memfd file, errno=%d\n", (int)errno);
709 close(memfd_file->fd);
710 allocator->num_fd--;
711 return false;
712 }
713
714 list_inithead(&memfd_file->free_regions);
715 list_inithead(&memfd_file->unmapped_allocated_regions);
716 list_inithead(&memfd_file->locked_mapped_allocated_regions);
717 list_inithead(&memfd_file->unlocked_mapped_allocated_regions);
718 list_inithead(&memfd_file->weak_unlocked_mapped_allocated_regions);
719
720 /* Initialize the memfd file with empty region and the allocation */
721 region = allocate_region(allocator, 0, allocation_size);
722 region->zero_filled = true; /* ftruncate does zero-fill the new data */
723 list_add(&region->list, &memfd_file->unmapped_allocated_regions);
724 new_allocation->memory.memfd.file = memfd_file;
725 new_allocation->memory.memfd.region = region;
726 allocator->total_allocations += allocation_size;
727
728 if (allocation_size == memfd_file->filesize)
729 return true;
730
731 /* Add empty region */
732 region = allocate_region(allocator, allocation_size, memfd_file->filesize - allocation_size);
733 region->zero_filled = true; /* ftruncate does zero-fill the new data */
734 list_add(&region->list, &memfd_file->free_regions);
735
736 return true;
737 }
738
739 /* Allocate memory */
740 struct nine_allocation *
741 nine_allocate(struct nine_allocator *allocator, unsigned size)
742 {
743
744 struct nine_allocation *new_allocation = slab_alloc_st(&allocator->allocation_pool);
745 debug_dump_allocator_state(allocator);
746 if (!new_allocation)
747 return NULL;
748
749 nine_flush_pending_frees(allocator);
750
751 /* Restrict to >= page_size to prevent having too much fragmentation, as the size of
752 * allocations is rounded to the next page_size multiple. */
753 if (size >= allocator->page_size && allocator->total_virtual_memory_limit >= 0 &&
754 nine_memfd_allocator(allocator, new_allocation, size)) {
755 struct nine_memfd_file_region *region = new_allocation->memory.memfd.region;
756 if (!region->zero_filled) {
757 void *data = nine_get_pointer(allocator, new_allocation);
758 if (!data) {
759 ERR("INTERNAL MMAP FOR NEW ALLOCATION FAILED\n");
760 nine_free(allocator, new_allocation);
761 return NULL;
762 }
763 DBG("memset on data=%p, size %d\n", data, region->size);
764 memset(data, 0, region->size);
765 region->zero_filled = true;
766 /* Even though the user usually fills afterward, we don't weakrelease.
767 * The reason is suballocations don't affect the weakrelease state of their
768 * parents. Thus if only suballocations are accessed, the release would stay
769 * weak forever. */
770 nine_pointer_strongrelease(allocator, new_allocation);
771 }
772 DBG("ALLOCATION SUCCESSFUL\n");
773 debug_dump_allocation_state(new_allocation);
774 return new_allocation;
775 }
776
777 void *data = align_calloc(size, NINE_ALLOCATION_ALIGNMENT);
778 if (!data) {
779 DBG("ALLOCATION FAILED\n");
780 return NULL;
781 }
782
783 new_allocation->allocation_type = NINE_MALLOC_ALLOC;
784 new_allocation->memory.malloc.buf = data;
785 new_allocation->memory.malloc.allocation_size = size;
786 list_inithead(&new_allocation->list_free);
787 allocator->total_allocations += size;
788 allocator->total_locked_memory += size;
789 allocator->total_virtual_memory += size;
790 DBG("ALLOCATION SUCCESSFUL\n");
791 debug_dump_allocation_state(new_allocation);
792 return new_allocation;
793 }
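/* Typical lifecycle of an allocation returned by nine_allocate (a sketch of how
 * the helpers in this file fit together; fill_data() is a hypothetical caller):
 *
 *   struct nine_allocation *alloc = nine_allocate(allocator, size);
 *   void *data = nine_get_pointer(allocator, alloc);   // maps the region and locks it
 *   fill_data(data, size);
 *   nine_pointer_weakrelease(allocator, alloc);        // unlock, hinting it will be locked again soon
 *   ...later, once the content is no longer needed...
 *   nine_free(allocator, alloc);                       // or nine_free_worker() from the CSMT thread
 */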
794
795 /* Release memory */
796 static void
797 nine_free_internal(struct nine_allocator *allocator, struct nine_allocation *allocation)
798 {
799 DBG("RELEASING ALLOCATION\n");
800 debug_dump_allocation_state(allocation);
801 if (allocation->allocation_type == NINE_MALLOC_ALLOC) {
802 allocator->total_allocations -= allocation->memory.malloc.allocation_size;
803 allocator->total_locked_memory -= allocation->memory.malloc.allocation_size;
804 allocator->total_virtual_memory -= allocation->memory.malloc.allocation_size;
805 align_free(allocation->memory.malloc.buf);
806 } else if (allocation->allocation_type == NINE_MEMFD_ALLOC ||
807 allocation->allocation_type == NINE_MEMFD_SUBALLOC) {
808 struct nine_memfd_file *memfd_file = nine_get_memfd_file_backing(allocation);
809 struct nine_memfd_file_region *region = nine_get_memfd_region_backing(allocation);
810 if (allocation->weak_unlock)
811 region->num_weak_unlocks--;
812 if (allocation->allocation_type == NINE_MEMFD_ALLOC)
813 remove_allocation(allocator, memfd_file, region);
814 }
815
816 slab_free_st(&allocator->allocation_pool, allocation);
817 debug_dump_allocator_state(allocator);
818 }
819
820
821 void
822 nine_free(struct nine_allocator *allocator, struct nine_allocation *allocation)
823 {
824 nine_flush_pending_frees(allocator);
825 nine_flush_pending_releases(allocator);
826 nine_free_internal(allocator, allocation);
827 }
828
829 /* Called from the worker thread. Similar to nine_free, except that we are not in the main thread
830 * and thus must not modify the allocator structures, except for the fields reserved
831 * for the worker. In addition, the allocation is allowed to not be unlocked yet (the pending
832 * release will unlock it) */
833 void nine_free_worker(struct nine_allocator *allocator, struct nine_allocation *allocation)
834 {
835 /* Add the allocation to the list of pending allocations to free */
836 pthread_mutex_lock(&allocator->mutex_pending_frees);
837 /* The order of free matters as suballocations are supposed to be released first */
838 list_addtail(&allocation->list_free, &allocator->pending_frees);
839 pthread_mutex_unlock(&allocator->mutex_pending_frees);
840 }
841
842 /* Lock an allocation, and retrieve the pointer */
843 void *
844 nine_get_pointer(struct nine_allocator *allocator, struct nine_allocation *allocation)
845 {
846 struct nine_memfd_file *memfd_file;
847 struct nine_memfd_file_region *region;
848
849 nine_flush_pending_releases(allocator);
850 DBG("allocation_type: %d\n", allocation->allocation_type);
851
852 if (allocation->allocation_type == NINE_MALLOC_ALLOC)
853 return allocation->memory.malloc.buf;
854 if (allocation->allocation_type == NINE_EXTERNAL_ALLOC)
855 return allocation->memory.external.buf;
856
857 memfd_file = nine_get_memfd_file_backing(allocation);
858 region = nine_get_memfd_region_backing(allocation);
859 if (!nine_memfd_region_map(allocator, memfd_file, region)) {
860 DBG("Couldn't map memfd region for get_pointer\n");
861 return NULL;
862 }
863
864 move_region(&memfd_file->locked_mapped_allocated_regions, region); /* Note: redundant if region->num_locks */
865 region->num_locks++;
866
867 if (region->num_locks == 1)
868 allocator->total_locked_memory += region->size;
869 if (allocation->weak_unlock)
870 region->num_weak_unlocks--;
871 allocation->weak_unlock = false;
872 region->zero_filled = false;
873
874
875 if (allocation->allocation_type == NINE_MEMFD_ALLOC)
876 return region->map;
877 if (allocation->allocation_type == NINE_MEMFD_SUBALLOC)
878 return region->map + allocation->memory.submemfd.relative_offset;
879
880 assert(false);
881 return NULL;
882 }
883
884 /* Unlock an allocation, but with hint that we might lock again soon */
885 void
886 nine_pointer_weakrelease(struct nine_allocator *allocator, struct nine_allocation *allocation)
887 {
888 struct nine_memfd_file_region *region;
889 if (allocation->allocation_type > NINE_MEMFD_SUBALLOC)
890 return;
891
892 region = nine_get_memfd_region_backing(allocation);
893 if (!allocation->weak_unlock)
894 region->num_weak_unlocks++;
895 allocation->weak_unlock = true;
896 region->num_locks--;
897 if (region->num_locks == 0) {
898 struct nine_memfd_file *memfd_file = nine_get_memfd_file_backing(allocation);
899 allocator->total_locked_memory -= region->size;
900 move_region(&memfd_file->weak_unlocked_mapped_allocated_regions, region);
901 }
902 }
903
904 /* Unlock an allocation */
905 void
906 nine_pointer_strongrelease(struct nine_allocator *allocator, struct nine_allocation *allocation)
907 {
908 struct nine_memfd_file_region *region;
909 if (allocation->allocation_type > NINE_MEMFD_SUBALLOC)
910 return;
911
912 region = nine_get_memfd_region_backing(allocation);
913 region->num_locks--;
914 if (region->num_locks == 0) {
915 struct nine_memfd_file *memfd_file = nine_get_memfd_file_backing(allocation);
916 allocator->total_locked_memory -= region->size;
917 if (region->num_weak_unlocks)
918 move_region(&memfd_file->weak_unlocked_mapped_allocated_regions, region);
919 else
920 move_region(&memfd_file->unlocked_mapped_allocated_regions, region);
921 }
922 }
923
924 /* Delay a release to when a given counter becomes zero */
925 void
926 nine_pointer_delayedstrongrelease(struct nine_allocator *allocator, struct nine_allocation *allocation, unsigned *counter)
927 {
928 if (allocation->allocation_type > NINE_MEMFD_SUBALLOC)
929 return;
930
931 assert(allocation->pending_counter == NULL || allocation->pending_counter == counter);
932 allocation->pending_counter = counter;
933 allocation->locks_on_counter++;
934
935 if (list_is_empty(&allocation->list_release))
936 list_add(&allocation->list_release, &allocator->pending_releases);
937 }
938
939 /* Create a suballocation of an allocation */
940 struct nine_allocation *
941 nine_suballocate(struct nine_allocator* allocator, struct nine_allocation *allocation, int offset)
942 {
943 struct nine_allocation *new_allocation = slab_alloc_st(&allocator->allocation_pool);
944 if (!new_allocation)
945 return NULL;
946
947 DBG("Suballocate allocation at offset: %d\n", offset);
948 assert(allocation->allocation_type != NINE_MEMFD_SUBALLOC);
949 list_inithead(&new_allocation->list_free);
950
951 if (allocation->allocation_type != NINE_MEMFD_ALLOC) {
952 new_allocation->allocation_type = NINE_EXTERNAL_ALLOC;
953 if (allocation->allocation_type == NINE_MALLOC_ALLOC)
954 new_allocation->memory.external.buf = allocation->memory.malloc.buf + offset;
955 else
956 new_allocation->memory.external.buf = allocation->memory.external.buf + offset;
957 return new_allocation;
958 }
959 new_allocation->allocation_type = NINE_MEMFD_SUBALLOC;
960 new_allocation->memory.submemfd.parent = &allocation->memory.memfd;
961 new_allocation->memory.submemfd.relative_offset = offset;
962 new_allocation->locks_on_counter = 0;
963 new_allocation->pending_counter = NULL;
964 new_allocation->weak_unlock = false;
965 list_inithead(&new_allocation->list_release);
966 debug_dump_allocation_state(new_allocation);
967 return new_allocation;
968 }
969
970 /* Wrap an external pointer as an allocation */
971 struct nine_allocation *
972 nine_wrap_external_pointer(struct nine_allocator* allocator, void* data)
973 {
974 struct nine_allocation *new_allocation = slab_alloc_st(&allocator->allocation_pool);
975 if (!new_allocation)
976 return NULL;
977 DBG("Wrapping external pointer: %p\n", data);
978 new_allocation->allocation_type = NINE_EXTERNAL_ALLOC;
979 new_allocation->memory.external.buf = data;
980 list_inithead(&new_allocation->list_free);
981 return new_allocation;
982 }
983
984 struct nine_allocator *
985 nine_allocator_create(struct NineDevice9 *device, int memfd_virtualsizelimit)
986 {
987 struct nine_allocator* allocator = MALLOC(sizeof(struct nine_allocator));
988
989 if (!allocator)
990 return NULL;
991
992 allocator->device = device;
993 allocator->page_size = sysconf(_SC_PAGESIZE);
994 assert(allocator->page_size == 4 << 10);
995 allocator->num_fd_max = (memfd_virtualsizelimit >= 0) ? MIN2(128, sysconf(_SC_OPEN_MAX)) : 0;
996 allocator->min_file_size = DIVUP(100 * (1 << 20), allocator->page_size) * allocator->page_size; /* 100MB files */
997 allocator->total_allocations = 0;
998 allocator->total_locked_memory = 0;
999 allocator->total_virtual_memory = 0;
1000 allocator->total_virtual_memory_limit = memfd_virtualsizelimit * (1 << 20);
1001 allocator->num_fd = 0;
1002
1003 DBG("Allocator created (ps: %d; fm: %d)\n", allocator->page_size, allocator->num_fd_max);
1004
1005 slab_create(&allocator->allocation_pool, sizeof(struct nine_allocation), 4096);
1006 slab_create(&allocator->region_pool, sizeof(struct nine_memfd_file_region), 4096);
1007 allocator->memfd_pool = CALLOC(allocator->num_fd_max, sizeof(struct nine_memfd_file));
1008 list_inithead(&allocator->pending_releases);
1009 list_inithead(&allocator->pending_frees);
1010 pthread_mutex_init(&allocator->mutex_pending_frees, NULL);
1011 return allocator;
1012 }
1013
1014 void
1015 nine_allocator_destroy(struct nine_allocator* allocator)
1016 {
1017 int i;
1018 DBG("DESTROYING ALLOCATOR\n");
1019 debug_dump_allocator_state(allocator);
1020 nine_flush_pending_releases(allocator);
1021 nine_flush_pending_frees(allocator);
1022 nine_memfd_files_unmap(allocator, true);
1023 pthread_mutex_destroy(&allocator->mutex_pending_frees);
1024
1025 assert(list_is_empty(&allocator->pending_frees));
1026 assert(list_is_empty(&allocator->pending_releases));
1027 for (i = 0; i < allocator->num_fd; i++) {
1028 debug_dump_memfd_state(&allocator->memfd_pool[i], true);
1029 assert(list_is_empty(&allocator->memfd_pool[i].locked_mapped_allocated_regions));
1030 assert(list_is_empty(&allocator->memfd_pool[i].weak_unlocked_mapped_allocated_regions));
1031 assert(list_is_empty(&allocator->memfd_pool[i].unlocked_mapped_allocated_regions));
1032 assert(list_is_singular(&allocator->memfd_pool[i].free_regions));
1033 slab_free_st(&allocator->region_pool,
1034 list_first_entry(&allocator->memfd_pool[i].free_regions,
1035 struct nine_memfd_file_region, list));
1036 close(allocator->memfd_pool[i].fd);
1037 }
1038 slab_destroy(&allocator->allocation_pool);
1039 slab_destroy(&allocator->region_pool);
1040 FREE(allocator->memfd_pool);
1041 FREE(allocator);
1042 }
1043
1044 #else
1045
1046 struct nine_allocation {
1047 unsigned is_external;
1048 void *external;
1049 };
1050
1051 struct nine_allocator {
1052 struct slab_mempool external_allocation_pool;
1053 pthread_mutex_t mutex_slab;
1054 };
1055
1056 struct nine_allocation *
1057 nine_allocate(struct nine_allocator *allocator, unsigned size)
1058 {
1059 struct nine_allocation *allocation;
1060 (void)allocator;
1061 assert(sizeof(struct nine_allocation) <= NINE_ALLOCATION_ALIGNMENT);
1062 allocation = align_calloc(size + NINE_ALLOCATION_ALIGNMENT, NINE_ALLOCATION_ALIGNMENT);
1063 allocation->is_external = false;
1064 return allocation;
1065 }
1066
1067
1068 void nine_free(struct nine_allocator *allocator, struct nine_allocation *allocation)
1069 {
1070 if (allocation->is_external) {
1071 pthread_mutex_lock(&allocator->mutex_slab);
1072 slab_free_st(&allocator->external_allocation_pool, allocation);
1073 pthread_mutex_unlock(&allocator->mutex_slab);
1074 } else
1075 align_free(allocation);
1076 }
1077
1078 void nine_free_worker(struct nine_allocator *allocator, struct nine_allocation *allocation)
1079 {
1080 nine_free(allocator, allocation);
1081 }
1082
1083 void *nine_get_pointer(struct nine_allocator *allocator, struct nine_allocation *allocation)
1084 {
1085 (void)allocator;
1086 if (allocation->is_external)
1087 return allocation->external;
1088 return (uint8_t *)allocation + NINE_ALLOCATION_ALIGNMENT;
1089 }
1090
1091 void nine_pointer_weakrelease(struct nine_allocator *allocator, struct nine_allocation *allocation)
1092 {
1093 (void)allocator;
1094 (void)allocation;
1095 }
1096
1097 void nine_pointer_strongrelease(struct nine_allocator *allocator, struct nine_allocation *allocation)
1098 {
1099 (void)allocator;
1100 (void)allocation;
1101 }
1102
1103 void nine_pointer_delayedstrongrelease(struct nine_allocator *allocator,
1104 struct nine_allocation *allocation,
1105 unsigned *counter)
1106 {
1107 (void)allocator;
1108 (void)allocation;
1109 (void)counter;
1110 }
1111
1112 struct nine_allocation *
1113 nine_suballocate(struct nine_allocator* allocator, struct nine_allocation *allocation, int offset)
1114 {
1115 struct nine_allocation *new_allocation;
1116 pthread_mutex_lock(&allocator->mutex_slab);
1117 new_allocation = slab_alloc_st(&allocator->external_allocation_pool);
1118 pthread_mutex_unlock(&allocator->mutex_slab);
1119 new_allocation->is_external = true;
1120 new_allocation->external = (uint8_t *)allocation + NINE_ALLOCATION_ALIGNMENT + offset;
1121 return new_allocation;
1122 }
1123
1124 struct nine_allocation *
1125 nine_wrap_external_pointer(struct nine_allocator* allocator, void* data)
1126 {
1127 struct nine_allocation *new_allocation;
1128 pthread_mutex_lock(&allocator->mutex_slab);
1129 new_allocation = slab_alloc_st(&allocator->external_allocation_pool);
1130 pthread_mutex_unlock(&allocator->mutex_slab);
1131 new_allocation->is_external = true;
1132 new_allocation->external = data;
1133 return new_allocation;
1134 }
1135
1136 struct nine_allocator *
1137 nine_allocator_create(struct NineDevice9 *device, int memfd_virtualsizelimit)
1138 {
1139 struct nine_allocator* allocator = MALLOC(sizeof(struct nine_allocator));
1140 (void)device;
1141 (void)memfd_virtualsizelimit;
1142
1143 if (!allocator)
1144 return NULL;
1145
1146 slab_create(&allocator->external_allocation_pool, sizeof(struct nine_allocation), 4096);
1147 pthread_mutex_init(&allocator->mutex_slab, NULL);
1148
1149 return allocator;
1150 }
1151
1152 void
1153 nine_allocator_destroy(struct nine_allocator *allocator)
1154 {
1155 slab_destroy(&allocator->external_allocation_pool);
1156 pthread_mutex_destroy(&allocator->mutex_slab);
1157 }
1158
1159 #endif /* NINE_ENABLE_MEMFD */
1160