1 // Copyright 2018 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 #pragma once
15 
16 #include "aemu/base/c_header.h"
17 #include <assert.h>
18 #include <errno.h>
19 #include <inttypes.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 
24 ANDROID_BEGIN_HEADER
25 
26 #ifdef ADDRESS_SPACE_NAMESPACE
27 namespace ADDRESS_SPACE_NAMESPACE {
28 #endif
29 
30 // This is ported from goldfish_address_space, allowing it to be used for
31 // general sub-allocations of any buffer range.
32 // It is also a pure header library, so there are no compiler tricks needed
33 // to use this in a particular implementation. please don't include this
34 // in a file that is included everywhere else, though.
35 
/* Represents a continuous range of addresses and a flag if this block is
 * available
 */
struct address_block {
    uint64_t offset;  /* start of the range, in bytes */
    union {
        /* Both fields packed into one plain uint64_t so the state can be
         * saved/restored as a single integer. */
        uint64_t size_available; /* VMSTATE_x does not support bit fields */
        struct {
            uint64_t size : 63;     /* length of the range; invariant: > 0 */
            uint64_t available : 1; /* 1 if free for allocation, 0 if in use */
        };
    };
};
49 
/* A dynamic array of address blocks, with the following invariant:
 * blocks[i].size > 0
 * blocks[i+1].offset = blocks[i].offset + blocks[i].size
 */
struct address_space_allocator {
    struct address_block *blocks; /* heap array of 'capacity' entries */
    int size;                     /* number of blocks currently in use */
    int capacity;                 /* allocated length of 'blocks' */
    uint64_t total_bytes;         /* total managed range; sum of all blocks[i].size */
};
60 
/* Sentinel returned by address_space_allocator_allocate on failure. */
#define ANDROID_EMU_ADDRESS_SPACE_BAD_OFFSET (~(uint64_t)0)
/* Apple silicon uses 16 KiB pages; everything else defaults to 4 KiB. */
#if defined(__APPLE__) && defined(__arm64__)
#define ANDROID_EMU_ADDRESS_SPACE_DEFAULT_PAGE_SIZE 16384
#else
#define ANDROID_EMU_ADDRESS_SPACE_DEFAULT_PAGE_SIZE 4096
#endif
67 
/* The assert function to abort if something goes wrong.
 * Embedders can substitute their own handler by defining
 * ANDROID_EMU_ADDRESS_SPACE_ASSERT_FUNC before including this header;
 * otherwise the standard assert() is used (compiled out under NDEBUG). */
static void address_space_assert(bool condition) {
#ifdef ANDROID_EMU_ADDRESS_SPACE_ASSERT_FUNC
    ANDROID_EMU_ADDRESS_SPACE_ASSERT_FUNC(condition);
#else
    assert(condition);
#endif
}
76 
/* realloc() indirection; embedders can substitute their own allocator by
 * defining ANDROID_EMU_ADDRESS_SPACE_REALLOC_FUNC before including this
 * header. Semantics match realloc(ptr, size). */
static void* address_space_realloc(void* ptr, size_t size) {
#ifdef ANDROID_EMU_ADDRESS_SPACE_REALLOC_FUNC
    return ANDROID_EMU_ADDRESS_SPACE_REALLOC_FUNC(ptr, size);
#else
    return realloc(ptr, size);
#endif
}
85 
/* free() indirection matching address_space_realloc; override by defining
 * ANDROID_EMU_ADDRESS_SPACE_FREE_FUNC.
 * NOTE(review): `return <expr>;` in a void function assumes the macro
 * expands to a void expression — strictly a C constraint violation, though
 * accepted by common compilers; confirm against the override's definition. */
static void address_space_free(void* ptr) {
#ifdef ANDROID_EMU_ADDRESS_SPACE_FREE_FUNC
    return ANDROID_EMU_ADDRESS_SPACE_FREE_FUNC(ptr);
#else
    free(ptr);
#endif
}
93 
94 /* Looks for the smallest (to reduce fragmentation) available block with size to
95  * fit the requested amount and returns its index or -1 if none is available.
96  */
address_space_allocator_find_available_block(struct address_block * block,int n_blocks,uint64_t size_at_least)97 static int address_space_allocator_find_available_block(
98     struct address_block *block,
99     int n_blocks,
100     uint64_t size_at_least)
101 {
102     int index = -1;
103     uint64_t size_at_index = 0;
104     int i;
105 
106     address_space_assert(n_blocks >= 1);
107 
108     for (i = 0; i < n_blocks; ++i, ++block) {
109         uint64_t this_size = block->size;
110         address_space_assert(this_size > 0);
111 
112         if (this_size >= size_at_least && block->available &&
113             (index < 0 || this_size < size_at_index)) {
114             index = i;
115             size_at_index = this_size;
116         }
117     }
118 
119     return index;
120 }
121 
address_space_allocator_find_available_block_at_offset(struct address_block * block,int n_blocks,uint64_t size_at_least,uint64_t offset)122 static int address_space_allocator_find_available_block_at_offset(
123     struct address_block *block,
124     int n_blocks,
125     uint64_t size_at_least,
126     uint64_t offset)
127 {
128     int index = -1;
129     uint64_t size_at_index = 0;
130     int i;
131 
132     if (n_blocks <= 0) {
133         return -1;
134     }
135 
136     for (i = 0; i < n_blocks; ++i, ++block) {
137         uint64_t this_size = block->size;
138         if (this_size <= 0) {
139             return -1;
140         }
141 
142         if (this_size >= size_at_least && block->available &&
143             offset >= block->offset &&
144             offset < block->offset + this_size) {
145             index = i;
146             size_at_index = this_size;
147             return index;
148         }
149     }
150 
151     return index;
152 }
153 
/* Returns the doubled capacity for the block array. */
static int
address_space_allocator_grow_capacity(int old_capacity) {
    address_space_assert(old_capacity >= 1);

    return old_capacity * 2;
}
160 
161 /* Inserts one more address block right after i'th (by borrowing i'th size) and
162  * adjusts sizes:
163  * pre:
164  *   size > blocks[i].size
165  *
166  * post:
167  *   * might reallocate allocator->blocks if there is no capacity to insert one
168  *   * blocks[i].size -= size;
169  *   * blocks[i+1].size = size;
170  */
171 static struct address_block*
address_space_allocator_split_block(struct address_space_allocator * allocator,int i,uint64_t size)172 address_space_allocator_split_block(
173     struct address_space_allocator *allocator,
174     int i,
175     uint64_t size)
176 {
177     address_space_assert(allocator->capacity >= 1);
178     address_space_assert(allocator->size >= 1);
179     address_space_assert(allocator->size <= allocator->capacity);
180     address_space_assert(i >= 0);
181     address_space_assert(i < allocator->size);
182     address_space_assert(size < allocator->blocks[i].size);
183 
184     if (allocator->size == allocator->capacity) {
185         int new_capacity = address_space_allocator_grow_capacity(allocator->capacity);
186         allocator->blocks =
187             (struct address_block*)
188             address_space_realloc(
189                 allocator->blocks,
190                 sizeof(struct address_block) * new_capacity);
191         address_space_assert(allocator->blocks);
192         allocator->capacity = new_capacity;
193     }
194 
195     struct address_block *blocks = allocator->blocks;
196 
197     /*   size = 5, i = 1
198      *   [ 0 | 1 |  2  |  3  | 4 ]  =>  [ 0 | 1 | new |  2  | 3 | 4 ]
199      *         i  (i+1) (i+2)                 i  (i+1) (i+2)
200      */
201     memmove(&blocks[i + 2], &blocks[i + 1],
202             sizeof(struct address_block) * (allocator->size - i - 1));
203 
204     struct address_block *to_borrow_from = &blocks[i];
205     struct address_block *new_block = to_borrow_from + 1;
206 
207     uint64_t new_size = to_borrow_from->size - size;
208 
209     to_borrow_from->size = new_size;
210 
211     new_block->offset = to_borrow_from->offset + new_size;
212     new_block->size = size;
213     new_block->available = 1;
214 
215     ++allocator->size;
216 
217     return new_block;
218 }
219 
/* Inserts one more address block right after i'th (by borrowing i'th size) and
 * adjusts sizes:
 * pre:
 *   size < blocks[i].size
 *   offset >= blocks[i].offset
 *   offset < blocks[i].offset + blocks[i].size
 *
 * post:
 *   * might reallocate allocator->blocks if there is no capacity to insert one
 *   * blocks[i].size = offset - blocks[i].offset;
 *   * blocks[i+1].offset = offset;
 *   * blocks[i+1].size = size;
 *     (the original comment wrongly said blocks[i+1].size = offset)
 *   * if [offset, offset + size) does not reach the end of the original
 *     block, an extra block is inserted at i+2 covering the leftover tail.
 * Returns the block that now covers exactly [offset, offset + size).
 */
static struct address_block*
address_space_allocator_split_block_at_offset(
    struct address_space_allocator *allocator,
    int i,
    uint64_t size,
    uint64_t offset)
{
    uint64_t old_block_size;
    bool need_extra_block;
    int new_block_index_offset;

    address_space_assert(allocator->capacity >= 1);
    address_space_assert(allocator->size >= 1);
    address_space_assert(allocator->size <= allocator->capacity);
    address_space_assert(i >= 0);
    address_space_assert(i < allocator->size);
    address_space_assert(size < allocator->blocks[i].size);
    address_space_assert(offset >= allocator->blocks[i].offset);
    if (offset == allocator->blocks[i].offset) {
        // special case, return the current block (after splitting its tail).
        if (size < allocator->blocks[i].size) {
            /* NOTE(review): always true here — guaranteed by the
             * "size < blocks[i].size" assert above. */
            // split the tail if necessary
            address_space_allocator_split_block_at_offset(allocator, i,
                    allocator->blocks[i].size - size, offset + size);
        }
        return &allocator->blocks[i];
    }

    /* An extra block is needed iff some of block i's range remains after
     * removing the head [block start, offset) and the carved
     * [offset, offset + size). */
    need_extra_block =
        (allocator->blocks[i].size - size - (offset - allocator->blocks[i].offset)) != 0;
    new_block_index_offset = need_extra_block ? 1 : 0;

    if (allocator->size + new_block_index_offset >= allocator->capacity) {
        int new_capacity = address_space_allocator_grow_capacity(allocator->capacity + new_block_index_offset);
        allocator->blocks =
            (struct address_block*)
            address_space_realloc(
                allocator->blocks,
                sizeof(struct address_block) * new_capacity);
        address_space_assert(allocator->blocks);
        allocator->capacity = new_capacity;
    }

    struct address_block *blocks = allocator->blocks;

    /*   size = 5, i = 1
     *   [ 0 | 1 |  2  |  3  | 4 ]  =>  [ 0 | 1 | new |  new(for slop)  | 2    | 3 | 4]
     *         i  (i+1) (i+2)                 i  (i+1) (i+2)              (i+3)
     */
    memmove(&blocks[i + 2 + new_block_index_offset], &blocks[i + 1],
            sizeof(struct address_block) * (allocator->size - i - 1));

    struct address_block *to_borrow_from = &blocks[i];
    struct address_block *new_block = to_borrow_from + 1;
    struct address_block *extra_block = to_borrow_from + 2;

    /* Block i keeps the head: [original offset, offset). */
    old_block_size = to_borrow_from->size;
    to_borrow_from->size = offset - to_borrow_from->offset;

    /* The new block takes exactly the requested range. */
    new_block->offset = offset;
    new_block->size = size;
    new_block->available = 1;

    ++allocator->size;

    if (need_extra_block) {
        /* The extra block covers the tail left over after the carve. */
        extra_block->offset = offset + size;
        extra_block->size = old_block_size - size - to_borrow_from->size;
        extra_block->available = 1;

        ++allocator->size;
    }

    return new_block;
}
308 
309 /* Marks i'th block as available. If adjacent ((i-1) and (i+1)) blocks are also
310  * available, it merges i'th block with them.
311  * pre:
312  *   i < allocator->size
313  * post:
314  *   i'th block is merged with adjacent ones if they are available, blocks that
315  *   were merged from are removed. allocator->size is updated if blocks were
316  *   removed.
317  */
address_space_allocator_release_block(struct address_space_allocator * allocator,int i)318 static void address_space_allocator_release_block(
319     struct address_space_allocator *allocator,
320     int i)
321 {
322     struct address_block *blocks = allocator->blocks;
323     int before = i - 1;
324     int after = i + 1;
325     int size = allocator->size;
326 
327     address_space_assert(i >= 0);
328     address_space_assert(i < size);
329 
330     blocks[i].available = 1;
331 
332     if (before >= 0 && blocks[before].available) {
333         if (after < size && blocks[after].available) {
334             // merge (before, i, after) into before
335             blocks[before].size += (blocks[i].size + blocks[after].size);
336 
337             size -= 2;
338             memmove(&blocks[i], &blocks[i + 2],
339                 sizeof(struct address_block) * (size - i));
340             allocator->size = size;
341         } else {
342             // merge (before, i) into before
343             blocks[before].size += blocks[i].size;
344 
345             --size;
346             memmove(&blocks[i], &blocks[i + 1],
347                 sizeof(struct address_block) * (size - i));
348             allocator->size = size;
349         }
350     } else if (after < size && blocks[after].available) {
351         // merge (i, after) into i
352         blocks[i].size += blocks[after].size;
353 
354         --size;
355         memmove(&blocks[after], &blocks[after + 1],
356             sizeof(struct address_block) * (size - after));
357         allocator->size = size;
358     }
359 
360 }
361 
362 /* Takes a size to allocate an address block and returns an offset where this
363  * block is allocated. This block will not be available for other callers unless
364  * it is explicitly deallocated (see address_space_allocator_deallocate below).
365  */
address_space_allocator_allocate(struct address_space_allocator * allocator,uint64_t size)366 static uint64_t address_space_allocator_allocate(
367     struct address_space_allocator *allocator,
368     uint64_t size)
369 {
370     int i = address_space_allocator_find_available_block(allocator->blocks,
371                                                          allocator->size,
372                                                          size);
373     if (i < 0) {
374         return ANDROID_EMU_ADDRESS_SPACE_BAD_OFFSET;
375     } else {
376         address_space_assert(i < allocator->size);
377 
378         struct address_block *block = &allocator->blocks[i];
379         address_space_assert(block->size >= size);
380 
381         if (block->size > size) {
382             block = address_space_allocator_split_block(allocator, i, size);
383         }
384 
385         address_space_assert(block->size == size);
386         block->available = 0;
387 
388         return block->offset;
389     }
390 }
391 
392 /* Takes a size to allocate an address block and returns an offset where this
393  * block is allocated. This block will not be available for other callers unless
394  * it is explicitly deallocated (see address_space_allocator_deallocate below).
395  */
address_space_allocator_allocate_fixed(struct address_space_allocator * allocator,uint64_t size,uint64_t offset)396 static int address_space_allocator_allocate_fixed(
397     struct address_space_allocator *allocator,
398     uint64_t size,
399     uint64_t offset)
400 {
401     int i = address_space_allocator_find_available_block_at_offset(
402         allocator->blocks,
403         allocator->size,
404         size,
405         offset);
406 
407     if (i < 0) {
408         return -1;
409     } else {
410         address_space_assert(i < allocator->size);
411 
412         struct address_block *block = &allocator->blocks[i];
413         address_space_assert(block->size >= size);
414 
415         if (block->size > size) {
416             block = address_space_allocator_split_block_at_offset(allocator, i, size, offset);
417         }
418 
419         address_space_assert(block->size == size);
420         block->available = 0;
421 
422         return 0;
423     }
424 }
425 
426 /* Takes an offset returned from address_space_allocator_allocate ealier
427  * (see above) and marks this block as available for further allocation.
428  */
address_space_allocator_deallocate(struct address_space_allocator * allocator,uint64_t offset)429 static uint32_t address_space_allocator_deallocate(
430     struct address_space_allocator *allocator,
431     uint64_t offset)
432 {
433     struct address_block *block = allocator->blocks;
434     int size = allocator->size;
435     int i;
436 
437     address_space_assert(size >= 1);
438 
439     for (i = 0; i < size; ++i, ++block) {
440         if (block->offset == offset) {
441             if (block->available) {
442                 return EINVAL;
443             } else {
444                 address_space_allocator_release_block(allocator, i);
445                 return 0;
446             }
447         }
448     }
449 
450     return EINVAL;
451 }
452 
453 /* Creates a seed block. */
address_space_allocator_init(struct address_space_allocator * allocator,uint64_t size,int initial_capacity)454 static void address_space_allocator_init(
455     struct address_space_allocator *allocator,
456     uint64_t size,
457     int initial_capacity)
458 {
459     address_space_assert(initial_capacity >= 1);
460 
461     allocator->blocks =
462         (struct address_block*)
463         malloc(sizeof(struct address_block) * initial_capacity);
464     memset(allocator->blocks, 0, sizeof(struct address_block) * initial_capacity);
465     address_space_assert(allocator->blocks);
466 
467     struct address_block *block = allocator->blocks;
468 
469     block->offset = 0;
470     block->size = size;
471     block->available = 1;
472 
473     allocator->size = 1;
474     allocator->capacity = initial_capacity;
475     allocator->total_bytes = size;
476 }
477 
/* At this point there should be no used blocks and all available blocks must
 * have been merged into one block.
 */
static void address_space_allocator_destroy(
    struct address_space_allocator *allocator)
{
    /* A fully released allocator has exactly one block and it is available;
     * anything else means callers leaked allocations. */
    address_space_assert(allocator->size == 1);
    address_space_assert(allocator->capacity >= allocator->size);
    address_space_assert(allocator->blocks[0].available);
    address_space_free(allocator->blocks);
}
489 
/* Destroy function for when we don't care what was previously allocated:
 * frees the block array without checking that all allocations were released.
 */
static void address_space_allocator_destroy_nocleanup(
    struct address_space_allocator *allocator)
{
    address_space_free(allocator->blocks);
}
498 
499 /* Resets the state of the allocator to the initial state without
500  * performing any dynamic memory management. */
address_space_allocator_reset(struct address_space_allocator * allocator)501 static void address_space_allocator_reset(
502     struct address_space_allocator *allocator)
503 {
504     address_space_assert(allocator->size >= 1);
505 
506     allocator->size = 1;
507 
508     struct address_block* block = allocator->blocks;
509     block->offset = 0;
510     block->size = allocator->total_bytes;
511     block->available = 1;
512 }
513 
/* Callback invoked for each address_block during iteration. */
typedef void (*address_block_iter_func_t)(void* context, struct address_block*);
/* Callback invoked once for the allocator itself during iteration. */
typedef void (*address_space_allocator_iter_func_t)(void* context, struct address_space_allocator*);
516 
/* Visits the allocator and then each of its blocks in order, invoking the
 * supplied callbacks with |context| (e.g. for iteration/serialization). */
static void address_space_allocator_run(
    struct address_space_allocator *allocator,
    void* context,
    address_space_allocator_iter_func_t allocator_func,
    address_block_iter_func_t block_func)
{
    /* The allocator-level callback runs before the blocks are read. */
    allocator_func(context, allocator);

    const int count = allocator->size;
    address_space_assert(count >= 1);

    struct address_block *cursor = allocator->blocks;
    for (int i = 0; i < count; ++i, ++cursor) {
        block_func(context, cursor);
    }
}
538 
539 #ifdef ADDRESS_SPACE_NAMESPACE
540 }
541 #endif
542 
543 ANDROID_END_HEADER
544