/*
 * Copyright (c) 2015 Google, Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "kernel/novm.h"

#include <err.h>
#include <assert.h>
#include <trace.h>
#include <stdlib.h>
#include <string.h>
#include <lk/init.h>
#include <kernel/mutex.h>

#define LOCAL_TRACE 0

struct novm_arena {
    mutex_t lock;
    const char *name;
    size_t pages;
    char *map;
    char *base;
    size_t size;

    // We divide the memory up into pages.  If there is memory we can use before
    // the first aligned page address, then we record it here and the heap will use
    // it.
#define MINIMUM_USEFUL_UNALIGNED_SIZE 64
    void *unaligned_area;
    size_t unaligned_size;
};


/* not a static vm, not using the kernel vm */
extern int _end;
extern int _end_of_ram;

#define MEM_START ((uintptr_t)&_end)
#define MEM_SIZE ((MEMBASE + MEMSIZE) - MEM_START)
#define DEFAULT_MAP_SIZE (MEMSIZE >> PAGE_SIZE_SHIFT)

/* a static list of arenas */
#ifndef NOVM_MAX_ARENAS
#define NOVM_MAX_ARENAS 1
#endif
struct novm_arena arena[NOVM_MAX_ARENAS];

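/*
 * Fill in up to number_of_ranges entries of 'ranges' with the base address
 * and size of each arena slot. Returns the number of leading entries needed
 * to cover every non-empty arena.
 */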
int novm_get_arenas(struct page_range *ranges, int number_of_ranges)
{
    int ranges_found = 0;
    for (int i = 0; i < number_of_ranges && i < NOVM_MAX_ARENAS; i++) {
        if (arena[i].pages > 0) ranges_found = i + 1;
        ranges[i].address = (void *)arena[i].base;
        ranges[i].size = arena[i].pages << PAGE_SIZE_SHIFT;
    }
    return ranges_found;
}

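/*
 * Hand out the unaligned leftover region recorded at the front of the first
 * arena, if one exists; otherwise fall back to a single page from any arena.
 * The size actually handed out is stored through size_return.
 */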
void *novm_alloc_unaligned(size_t *size_return)
{
    /* only do the unaligned thing in the first arena */
    if (arena[0].unaligned_area != NULL) {
        *size_return = arena[0].unaligned_size;
        void *result = arena[0].unaligned_area;
        arena[0].unaligned_area = NULL;
        arena[0].unaligned_size = 0;
        return result;
    }
    *size_return = PAGE_SIZE;
    return novm_alloc_pages(1, NOVM_ARENA_ANY);
}

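/* returns true if the pointer falls inside the arena's page-aligned range */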
static bool in_arena(struct novm_arena *n, void *p)
{
    if (n->size == 0)
        return false;

    char *ptr = (char *)p;
    char *base = n->base;
    return ptr >= base && ptr < base + n->size;
}

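/*
 * Common arena setup: page-align the range, set up the allocation map (one
 * byte per page), and record any usable unaligned slack before the first
 * full page so the heap can pick it up via novm_alloc_unaligned(). If no
 * caller-provided map is big enough, the map is carved out of the arena
 * itself.
 */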
static void novm_init_helper(struct novm_arena *n, const char *name,
                             uintptr_t arena_start, uintptr_t arena_size,
                             char *default_map, size_t default_map_size)
{
    uintptr_t start = round_up(arena_start, PAGE_SIZE);
    uintptr_t size = round_down(arena_start + arena_size, PAGE_SIZE) - start;

    mutex_init(&n->lock);

    size_t map_size = size >> PAGE_SIZE_SHIFT;
    char *map = default_map;
    if (map == NULL || default_map_size < map_size) {
        // allocate the map out of the arena itself
        map = (char *)arena_start;

        // Grab enough map for 16Mbyte of arena each time around the loop.
        while (start - arena_start < map_size) {
            start += PAGE_SIZE;
            size -= PAGE_SIZE;
            map_size--;
        }

        if ((char *)start - (map + round_up(map_size, 4)) >= MINIMUM_USEFUL_UNALIGNED_SIZE) {
            n->unaligned_area = map + round_up(map_size, 4);
            n->unaligned_size = (char *)start - (map + round_up(map_size, 4));
        }
    } else if (start - arena_start >= MINIMUM_USEFUL_UNALIGNED_SIZE) {
        n->unaligned_area = (char *)arena_start;
        n->unaligned_size = start - arena_start;
    }
    n->name = name;
    n->map = map;
    memset(n->map, 0, map_size);
    n->pages = map_size;
    n->base = (char *)start;
    n->size = size;
}

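/*
 * Register an additional memory range in the first free arena slot; panics if
 * all NOVM_MAX_ARENAS slots are already in use.
 */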
void novm_add_arena(const char *name, uintptr_t arena_start, uintptr_t arena_size)
{
    for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
        if (arena[i].pages == 0) {
            novm_init_helper(&arena[i], name, arena_start, arena_size, NULL, 0);
            return;
        }
    }
    panic("novm_add_arena: too many arenas added, bump NOVM_MAX_ARENAS!\n");
}

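/*
 * Init hook: set up the default arena covering the memory between _end and
 * the end of RAM (MEMBASE + MEMSIZE), using a statically allocated map.
 */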
static void novm_init(uint level)
{
    static char mem_allocation_map[DEFAULT_MAP_SIZE];
    novm_init_helper(&arena[0], "main", MEM_START, MEM_SIZE, mem_allocation_map, DEFAULT_MAP_SIZE);
}

LK_INIT_HOOK(novm, &novm_init, LK_INIT_LEVEL_PLATFORM_EARLY - 1);

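/*
 * First-fit scan of the arena's byte map (0 = free, 1 = allocated) for a run
 * of 'pages' consecutive free pages. Marks the run allocated and returns its
 * address, or NULL if no suitable run is found.
 */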
void *novm_alloc_helper(struct novm_arena *n, size_t pages)
{
    if (pages == 0 || pages > n->pages)
        return NULL;

    mutex_acquire(&n->lock);
    for (size_t i = 0; i <= n->pages - pages; i++) {
        bool found = true;
        for (size_t j = 0; j < pages; j++) {
            if (n->map[i + j] != 0) {
                i += j;
                found = false;
                break;
            }
        }
        if (found) {
            memset(n->map + i, 1, pages);
            mutex_release(&n->lock);
            return n->base + (i << PAGE_SIZE_SHIFT);
        }
    }
    mutex_release(&n->lock);

    return NULL;
}

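/*
 * Allocate a run of pages from the first arena selected by 'arena_bitmap'
 * (bit i selects arena[i]) that can satisfy the request.
 *
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *   void *p = novm_alloc_pages(2, NOVM_ARENA_ANY);
 *   if (p != NULL) {
 *       // ... use the two pages ...
 *       novm_free_pages(p, 2);
 *   }
 */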
void *novm_alloc_pages(size_t pages, uint32_t arena_bitmap)
{
    LTRACEF("pages %zu\n", pages);

    /* try each arena selected by the bitmap */
    for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
        if (arena_bitmap & (1U << i)) {
            void *result = novm_alloc_helper(&arena[i], pages);
            if (result)
                return result;
        }
    }

    return NULL;
}

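/*
 * Return 'pages' pages starting at 'address' to whichever arena contains the
 * address. Does nothing if the address is not inside any arena.
 */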
void novm_free_pages(void *address, size_t pages)
{
    LTRACEF("address %p, pages %zu\n", address, pages);

    struct novm_arena *n = NULL;
    for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
        if (in_arena(&arena[i], address)) {
            n = &arena[i];
            break;
        }
    }
    if (!n)
        return;

    DEBUG_ASSERT(in_arena(n, address));

    size_t index = ((char *)address - (char *)(n->base)) >> PAGE_SIZE_SHIFT;
    char *map = n->map;

    mutex_acquire(&n->lock);
    for (size_t i = 0; i < pages; i++) map[index + i] = 0;
    mutex_release(&n->lock);
}

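/*
 * Reserve a specific run of pages starting at 'address'. Returns
 * ERR_NOT_FOUND if the address is outside every arena, and ERR_NO_MEMORY if
 * any page in the run is already allocated (pages marked before the failing
 * page are left allocated).
 */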
status_t novm_alloc_specific_pages(void *address, size_t pages)
{
    LTRACEF("address %p, pages %zu\n", address, pages);

    struct novm_arena *n = NULL;
    for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
        if (in_arena(&arena[i], address)) {
            n = &arena[i];
            break;
        }
    }
    if (!n)
        return ERR_NOT_FOUND;

    size_t index = ((char *)address - (char *)(n->base)) >> PAGE_SIZE_SHIFT;
    char *map = n->map;

    status_t err = NO_ERROR;

    mutex_acquire(&n->lock);
    for (size_t i = 0; i < pages; i++) {
        if (map[index + i] != 0) {
            err = ERR_NO_MEMORY;
            break;
        }
        map[index + i] = 1;
    }
    mutex_release(&n->lock);

    return err;
}


#if LK_DEBUGLEVEL > 1
#if WITH_LIB_CONSOLE

#include <lib/console.h>

static int cmd_novm(int argc, const cmd_args *argv);
static void novm_dump(void);

STATIC_COMMAND_START
STATIC_COMMAND("novm", "page allocator (for devices without VM support) debug commands", &cmd_novm)
STATIC_COMMAND_END(novm);

static int cmd_novm(int argc, const cmd_args *argv)
{
    if (argc < 2) {
notenoughargs:
        printf("not enough arguments\n");
usage:
        printf("usage:\n");
        printf("\t%s info\n", argv[0].str);
        printf("\t%s alloc <numberofpages> [arena bitmap]\n", argv[0].str);
        printf("\t%s free <address> [numberofpages]\n", argv[0].str);
        return -1;
    }

    if (strcmp(argv[1].str, "info") == 0) {
        novm_dump();
    } else if (strcmp(argv[1].str, "alloc") == 0) {
        if (argc < 3) goto notenoughargs;

        uint32_t arena_bitmap = (argc >= 4) ? argv[3].u : NOVM_ARENA_ANY;
        void *ptr = novm_alloc_pages(argv[2].u, arena_bitmap);
        printf("novm_alloc_pages returns %p\n", ptr);
    } else if (strcmp(argv[1].str, "free") == 0) {
        if (argc < 3) goto notenoughargs;
        size_t pages = (argc >= 4) ? argv[3].u : 1;
        novm_free_pages(argv[2].p, pages);
        printf("novm_free_pages: %zd pages at %p\n", pages, argv[2].p);
    } else {
        printf("unrecognized command\n");
        goto usage;
    }

    return 0;
}

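/* console debug dump of one arena: range, usage count, and a per-page usage map */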
static void novm_dump_arena(struct novm_arena *n)
{
    if (n->pages == 0) {
        return;
    }

    mutex_acquire(&n->lock);
    printf("name '%s', %zu pages, each %zuk (%zuk in all)\n", n->name, n->pages,
           (size_t)PAGE_SIZE >> 10, ((size_t)PAGE_SIZE * n->pages) >> 10);
    printf("  range: %p-%p\n", (void *)n->base, (char *)n->base + n->size);
    printf("  unaligned range: %p-%p\n", n->unaligned_area, (char *)n->unaligned_area + n->unaligned_size);
    unsigned i;
    size_t in_use = 0;
    for (i = 0; i < n->pages; i++) if (n->map[i] != 0) in_use++;
    printf("  %zu/%zu in use\n", in_use, n->pages);
#define MAX_PRINT 1024u
    for (i = 0; i < MAX_PRINT && i < n->pages; i++) {
        if ((i & 63) == 0) printf("    ");
        printf("%c", n->map[i] ? '*' : '.');
        if ((i & 63) == 63) printf("\n");
    }
    if (i == MAX_PRINT && n->pages > MAX_PRINT) {
        printf("    etc., %zu more pages.", n->pages - MAX_PRINT);
    }
    printf("\n");
    mutex_release(&n->lock);
}

static void novm_dump(void)
{
    for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
        novm_dump_arena(&arena[i]);
    }
}

#endif
#endif