/*
 * Copyright (c) 2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <kernel/vm.h>
#include "vm_priv.h"
#include "res_group.h"

#include <trace.h>
#include <assert.h>
#include <list.h>
#include <stdlib.h>
#include <err.h>
#include <string.h>
#include <pow2.h>
#include <lib/console.h>
#include <kernel/mutex.h>
#include <kernel/spinlock.h>
#include <inttypes.h>

#define LOCAL_TRACE 0

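/*
 * vmm_obj wrapper around a set of physical pages handed out by this
 * allocator. chunk[] holds one vm_page pointer per chunk of chunk_size
 * bytes; page-granular allocations use one PAGE_SIZE chunk per page, while
 * contiguous allocations use a single chunk spanning the whole run (see
 * pmm_alloc_from_res_group()). page_list links the underlying vm_page
 * structures so they can be returned with pmm_free() on destroy.
 */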
struct pmm_vmm_obj {
    struct vmm_obj vmm_obj;
    struct list_node page_list;
    size_t chunk_count;
    size_t chunk_size;
    struct res_group* res_group;
    struct obj_ref res_group_ref;
    size_t used_pages;
    uint32_t flags;
    struct vm_page *chunk[];
};

#define PMM_OBJ_FLAG_NEEDS_CLEAR (1U)
#define PMM_OBJ_FLAG_ALLOW_TAGGED (2U)

static inline struct pmm_vmm_obj* vmm_obj_to_pmm_obj(struct vmm_obj *vmm_obj)
{
    return containerof(vmm_obj, struct pmm_vmm_obj, vmm_obj);
}

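/*
 * arena_list is kept sorted by arena priority (see insert_arena()). The
 * mutex serializes allocation, free and reservation operations; aux_slock
 * additionally protects the arena list itself so that pmm_paddr_to_kvaddr()
 * can walk it without taking the mutex (presumably from contexts that
 * cannot block), and pmm_add_arena_late_etc() takes both locks when it
 * inserts a new arena.
 */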
static struct list_node arena_list = LIST_INITIAL_VALUE(arena_list);
static mutex_t lock = MUTEX_INITIAL_VALUE(lock);
static spin_lock_t aux_slock = SPIN_LOCK_INITIAL_VALUE;

#define PAGE_BELONGS_TO_ARENA(page, arena) \
    (((uintptr_t)(page) >= (uintptr_t)(arena)->page_array) && \
     ((uintptr_t)(page) < ((uintptr_t)(arena)->page_array + (arena)->size / PAGE_SIZE * sizeof(vm_page_t))))
#define PAGE_ADDRESS_FROM_ARENA(page, arena) \
    ((paddr_t)(((uintptr_t)(page) - (uintptr_t)(arena)->page_array) / sizeof(vm_page_t)) * PAGE_SIZE + (arena)->base)

#define ADDRESS_IN_ARENA(address, arena) \
    ((address) >= (arena)->base && (address) <= (arena)->base + (arena)->size - 1)

static size_t pmm_free_locked(struct list_node *list);

static inline bool page_is_free(const vm_page_t *page)
{
    DEBUG_ASSERT(page);

    return !(page->flags & VM_PAGE_FLAG_NONFREE);
}

static void clear_page(vm_page_t *page)
{
    paddr_t pa;
    void *kva;

    pa = vm_page_to_paddr(page);
    ASSERT(pa != (paddr_t)-1);

    kva = paddr_to_kvaddr(pa);
    ASSERT(kva);

    memset(kva, 0, PAGE_SIZE);
}

paddr_t vm_page_to_paddr(const vm_page_t *page)
{
    DEBUG_ASSERT(page);

    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        if (PAGE_BELONGS_TO_ARENA(page, a)) {
            return PAGE_ADDRESS_FROM_ARENA(page, a);
        }
    }
    return -1;
}

vm_page_t *paddr_to_vm_page(paddr_t addr)
{
    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        if (addr >= a->base && addr <= a->base + a->size - 1) {
            size_t index = (addr - a->base) / PAGE_SIZE;
            return &a->page_array[index];
        }
    }
    return NULL;
}

static void insert_arena(pmm_arena_t *arena)
{
    /* walk the arena list and add arena based on priority order */
    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        if (a->priority > arena->priority) {
            list_add_before(&a->node, &arena->node);
            return;
        }
    }

    /* walked off the end, add it to the end of the list */
    list_add_tail(&arena_list, &arena->node);
}

static void init_page_array(pmm_arena_t *arena, size_t page_count,
                            size_t reserved_at_start,
                            size_t reserved_at_end)
{
    ASSERT(reserved_at_start < page_count);
    ASSERT(reserved_at_end <= page_count);

    /* clear page array */
    memset(arena->page_array, 0, page_count * sizeof(vm_page_t));

    /* add them to the free list, skipping reserved pages */
    for (size_t i = 0; i < page_count; i++) {
        vm_page_t *p = &arena->page_array[i];

        if (i < reserved_at_start || i >= (page_count - reserved_at_end)) {
            p->flags |= VM_PAGE_FLAG_NONFREE;
            continue;
        }

        list_add_tail(&arena->free_list, &p->node);

        arena->free_count++;
    }
}

status_t pmm_add_arena(pmm_arena_t *arena)
{
    LTRACEF("arena %p name '%s' base 0x%" PRIxPADDR " size 0x%zx\n", arena, arena->name, arena->base, arena->size);

    DEBUG_ASSERT(arena);
    DEBUG_ASSERT(IS_PAGE_ALIGNED(arena->base));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(arena->size));
    DEBUG_ASSERT(arena->size > 0);

    /* zero out some of the structure */
    arena->free_count = 0;
    arena->reserved_count = 0;
    list_initialize(&arena->free_list);

    if (arena->flags & PMM_ARENA_FLAG_KMAP) {
        /* lookup kernel mapping address */
        arena->kvaddr = (vaddr_t)paddr_to_kvaddr(arena->base);
        ASSERT(arena->kvaddr);
    } else {
        arena->kvaddr = 0;
    }

    /* allocate an array of pages to back this one */
    size_t page_count = arena->size / PAGE_SIZE;
    arena->page_array = boot_alloc_mem(page_count * sizeof(vm_page_t));

    /* initialize it */
    init_page_array(arena, page_count, 0, 0);

    /* Add arena to tracking list */
    insert_arena(arena);

    return NO_ERROR;
}

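/*
 * Translate a physical address to a kernel virtual address by searching the
 * kernel-mapped arenas under aux_slock. Returns NULL if no kmapped arena
 * covers the address.
 */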
void *pmm_paddr_to_kvaddr(paddr_t pa) {
    pmm_arena_t *a;
    void *va = NULL;
    spin_lock_saved_state_t state;

    spin_lock_save(&aux_slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        if (a->kvaddr && ADDRESS_IN_ARENA(pa, a)) {
            va = (void *)(a->kvaddr + (pa - a->base));
            break;
        }
    }
    spin_unlock_restore(&aux_slock, state, SPIN_LOCK_FLAG_INTERRUPTS);

    return va;
}

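/*
 * Late variant of pmm_add_arena(): the vm_page array is carved out of the
 * arena itself (placed reserve_at_start bytes in) rather than boot-allocated,
 * and the pages covering the reserved head and tail regions plus the page
 * array are marked non-free. If the arena is not already kernel-mapped it is
 * mapped cached via vmm_alloc_physical_etc().
 */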
status_t pmm_add_arena_late_etc(pmm_arena_t *arena,
                                size_t reserve_at_start,
                                size_t reserve_at_end)
{
    void *va;
    size_t page_count;
    spin_lock_saved_state_t state;

    LTRACEF("arena %p name '%s' base 0x%" PRIxPADDR " size 0x%zx\n",
            arena, arena->name, arena->base, arena->size);

    DEBUG_ASSERT(arena);
    DEBUG_ASSERT(IS_PAGE_ALIGNED(arena->base));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(arena->size));
    DEBUG_ASSERT(arena->size > 0);

    /* zero out some of the structure */
    arena->free_count = 0;
    arena->reserved_count = 0;
    list_initialize(&arena->free_list);

    /* allocate an array of pages to back this one */
    page_count = arena->size / PAGE_SIZE;

    /* check if we have enough space to reserve everything */
    if (round_up(reserve_at_start + page_count * sizeof(vm_page_t), PAGE_SIZE) +
        round_up(reserve_at_end, PAGE_SIZE) > arena->size) {
        return ERR_INVALID_ARGS;
    }

    if (arena->flags & PMM_ARENA_FLAG_KMAP) {
        /* arena is already kmapped */
        va = paddr_to_kvaddr(arena->base);
        if (!va) {
            return ERR_INVALID_ARGS;
        }
    } else {
        /* map it */
        status_t rc = vmm_alloc_physical_etc(vmm_get_kernel_aspace(),
                                             arena->name, arena->size,
                                             &va, 0, &arena->base, 1,
                                             0, ARCH_MMU_FLAG_CACHED);
        if (rc < 0) {
            return rc;
        }

        arena->flags |= PMM_ARENA_FLAG_KMAP;
    }

    /* set kmap address */
    arena->kvaddr = (vaddr_t)va;

    /* place page tracking structure at base of arena (past reserve_at_start) */
    arena->page_array = va + reserve_at_start;

    /* reserve memory for page_array */
    reserve_at_start += page_count * sizeof(vm_page_t);

    init_page_array(arena, page_count,
                    round_up(reserve_at_start, PAGE_SIZE) / PAGE_SIZE,
                    round_up(reserve_at_end, PAGE_SIZE) / PAGE_SIZE);

    /* Insert arena into tracking structure */
    mutex_acquire(&lock);
    spin_lock_save(&aux_slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    insert_arena(arena);
    spin_unlock_restore(&aux_slock, state, SPIN_LOCK_FLAG_INTERRUPTS);
    mutex_release(&lock);

    return NO_ERROR;
}

static int pmm_vmm_obj_check_flags(struct vmm_obj *obj, uint *arch_mmu_flags)
{
    return 0; /* Allow any flags for now */
}

static int pmm_vmm_obj_get_page(struct vmm_obj *obj, size_t offset,
                                paddr_t *paddr, size_t *paddr_size)
{
    struct pmm_vmm_obj *pmm_obj = vmm_obj_to_pmm_obj(obj);
    size_t index;
    size_t chunk_offset;

    index = offset / pmm_obj->chunk_size;
    chunk_offset = offset % pmm_obj->chunk_size;

    if (index >= pmm_obj->chunk_count) {
        return ERR_OUT_OF_RANGE;
    }
    *paddr = vm_page_to_paddr(pmm_obj->chunk[index]) + chunk_offset;
    *paddr_size = pmm_obj->chunk_size - chunk_offset;
    return 0;
}

static void pmm_vmm_obj_destroy(struct vmm_obj *obj)
{
    struct pmm_vmm_obj *pmm_obj = vmm_obj_to_pmm_obj(obj);

    pmm_free(&pmm_obj->page_list);
    if (pmm_obj->res_group) {
        res_group_release_mem(pmm_obj->res_group, pmm_obj->used_pages);
        res_group_del_ref(pmm_obj->res_group, &pmm_obj->res_group_ref);
    }
    free(pmm_obj);
}

static struct vmm_obj_ops pmm_vmm_obj_ops = {
    .check_flags = pmm_vmm_obj_check_flags,
    .get_page = pmm_vmm_obj_get_page,
    .destroy = pmm_vmm_obj_destroy,
};

static struct pmm_vmm_obj *pmm_alloc_obj(size_t chunk_count, size_t chunk_size)
{
    struct pmm_vmm_obj *pmm_obj;

    DEBUG_ASSERT(chunk_size % PAGE_SIZE == 0);

    if (chunk_count == 0)
        return NULL;

    pmm_obj = calloc(
            1, sizeof(*pmm_obj) + sizeof(pmm_obj->chunk[0]) * chunk_count);
    if (!pmm_obj) {
        return NULL;
    }
    pmm_obj->chunk_count = chunk_count;
    pmm_obj->chunk_size = chunk_size;
    list_initialize(&pmm_obj->page_list);

    return pmm_obj;
}

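/*
 * Scan arena a for a run of count consecutive free pages whose starting
 * physical address is aligned to 1 << alignment_log2. Returns the page
 * index of the start of the run, or ~0UL if no such run exists. Called
 * with the pmm mutex held.
 */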
static size_t pmm_arena_find_free_run(pmm_arena_t *a, uint count,
                                      uint8_t alignment_log2) {
    if (alignment_log2 < PAGE_SIZE_SHIFT)
        alignment_log2 = PAGE_SIZE_SHIFT;

    /* walk the list starting at alignment boundaries.
     * calculate the starting offset into this arena, based on the
     * base address of the arena to handle the case where the arena
     * is not aligned on the same boundary requested.
     */
    paddr_t rounded_base = round_up(a->base, 1UL << alignment_log2);
    if (rounded_base < a->base || rounded_base > a->base + (a->size - 1))
        return ~0UL;

    uint aligned_offset = (rounded_base - a->base) / PAGE_SIZE;
    uint start = aligned_offset;
    LTRACEF("starting search at aligned offset %u\n", start);
    LTRACEF("arena base 0x%" PRIxPADDR " size %zu\n", a->base, a->size);

retry:
    /*
     * Search while we're still within the arena and have a chance of finding a
     * slot (start + count < end of arena)
     */
    while ((start < a->size / PAGE_SIZE) &&
           ((start + count) <= a->size / PAGE_SIZE)) {
        vm_page_t *p = &a->page_array[start];
        for (uint i = 0; i < count; i++) {
            if (p->flags & VM_PAGE_FLAG_NONFREE) {
                /* this run is broken, break out of the inner loop.
                 * start over at the next alignment boundary
                 */
                start = round_up(start - aligned_offset + i + 1,
                                 1UL << (alignment_log2 - PAGE_SIZE_SHIFT)) +
                        aligned_offset;
                goto retry;
            }
            p++;
        }

        /* we found a run */
        LTRACEF("found run from pn %u to %u\n", start, start + count);
        return start;
    }
    return ~0UL;
}

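/*
 * Walk the arenas and work out how many of count pages can be covered by
 * unreserved free pages. Returns the number of pages that could not be
 * covered (0 means the request fits). If reserve is true the covered pages
 * are added to each arena's reserved_count along the way. Caller must hold
 * the pmm mutex.
 */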
static uint check_available_pages(uint count, bool reserve) {
    /* walk the arenas in order, allocating as many pages as we can from each */
    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        ASSERT(a->free_count >= a->reserved_count);
        size_t available_count = a->free_count - a->reserved_count;
        if (!available_count) {
            continue;
        }
        size_t reserved_count = MIN(count, available_count);
        count -= reserved_count;
        if (reserve) {
            a->reserved_count += reserved_count;
        }
        if (!count) {
            break;
        }
    }
    return count;
}

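/*
 * Reserve count pages for later allocation with PMM_ALLOC_FLAG_FROM_RESERVED.
 * Two passes are used: a dry run to confirm enough unreserved free pages
 * exist, then a second pass that records the reservation, so no partial
 * reservation is left behind on failure.
 */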
status_t pmm_reserve_pages(uint count) {
    mutex_acquire(&lock);
    uint remaining_count = check_available_pages(count, false);
    if (remaining_count) {
        mutex_release(&lock);
        return ERR_NO_MEMORY;
    } else {
        check_available_pages(count, true);
    }
    mutex_release(&lock);
    return NO_ERROR;
}

void pmm_unreserve_pages(uint count) {
    /* walk the arenas in order, unreserving pages */
    pmm_arena_t *a;
    mutex_acquire(&lock);
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        size_t unreserved_count = MIN(count, a->reserved_count);
        count -= unreserved_count;
        a->reserved_count -= unreserved_count;
        if (!count) {
            mutex_release(&lock);
            return;
        }
    }
    mutex_release(&lock);
    ASSERT(!count);
}

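/*
 * Core page allocator; caller must hold the pmm mutex. Pages are first
 * collected on a temporary list so that a partial allocation can be
 * returned to the free lists on failure, then spliced onto page_list. If
 * pages is non-NULL it receives one vm_page pointer per page, or just the
 * first page of the run when PMM_ALLOC_FLAG_CONTIGUOUS is set.
 */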
static status_t pmm_alloc_pages_locked(struct list_node *page_list,
                                       struct vm_page *pages[], uint count,
                                       uint32_t flags, uint8_t align_log2)
{
    uint allocated = 0;
    size_t free_run_start = ~0UL;
    struct list_node tmp_page_list = LIST_INITIAL_VALUE(tmp_page_list);

    /* align_log2 is only supported when PMM_ALLOC_FLAG_CONTIGUOUS is set */
    ASSERT(!align_log2 || (flags & PMM_ALLOC_FLAG_CONTIGUOUS));

    if ((flags & PMM_ALLOC_FLAG_CONTIGUOUS) && (count == 1) &&
        (align_log2 <= PAGE_SIZE_SHIFT)) {
        /* pmm_arena_find_free_run is slow. Skip it if any page will do */
        flags &= ~PMM_ALLOC_FLAG_CONTIGUOUS;
    }

    /* walk the arenas in order, allocating as many pages as we can from each */
    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        ASSERT(a->free_count >= a->reserved_count);
        if (flags & PMM_ALLOC_FLAG_KMAP && !(a->flags & PMM_ARENA_FLAG_KMAP)) {
            /* caller requested mapped pages, but arena a is not mapped */
            continue;
        }

        if (flags & PMM_ALLOC_FLAG_CONTIGUOUS) {
            free_run_start = pmm_arena_find_free_run(a, count, align_log2);
            if (free_run_start == ~0UL) {
                continue;
            }
        }

        while (allocated < count) {
            if (flags & PMM_ALLOC_FLAG_FROM_RESERVED) {
                if (!a->reserved_count) {
                    LTRACEF("no more reserved pages in the arena!\n");
                    break;
                }
            } else if (a->free_count <= a->reserved_count) {
                LTRACEF("all pages reserved or used!\n");
                break;
            }

            vm_page_t *page;
            if (flags & PMM_ALLOC_FLAG_CONTIGUOUS) {
                DEBUG_ASSERT(free_run_start < a->size / PAGE_SIZE);
                page = &a->page_array[free_run_start++];
                DEBUG_ASSERT(!(page->flags & VM_PAGE_FLAG_NONFREE));
                DEBUG_ASSERT(list_in_list(&page->node));
                list_delete(&page->node);
            } else {
                page = list_remove_head_type(&a->free_list, vm_page_t, node);
                if (!page)
                    break;
            }

            /*
             * Don't clear tagged pages here, as the page and tags will be
             * cleared later.
             */
            if (!(flags & PMM_ALLOC_FLAG_NO_CLEAR)) {
                clear_page(page);
            }

            if (flags & PMM_ALLOC_FLAG_FROM_RESERVED) {
                a->reserved_count--;
                page->flags |= VM_PAGE_FLAG_RESERVED;
            }
            a->free_count--;

            page->flags |= VM_PAGE_FLAG_NONFREE;
            if (pages && (!allocated || !(flags & PMM_ALLOC_FLAG_CONTIGUOUS))) {
                /*
                 * If PMM_ALLOC_FLAG_CONTIGUOUS is set, then @pages has a single
                 * entry, otherwise it has @count entries.
                 */
                pages[allocated] = page;
            }
            list_add_tail(&tmp_page_list, &page->node);

            allocated++;
        }
    }

    if (allocated != count) {
        pmm_free_locked(&tmp_page_list);
        return ERR_NO_MEMORY;
    }
    if (page_list) {
        list_splice_tail(page_list, &tmp_page_list);
    }
    return 0;
}

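/*
 * Allocate count pages and wrap them in a vmm_obj returned through objp/ref.
 * If res_group is non-NULL the pages are charged to it (and must come from
 * the reserved pool); the charge and the reference are dropped when the
 * object is destroyed. Contiguous allocations are tracked as a single
 * count * PAGE_SIZE chunk rather than one chunk per page.
 */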
status_t pmm_alloc_from_res_group(struct vmm_obj **objp, struct obj_ref* ref,
                                  struct res_group* res_group, uint count,
                                  uint32_t flags, uint8_t align_log2)
{
    status_t ret;
    struct pmm_vmm_obj *pmm_obj;

    DEBUG_ASSERT(objp);
    DEBUG_ASSERT(ref);
    DEBUG_ASSERT(!obj_ref_active(ref));
    DEBUG_ASSERT(count > 0);

    LTRACEF("count %u\n", count);
    if (flags & PMM_ALLOC_FLAG_FROM_RESERVED) {
        ASSERT(res_group);
    }
    if (res_group) {
        ASSERT(flags & PMM_ALLOC_FLAG_FROM_RESERVED);
        ret = res_group_take_mem(res_group, count);
        if (ret) {
            goto err_take_mem;
        }
    }
    if (flags & PMM_ALLOC_FLAG_CONTIGUOUS) {
        /*
         * When allocating a physically contiguous region we don't need a
         * pointer to every page. Allocate an object with one large page
         * instead. This also allows the vmm to map the contiguous region more
         * efficiently when the hardware supports it.
         */
        pmm_obj = pmm_alloc_obj(1, count * PAGE_SIZE);
    } else {
        pmm_obj = pmm_alloc_obj(count, PAGE_SIZE);
    }
    if (!pmm_obj) {
        ret = ERR_NO_MEMORY;
        goto err_alloc_pmm_obj;
    }

    mutex_acquire(&lock);
    ret = pmm_alloc_pages_locked(&pmm_obj->page_list, pmm_obj->chunk, count,
                                 flags, align_log2);
    if (flags & PMM_ALLOC_FLAG_NO_CLEAR) {
        pmm_obj->flags |= PMM_OBJ_FLAG_NEEDS_CLEAR;
    }
    if (flags & PMM_ALLOC_FLAG_ALLOW_TAGGED) {
        ASSERT(arch_tagging_enabled());
        pmm_obj->flags |= PMM_OBJ_FLAG_ALLOW_TAGGED;
    }
    mutex_release(&lock);

    if (ret) {
        goto err_alloc_pages;
    }

    if (res_group) {
        obj_ref_init(&pmm_obj->res_group_ref);
        res_group_add_ref(res_group, &pmm_obj->res_group_ref);
        pmm_obj->res_group = res_group;
        pmm_obj->used_pages = count;
    }

    vmm_obj_init(&pmm_obj->vmm_obj, ref, &pmm_vmm_obj_ops);
    *objp = &pmm_obj->vmm_obj;
    return NO_ERROR;

err_alloc_pages:
    free(pmm_obj);
err_alloc_pmm_obj:
    if (res_group) {
        res_group_release_mem(res_group, count);
    }
err_take_mem:
    return ret;
}

static bool pmm_vmm_is_pmm_obj(struct vmm_obj* vmm) {
    return (vmm && vmm->ops == &pmm_vmm_obj_ops);
}

bool pmm_vmm_is_pmm_that_needs_clear(struct vmm_obj* vmm) {
    if (pmm_vmm_is_pmm_obj(vmm)) {
        struct pmm_vmm_obj* pmm = vmm_obj_to_pmm_obj(vmm);
        return pmm->flags & PMM_OBJ_FLAG_NEEDS_CLEAR;
    }
    return false;
}

bool pmm_vmm_is_pmm_that_allows_tagged(struct vmm_obj* vmm) {
    if (pmm_vmm_is_pmm_obj(vmm)) {
        struct pmm_vmm_obj* pmm = vmm_obj_to_pmm_obj(vmm);
        return pmm->flags & PMM_OBJ_FLAG_ALLOW_TAGGED;
    }
    return false;
}

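/*
 * Record that the owner has cleared an object allocated with
 * PMM_ALLOC_FLAG_NO_CLEAR. The cleared state is only tracked per object,
 * so offset/size must describe the entire object.
 */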
void pmm_set_cleared(struct vmm_obj* vmm, size_t offset, size_t size) {
    ASSERT(pmm_vmm_is_pmm_that_needs_clear(vmm));
    struct pmm_vmm_obj* pmm = vmm_obj_to_pmm_obj(vmm);
    /*
     * check that the caller cleared the entire object, since
     * we only keep track of the cleared state at the object level
     */
    ASSERT(offset == 0);
    ASSERT(size == pmm->chunk_count * pmm->chunk_size);
    pmm->flags &= ~PMM_OBJ_FLAG_NEEDS_CLEAR;
}

void pmm_set_tagged(struct vmm_obj* vmm) {
    ASSERT(pmm_vmm_is_pmm_that_allows_tagged(vmm));
    struct pmm_vmm_obj* pmm = vmm_obj_to_pmm_obj(vmm);
    pmm->flags &= ~PMM_OBJ_FLAG_ALLOW_TAGGED;
}

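/*
 * Allocate a specific run of up to count pages starting at physical address
 * address, appending them to list. Allocation stops at the first page that
 * is already allocated or when only reserved pages remain in the arena.
 * Returns the number of pages actually allocated.
 */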
size_t pmm_alloc_range(paddr_t address, uint count, struct list_node *list)
{
    LTRACEF("address 0x%" PRIxPADDR ", count %u\n", address, count);

    DEBUG_ASSERT(list);

    uint allocated = 0;
    if (count == 0)
        return 0;

    address = round_down(address, PAGE_SIZE);

    mutex_acquire(&lock);

    /* walk through the arenas, looking to see if the physical page belongs to it */
    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        while (allocated < count && ADDRESS_IN_ARENA(address, a)) {
            if (a->free_count <= a->reserved_count) {
                LTRACEF("all pages reserved or used!\n");
                break;
            }
            size_t index = (address - a->base) / PAGE_SIZE;

            DEBUG_ASSERT(index < a->size / PAGE_SIZE);

            vm_page_t *page = &a->page_array[index];
            if (page->flags & VM_PAGE_FLAG_NONFREE) {
                /* we hit an allocated page */
                break;
            }

            DEBUG_ASSERT(list_in_list(&page->node));

            list_delete(&page->node);
            page->flags |= VM_PAGE_FLAG_NONFREE;
            list_add_tail(list, &page->node);

            a->free_count--;
            allocated++;
            address += PAGE_SIZE;
        }

        if (allocated == count)
            break;
    }

    mutex_release(&lock);
    return allocated;
}

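/*
 * Return every page on list to the free list of the arena that owns it;
 * caller must hold the pmm mutex. Pages that came from the reserved pool
 * are returned to their arena's reserved_count. Returns the number of
 * pages freed.
 */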
static size_t pmm_free_locked(struct list_node *list)
{
    LTRACEF("list %p\n", list);

    DEBUG_ASSERT(list);

    uint count = 0;
    while (!list_is_empty(list)) {
        vm_page_t *page = list_remove_head_type(list, vm_page_t, node);

        DEBUG_ASSERT(!list_in_list(&page->node));
        DEBUG_ASSERT(page->flags & VM_PAGE_FLAG_NONFREE);

        /* see which arena this page belongs to and add it */
        pmm_arena_t *a;
        list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
            if (PAGE_BELONGS_TO_ARENA(page, a)) {
                page->flags &= ~VM_PAGE_FLAG_NONFREE;

                list_add_head(&a->free_list, &page->node);
                a->free_count++;
                if (page->flags & VM_PAGE_FLAG_RESERVED) {
                    a->reserved_count++;
                    page->flags &= ~VM_PAGE_FLAG_RESERVED;
                }
                count++;
                break;
            }
        }
    }

    return count;
}

size_t pmm_free(struct list_node *list)
{
    size_t ret;
    LTRACEF("list %p\n", list);

    DEBUG_ASSERT(list);

    mutex_acquire(&lock);
    ret = pmm_free_locked(list);
    mutex_release(&lock);

    return ret;
}

size_t pmm_free_page(vm_page_t *page)
{
    DEBUG_ASSERT(page);

    struct list_node list;
    list_initialize(&list);

    list_add_head(&list, &page->node);

    return pmm_free(&list);
}

/* physically allocate a run from arenas marked as KMAP */
void *pmm_alloc_kpages(uint count, struct list_node *list)
{
    LTRACEF("count %u\n", count);

    // XXX do fast path for single page

    paddr_t pa;
    size_t alloc_count = pmm_alloc_contiguous(count, PAGE_SIZE_SHIFT, &pa, list);
    if (alloc_count == 0)
        return NULL;

    return paddr_to_kvaddr(pa);
}

size_t pmm_free_kpages(void *_ptr, uint count)
{
    LTRACEF("ptr %p, count %u\n", _ptr, count);

    uint8_t *ptr = (uint8_t *)_ptr;

    struct list_node list;
    list_initialize(&list);

    while (count > 0) {
        vm_page_t *p = paddr_to_vm_page(vaddr_to_paddr(ptr));
        if (p) {
            list_add_tail(&list, &p->node);
        }

        ptr += PAGE_SIZE;
        count--;
    }

    return pmm_free(&list);
}

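/*
 * Allocate a physically contiguous, kernel-mapped run of count pages aligned
 * to 1 << alignment_log2, appending them to list. On success *pa (if
 * non-NULL) is set to the base physical address and count is returned;
 * returns 0 on failure.
 */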
size_t pmm_alloc_contiguous(uint count, uint8_t alignment_log2, paddr_t *pa, struct list_node *list)
{
    status_t ret;
    struct vm_page *page;
    LTRACEF("count %u, align %u\n", count, alignment_log2);

    if (count == 0)
        return 0;
    if (alignment_log2 < PAGE_SIZE_SHIFT)
        alignment_log2 = PAGE_SIZE_SHIFT;

    mutex_acquire(&lock);
    ret = pmm_alloc_pages_locked(list, &page, count, PMM_ALLOC_FLAG_KMAP |
                                 PMM_ALLOC_FLAG_CONTIGUOUS, alignment_log2);
    mutex_release(&lock);
    if (ret) {
        return 0;
    }
    if (pa) {
        *pa = vm_page_to_paddr(page);
    }

    return count;
}

static void dump_page(const vm_page_t *page)
{
    DEBUG_ASSERT(page);

    printf("page %p: address 0x%" PRIxPADDR " flags 0x%x\n", page, vm_page_to_paddr(page), page->flags);
}

static void dump_arena(const pmm_arena_t *arena, bool dump_pages)
{
    DEBUG_ASSERT(arena);

    printf("arena %p: name '%s' base 0x%" PRIxPADDR " size 0x%zx priority %u flags 0x%x\n",
           arena, arena->name, arena->base, arena->size, arena->priority, arena->flags);
    printf("\tpage_array %p, free_count %zu\n",
           arena->page_array, arena->free_count);

    /* dump all of the pages */
    if (dump_pages) {
        for (size_t i = 0; i < arena->size / PAGE_SIZE; i++) {
            dump_page(&arena->page_array[i]);
        }
    }

    /* dump the free pages */
    printf("\tfree ranges:\n");
    ssize_t last = -1;
    for (size_t i = 0; i < arena->size / PAGE_SIZE; i++) {
        if (page_is_free(&arena->page_array[i])) {
            if (last == -1) {
                last = i;
            }
        } else {
            if (last != -1) {
                printf("\t\t0x%" PRIxPADDR " - 0x%" PRIxPADDR "\n", arena->base + last * PAGE_SIZE, arena->base + i * PAGE_SIZE);
            }
            last = -1;
        }
    }

    if (last != -1) {
        printf("\t\t0x%" PRIxPADDR " - 0x%" PRIxPADDR "\n", arena->base + last * PAGE_SIZE, arena->base + arena->size);
    }
}

static int cmd_pmm(int argc, const cmd_args *argv)
{
    if (argc < 2) {
notenoughargs:
        printf("not enough arguments\n");
usage:
        printf("usage:\n");
        printf("%s arenas\n", argv[0].str);
        printf("%s alloc <count>\n", argv[0].str);
        printf("%s alloc_range <address> <count>\n", argv[0].str);
        printf("%s alloc_kpages <count>\n", argv[0].str);
        printf("%s alloc_contig <count> <alignment>\n", argv[0].str);
        printf("%s dump_alloced\n", argv[0].str);
        printf("%s free_alloced\n", argv[0].str);
        return ERR_GENERIC;
    }

    static struct list_node allocated = LIST_INITIAL_VALUE(allocated);

    if (!strcmp(argv[1].str, "arenas")) {
        pmm_arena_t *a;
        list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
            dump_arena(a, false);
        }
    } else if (!strcmp(argv[1].str, "dump_alloced")) {
        vm_page_t *page;

        list_for_every_entry(&allocated, page, vm_page_t, node) {
            dump_page(page);
        }
    } else if (!strcmp(argv[1].str, "alloc_range")) {
        if (argc < 4) goto notenoughargs;

        struct list_node list;
        list_initialize(&list);

        uint count = pmm_alloc_range(argv[2].u, argv[3].u, &list);
        printf("alloc returns %u\n", count);

        vm_page_t *p;
        list_for_every_entry(&list, p, vm_page_t, node) {
            printf("\tpage %p, address 0x%" PRIxPADDR "\n", p, vm_page_to_paddr(p));
        }

        /* add the pages to the local allocated list */
        struct list_node *node;
        while ((node = list_remove_head(&list))) {
            list_add_tail(&allocated, node);
        }
    } else if (!strcmp(argv[1].str, "alloc_kpages")) {
        if (argc < 3) goto notenoughargs;

        void *ptr = pmm_alloc_kpages(argv[2].u, NULL);
        printf("pmm_alloc_kpages returns %p\n", ptr);
    } else if (!strcmp(argv[1].str, "alloc_contig")) {
        if (argc < 4) goto notenoughargs;

        struct list_node list;
        list_initialize(&list);

        paddr_t pa;
        size_t ret = pmm_alloc_contiguous(argv[2].u, argv[3].u, &pa, &list);
        printf("pmm_alloc_contiguous returns %zu, address 0x%" PRIxPADDR "\n", ret, pa);
        printf("address %% align = 0x%lx\n", pa % argv[3].u);

        /* add the pages to the local allocated list */
        struct list_node *node;
        while ((node = list_remove_head(&list))) {
            list_add_tail(&allocated, node);
        }
    } else if (!strcmp(argv[1].str, "free_alloced")) {
        size_t err = pmm_free(&allocated);
        printf("pmm_free returns %zu\n", err);
    } else {
        printf("unknown command\n");
        goto usage;
    }

    return NO_ERROR;
}

STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND("pmm", "physical memory manager", &cmd_pmm)
#endif
STATIC_COMMAND_END(pmm);