/*
 * Copyright (c) 2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#pragma once

/* some assembly #defines, need to match the structure below */
#if IS_64BIT
#define __MMU_INITIAL_MAPPING_PHYS_OFFSET 0
#define __MMU_INITIAL_MAPPING_VIRT_OFFSET 8
#define __MMU_INITIAL_MAPPING_SIZE_OFFSET 16
#define __MMU_INITIAL_MAPPING_FLAGS_OFFSET 24
#define __MMU_INITIAL_MAPPING_SIZE        40
#else
#define __MMU_INITIAL_MAPPING_PHYS_OFFSET 0
#define __MMU_INITIAL_MAPPING_VIRT_OFFSET 4
#define __MMU_INITIAL_MAPPING_SIZE_OFFSET 8
#define __MMU_INITIAL_MAPPING_FLAGS_OFFSET 12
#define __MMU_INITIAL_MAPPING_SIZE        20
#endif

/* flags for initial mapping struct */
#define MMU_INITIAL_MAPPING_TEMPORARY     (0x1)
#define MMU_INITIAL_MAPPING_FLAG_UNCACHED (0x2)
#define MMU_INITIAL_MAPPING_FLAG_DEVICE   (0x4)
#define MMU_INITIAL_MAPPING_FLAG_DYNAMIC  (0x8)  /* entry has to be patched up by platform_reset */
#ifndef ASSEMBLY

#include <sys/types.h>
#include <stdint.h>
#include <compiler.h>
#include <list.h>
#include <stdlib.h>
#include <arch.h>
#include <arch/mmu.h>
#include <arch/tbi.h>
#include <kernel/vm_obj.h>
#include <lib/binary_search_tree.h>
#include <lk/reflist.h>

__BEGIN_CDECLS

static inline uintptr_t page_align(uintptr_t p) {
    return align(p, PAGE_SIZE);
}

#define IS_PAGE_ALIGNED(x) IS_ALIGNED(x, PAGE_SIZE)

struct mmu_initial_mapping {
    paddr_t phys;
    vaddr_t virt;
    size_t  size;
    unsigned int flags;
    const char *name;
};

/* Assert that the assembly macros above match this struct. */
STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, phys) == __MMU_INITIAL_MAPPING_PHYS_OFFSET);
STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, virt) == __MMU_INITIAL_MAPPING_VIRT_OFFSET);
STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, size) == __MMU_INITIAL_MAPPING_SIZE_OFFSET);
STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, flags) == __MMU_INITIAL_MAPPING_FLAGS_OFFSET);
STATIC_ASSERT(sizeof(struct mmu_initial_mapping) == __MMU_INITIAL_MAPPING_SIZE);

/* The platform or target must fill out one of these to set up the initial memory map
 * for the kernel and enough IO space to boot.
 */
extern struct mmu_initial_mapping mmu_initial_mappings[];
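
/*
 * Example (illustrative sketch only; the addresses and names below are
 * hypothetical, not taken from any real target): a platform might describe
 * its RAM and a peripheral window like this, conventionally ending the list
 * with a zeroed entry.
 *
 *   struct mmu_initial_mapping mmu_initial_mappings[] = {
 *       {
 *           .phys = 0x80000000,
 *           .virt = KERNEL_ASPACE_BASE,
 *           .size = 0x40000000,
 *           .flags = 0,
 *           .name = "memory",
 *       },
 *       {
 *           .phys = 0x10000000,
 *           .virt = KERNEL_ASPACE_BASE + 0x40000000,
 *           .size = 0x01000000,
 *           .flags = MMU_INITIAL_MAPPING_FLAG_DEVICE,
 *           .name = "peripherals",
 *       },
 *       { 0 },
 *   };
 */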

/* core per page structure */
typedef struct vm_page {
    struct list_node node;

    uint flags : 8;
    uint ref : 24;
} vm_page_t;

#define VM_PAGE_FLAG_NONFREE  (0x1)
#define VM_PAGE_FLAG_RESERVED (0x2)

/* kernel address space */
#ifndef KERNEL_ASPACE_BASE
#define KERNEL_ASPACE_BASE ((vaddr_t)0x80000000UL)
#endif
#ifndef KERNEL_ASPACE_SIZE
#define KERNEL_ASPACE_SIZE ((vaddr_t)0x80000000UL)
#endif

STATIC_ASSERT(KERNEL_ASPACE_BASE + (KERNEL_ASPACE_SIZE - 1) > KERNEL_ASPACE_BASE);

static inline bool is_kernel_address(vaddr_t va)
{
    va = arch_adjusted_vaddr(va, true);
    return (va >= (vaddr_t)KERNEL_ASPACE_BASE && va <= ((vaddr_t)KERNEL_ASPACE_BASE + ((vaddr_t)KERNEL_ASPACE_SIZE - 1)));
}

/* user address space, defaults to below kernel space with a 16MB guard gap on either side */
#ifndef USER_ASPACE_BASE
#define USER_ASPACE_BASE ((vaddr_t)0x01000000UL)
#endif
#ifndef USER_ASPACE_SIZE
#define USER_ASPACE_SIZE ((vaddr_t)KERNEL_ASPACE_BASE - USER_ASPACE_BASE - 0x01000000UL)
#endif

STATIC_ASSERT(USER_ASPACE_BASE + (USER_ASPACE_SIZE - 1) > USER_ASPACE_BASE);

static inline bool is_user_address(vaddr_t va)
{
    va = arch_adjusted_vaddr(va, false);
    return (va >= USER_ASPACE_BASE && va <= (USER_ASPACE_BASE + (USER_ASPACE_SIZE - 1)));
}

/* physical allocator */
typedef struct pmm_arena {
    struct list_node node;
    const char *name;

    uint flags;
    uint priority;

    paddr_t base;
    vaddr_t kvaddr;
    size_t  size;

    size_t free_count;
    /* A subset of free pages that can only be allocated using PMM_ALLOC_FLAG_FROM_RESERVED */
    size_t reserved_count;

    struct vm_page *page_array;
    struct list_node free_list;
} pmm_arena_t;

#define PMM_ARENA_FLAG_KMAP (0x1) /* this arena is already mapped and useful for kallocs */

/* Add a pre-filled memory arena to the physical allocator. */
status_t pmm_add_arena(pmm_arena_t *arena);
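
/*
 * Example (illustrative sketch; the base, size and hook name are
 * hypothetical): a platform typically declares a static arena covering its
 * RAM and registers it early in boot.
 *
 *   static pmm_arena_t ram_arena = {
 *       .name = "sdram",
 *       .base = 0x80000000,
 *       .size = 0x40000000,
 *       .flags = PMM_ARENA_FLAG_KMAP,
 *   };
 *
 *   void platform_init_pmm(void) {
 *       pmm_add_arena(&ram_arena);
 *   }
 */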

/**
 * pmm_add_arena_late_etc() - Add memory arena
 * @arena: pointer to pre-filled &struct pmm_arena
 * @reserve_at_beg: number of bytes that need to be reserved
 *  (marked as in use) at the beginning of the arena memory region
 * @reserve_at_end: number of bytes that need to be reserved
 *  (marked as in use) at the end of the arena memory region
 *
 * Return: 0 on success, negative err otherwise
 */
status_t pmm_add_arena_late_etc(pmm_arena_t *arena,
                                size_t reserve_at_beg,
                                size_t reserve_at_end);

/* Add a pre-filled arena during the late (post-vm) stage of boot */
static status_t pmm_add_arena_late(pmm_arena_t *arena) {
    return pmm_add_arena_late_etc(arena, 0, 0);
}

/* Optional flags passed to pmm_alloc */
#define PMM_ALLOC_FLAG_KMAP (1U << 0)
#define PMM_ALLOC_FLAG_CONTIGUOUS (1U << 1)
#define PMM_ALLOC_FLAG_FROM_RESERVED (1U << 2)
#define PMM_ALLOC_FLAG_NO_CLEAR (1U << 3)
#define PMM_ALLOC_FLAG_ALLOW_TAGGED (1U << 4)

struct res_group;

/**
 * pmm_alloc_from_res_group - Allocate and clear @count pages of physical memory.
 * @objp:       Pointer to returned vmm_obj (untouched if return code is not 0).
 * @ref:        Reference to add to *@objp (untouched if return code is not 0).
 * @res_group:  The resource group to use to track this allocation (if not NULL).
 * @count:      Number of pages to allocate. Must be greater than 0.
 * @flags:      Bitmask to optionally restrict allocation to areas that are
 *              already mapped in the kernel, PMM_ALLOC_FLAG_KMAP (e.g. for
 *              kernel heap and page tables) and/or to allocate a single
 *              physically contiguous range, PMM_ALLOC_FLAG_CONTIGUOUS.
 * @align_log2: Alignment needed for contiguous allocation, 0 otherwise.
 *
 * Allocate and initialize a vmm_obj that tracks the allocated pages.
 *
 * Return: 0 on success, ERR_NO_MEMORY if there is not enough memory free to
 *         allocate the vmm_obj or the requested page count.
 */
status_t pmm_alloc_from_res_group(struct vmm_obj **objp, struct obj_ref* ref, struct res_group* res_group, uint count,
                   uint32_t flags, uint8_t align_log2);

/**
 * pmm_alloc - Allocate and clear @count pages of physical memory.
 * @objp:       Pointer to returned vmm_obj (untouched if return code is not 0).
 * @ref:        Reference to add to *@objp (untouched if return code is not 0).
 * @count:      Number of pages to allocate. Must be greater than 0.
 * @flags:      Bitmask to optionally restrict allocation to areas that are
 *              already mapped in the kernel, PMM_ALLOC_FLAG_KMAP (e.g. for
 *              kernel heap and page tables) and/or to allocate a single
 *              physically contiguous range, PMM_ALLOC_FLAG_CONTIGUOUS.
 * @align_log2: Alignment needed for contiguous allocation, 0 otherwise.
 *
 * Allocate and initialize a vmm_obj that tracks the allocated pages.
 *
 * Return: 0 on success, ERR_NO_MEMORY if there is not enough memory free to
 *         allocate the vmm_obj or the requested page count.
 *
 * Same as pmm_alloc_from_res_group() above, but with res_group set to NULL.
 */
static inline status_t pmm_alloc(struct vmm_obj **objp, struct obj_ref* ref, uint count,
                   uint32_t flags, uint8_t align_log2) {
    return pmm_alloc_from_res_group(objp, ref, NULL, count, flags, align_log2);
}
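
/*
 * Example (illustrative sketch): allocate four zeroed pages tracked by a
 * vmm_obj, then drop the reference when the pages are no longer needed.
 * OBJ_REF_INITIAL_VALUE comes from lk/reflist.h; this assumes the usual
 * vmm_obj_del_ref() helper from kernel/vm_obj.h is available.
 *
 *   struct vmm_obj *obj;
 *   struct obj_ref ref = OBJ_REF_INITIAL_VALUE(ref);
 *   status_t rc = pmm_alloc(&obj, &ref, 4, PMM_ALLOC_FLAG_KMAP, 0);
 *   if (rc != NO_ERROR)
 *       return rc;
 *   ... use the pages, e.g. map them with vmm_alloc_obj() ...
 *   vmm_obj_del_ref(obj, &ref);
 */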

/* Allocate a specific range of physical pages, adding to the tail of the passed list.
 * The list must be initialized.
 * Returns the number of pages allocated.
 * NOTE: This function does not clear the allocated pages.
 */
size_t pmm_alloc_range(paddr_t address, uint count, struct list_node *list) __WARN_UNUSED_RESULT;

/* Free a list of physical pages.
 * Returns the number of pages freed.
 */
size_t pmm_free(struct list_node *list);

/* Helper routine for the above. */
size_t pmm_free_page(vm_page_t *page);

/* Allocate and clear a run of contiguous pages, aligned on a log2 byte boundary (0-31).
 * If the optional physical address pointer is passed, return the address.
 * If the optional list is passed, append the allocated page structures to the tail of the list.
 */
size_t pmm_alloc_contiguous(uint count, uint8_t align_log2, paddr_t *pa, struct list_node *list);

/* Allocate and clear a run of pages out of the kernel area and return the pointer in kernel space.
 * If the optional list is passed, append the allocated page structures to the tail of the list.
 */
void *pmm_alloc_kpages(uint count, struct list_node *list);

/* Helper routine for pmm_alloc_kpages. */
static inline void *pmm_alloc_kpage(void) { return pmm_alloc_kpages(1, NULL); }

size_t pmm_free_kpages(void *ptr, uint count);
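
/*
 * Example (illustrative sketch): grab a single zeroed kernel-mapped page,
 * e.g. for a page table or a small bookkeeping structure, then return it.
 *
 *   void *page = pmm_alloc_kpage();
 *   if (!page)
 *       return ERR_NO_MEMORY;
 *   ...
 *   pmm_free_kpages(page, 1);
 */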

/* assign physical addresses and sizes to the dynamic entries in the initial
 * mappings
 */
void vm_assign_initial_dynamic(paddr_t kernel_start, size_t ram_size);

/* map the initial mappings */
void vm_map_initial_mappings(void);

/* physical to virtual */
void *paddr_to_kvaddr(paddr_t pa);

/* a hint as to which virtual addresses will be returned by pmm_alloc_kpages */
void *kvaddr_get_range(size_t* size_return);

/* virtual to physical */
paddr_t vaddr_to_paddr(void *va);

/* vm_page_t to physical address */
paddr_t vm_page_to_paddr(const vm_page_t *page);

/* paddr to vm_page_t */
vm_page_t *paddr_to_vm_page(paddr_t addr);

/* virtual allocator */
typedef struct vmm_aspace {
    struct list_node node;
    /* used for allocations with VMM_FLAG_QUOTA set */
    struct res_group* quota_res_group;
    struct obj_ref quota_res_group_ref;
    char name[32];

    uint flags;

    vaddr_t base;
    size_t  size;

    struct bst_root regions;

    arch_aspace_t arch_aspace;
} vmm_aspace_t;

#define VMM_ASPACE_FLAG_KERNEL 0x1
#define VMM_ASPACE_FLAG_BTI    0x2

/**
 * struct vmm_obj_slice - range of memory backed by a &struct vmm_obj
 * @obj:     backing object for the slice
 * @obj_ref: reference to keep the backing object alive
 * @offset:  offset in bytes into the object at which the slice begins
 * @size:    number of bytes in the slice
 *
 * &struct vmm_obj_slice is intended to represent a particular range of
 * memory in a backing object for those cases where something other than
 * the entire backing object will be used.
 *
 * Must be initialized with vmm_obj_slice_init() or
 * VMM_OBJ_SLICE_INITIAL_VALUE.
 */
struct vmm_obj_slice {
    struct vmm_obj *obj;
    struct obj_ref obj_ref;
    size_t offset;
    size_t size;
};

#define VMM_OBJ_SLICE_INITIAL_VALUE(slice)                 \
    {                                                      \
        .obj = NULL,                                       \
        .obj_ref = OBJ_REF_INITIAL_VALUE((slice).obj_ref), \
        .offset = 0,                                       \
        .size = 0,                                         \
    }

/**
 * vmm_obj_slice_init() - initializes a &struct vmm_obj_slice
 * @slice: slice to initialize
 */
void vmm_obj_slice_init(struct vmm_obj_slice *slice);

/**
 * vmm_obj_slice_bind() - bind a vmm_obj_slice to a particular vmm_obj
 * @slice:  Slice to bind (should be initialized and unused).
 * @obj:    vmm_obj to bind the slice to.
 * @offset: Starting offset into the vmm_obj.
 * @size:   Size of the slice.
 *
 * Attaches a subrange of a particular &struct vmm_obj to the slice.
 * The caller is responsible for validating the offset and size.
 */
void vmm_obj_slice_bind(struct vmm_obj_slice *slice, struct vmm_obj *obj,
                        size_t offset, size_t size);

/**
 * vmm_obj_slice_release() - release reference held by a &struct vmm_obj_slice
 * @slice: slice to release
 *
 * Releases any resource attached to the slice.
 *
 * Note: This assumes that a non-NULL obj implies the obj_ref field is
 *       releasable. This invariant will hold if you have used the API to
 *       interact with the slice, but if you have updated a field manually,
 *       it is the responsibility of the caller to ensure this holds.
 */
void vmm_obj_slice_release(struct vmm_obj_slice *slice);
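
/*
 * Example (illustrative sketch; obj stands for some existing vmm_obj the
 * caller already holds): typical slice lifecycle. Binding attaches the
 * subrange and holds a reference via obj_ref; releasing drops it again.
 *
 *   struct vmm_obj_slice slice;
 *   vmm_obj_slice_init(&slice);
 *   vmm_obj_slice_bind(&slice, obj, 0, PAGE_SIZE);
 *   ... operate on the first PAGE_SIZE bytes of obj via the slice ...
 *   vmm_obj_slice_release(&slice);
 */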

typedef struct vmm_region {
    struct bst_node node;
    char name[32];

    uint flags;
    uint arch_mmu_flags;

    vaddr_t base;

    struct vmm_obj_slice obj_slice;
} vmm_region_t;

#define VMM_REGION_FLAG_RESERVED 0x1
#define VMM_REGION_FLAG_PHYSICAL 0x2
#define VMM_REGION_FLAG_INTERNAL_MASK 0xffff

/* grab a handle to the kernel address space */
extern vmm_aspace_t _kernel_aspace;
static inline vmm_aspace_t *vmm_get_kernel_aspace(void)
{
    return &_kernel_aspace;
}

/* virtual address to containing address space */
struct vmm_aspace *vaddr_to_aspace(void *ptr);

/**
 * vmm_lock_aspace() - Lock an address space so memory mappings can't change
 * @aspace: The address space to lock
 *
 * Prevents changes to the address space. The current implementation locks all
 * address spaces.
 */
void vmm_lock_aspace(vmm_aspace_t *aspace);

/**
 * vmm_unlock_aspace() - Release lock on address space
 * @aspace: The address space to unlock
 */
void vmm_unlock_aspace(vmm_aspace_t *aspace);


/**
 * vmm_find_spot() - Finds a gap of the requested size in the address space
 * @aspace: The address space to locate a gap in
 * @size:   How large of a gap is sought
 * @out:    Output parameter for the base of the gap
 *
 * Finds a gap of size @size in @aspace, and outputs its address. If ASLR is
 * active, this location will be randomized.
 *
 * This function *DOES NOT* actually allocate anything, it merely locates a
 * prospective location. It is intended for use in situations where a larger
 * gap than an individual mapping is required, such as in the case of the ELF
 * loader (where text, rodata, and data are all separate mappings, but must
 * have fixed relative offsets).
 *
 * The address returned is suitable for use with vmm_alloc() and similar
 * functions with the VMM_FLAG_VALLOC_SPECIFIC flag.
 *
 * On ARM32, this function assumes the request is for *secure* memory
 * for the purposes of region compatibility.
 *
 * Return: Whether a spot was successfully located
 */
bool vmm_find_spot(vmm_aspace_t *aspace, size_t size, vaddr_t *out);
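
/*
 * Example (illustrative sketch; sizes, names and mmu flags are placeholders):
 * reserve one window big enough for several adjacent mappings, ELF-loader
 * style, then place each mapping at a fixed offset inside it with
 * VMM_FLAG_VALLOC_SPECIFIC. Truly adjacent regions may also need the
 * VMM_FLAG_NO_START_GUARD / VMM_FLAG_NO_END_GUARD flags defined below.
 *
 *   vaddr_t base;
 *   if (!vmm_find_spot(aspace, text_size + data_size, &base))
 *       return ERR_NO_MEMORY;
 *
 *   void *text = (void *)base;
 *   status_t rc = vmm_alloc(aspace, "text", text_size, &text, 0,
 *                           VMM_FLAG_VALLOC_SPECIFIC, text_mmu_flags);
 *   ...
 *   void *data = (void *)(base + text_size);
 *   rc = vmm_alloc(aspace, "data", data_size, &data, 0,
 *                  VMM_FLAG_VALLOC_SPECIFIC, data_mmu_flags);
 */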

/* reserve a chunk of address space to prevent allocations from that space */
status_t vmm_reserve_space(vmm_aspace_t *aspace, const char *name, size_t size, vaddr_t vaddr);

/* allocate a region of memory backed by a vmm_obj */
status_t vmm_alloc_obj(vmm_aspace_t *aspace, const char *name,
                       struct vmm_obj *obj, size_t offset, size_t size,
                       void **ptr, uint8_t align_log2, uint vmm_flags,
                       uint arch_mmu_flags);

/* allocate a region of virtual space that maps one or more physical ranges of address space.
   the physical pages that back this are not allocated from the pmm. */
status_t vmm_alloc_physical_etc(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint8_t align_log2, const paddr_t *paddr, uint paddr_count, uint vmm_flags, uint arch_mmu_flags);

/* single-range wrapper for the above: allocate a region of virtual space that maps one physical
   piece of address space. the physical pages that back this are not allocated from the pmm. */
static inline status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint8_t align_log2, paddr_t paddr, uint vmm_flags, uint arch_mmu_flags)
{
    return vmm_alloc_physical_etc(aspace, name, size, ptr, align_log2,
                                  &paddr, 1, vmm_flags, arch_mmu_flags);
}
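
/*
 * Example (illustrative sketch; the physical address is made up and the
 * arch_mmu flag name should be checked against the target's arch/mmu.h):
 * map a device's register page uncached into the kernel address space.
 *
 *   void *regs;
 *   status_t rc = vmm_alloc_physical(vmm_get_kernel_aspace(), "uart", PAGE_SIZE,
 *                                    &regs, 0, 0x09000000, 0,
 *                                    ARCH_MMU_FLAG_UNCACHED_DEVICE);
 */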

/* allocate a region of memory backed by newly allocated contiguous physical memory */
status_t vmm_alloc_contiguous(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint8_t align_log2, uint vmm_flags, uint arch_mmu_flags);

/* allocate a region of memory, but do not back it with physical memory */
status_t vmm_alloc_no_physical(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint8_t align_log2, uint vmm_flags, uint arch_mmu_flags);

/* allocate a region of memory backed by newly allocated physical memory */
status_t vmm_alloc(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint8_t align_log2, uint vmm_flags, uint arch_mmu_flags);
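
/*
 * Example (illustrative sketch; name and size are arbitrary): allocate a
 * page-aligned scratch buffer in the kernel address space with default
 * flags, then free it again by address with vmm_free_region() (declared
 * further below).
 *
 *   void *buf;
 *   status_t rc = vmm_alloc(vmm_get_kernel_aspace(), "scratch", 4 * PAGE_SIZE,
 *                           &buf, 0, 0, 0);
 *   if (rc == NO_ERROR) {
 *       ...
 *       vmm_free_region(vmm_get_kernel_aspace(), (vaddr_t)buf);
 *   }
 */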

/**
 * vmm_find_region() - find the region in which a specified virtual address resides
 * @aspace: address space to look for @vaddr in
 * @vaddr:  base virtual address to look for
 *
 * Must be called after vmm_lock_aspace(). The returned pointer is only valid
 * until vmm_unlock_aspace() is called.
 *
 * Return: region struct or %NULL if @vaddr is not mapped in @aspace
 */
vmm_region_t* vmm_find_region(const vmm_aspace_t* aspace, vaddr_t vaddr);

/**
 * vmm_get_obj() - Acquire a slice from a chunk of a &struct vmm_aspace
 * @aspace: address space to extract from
 * @vaddr:  base virtual address the slice should start at
 * @size:   desired slice size
 * @slice:  output parameter for the result slice, must not be null, should be
 *          initialized
 *
 * Locates the &struct vmm_obj backing a particular address range within
 * @aspace, and returns a slice representing it if possible. If the range
 * is unmapped, has no vmm_obj backing, or spans multiple backing slices,
 * an error will be returned.
 *
 * On success, @slice will be updated to refer to a subrange of the backing
 * slice for the supplied virtual address range. On failure, @slice will be
 * untouched.
 *
 * Return: Status code; any value other than NO_ERROR is a failure.
 */
status_t vmm_get_obj(const vmm_aspace_t *aspace, vaddr_t vaddr, size_t size,
                     struct vmm_obj_slice *slice);
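
/*
 * Example (illustrative sketch; buf_vaddr and buf_size describe some buffer
 * already mapped in aspace): look up the object backing the buffer so it can
 * be mapped elsewhere or handed to another subsystem.
 *
 *   struct vmm_obj_slice slice = VMM_OBJ_SLICE_INITIAL_VALUE(slice);
 *   status_t rc = vmm_get_obj(aspace, buf_vaddr, buf_size, &slice);
 *   if (rc == NO_ERROR) {
 *       ... use slice.obj, slice.offset and slice.size ...
 *       vmm_obj_slice_release(&slice);
 *   }
 */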

#define VMM_FREE_REGION_FLAG_EXPAND 0x1

/* Unmap a previously allocated region and free the physical memory pages backing it (if any).
   If flags is 0, va and size must match the entire region.
   If flags is VMM_FREE_REGION_FLAG_EXPAND, free the entire region containing [va, va+size-1]. */
status_t vmm_free_region_etc(vmm_aspace_t *aspace, vaddr_t va, size_t size, uint32_t flags);

/* Unmap a previously allocated region and free the physical memory pages backing it (if any).
   va can be anywhere in the region. */
status_t vmm_free_region(vmm_aspace_t *aspace, vaddr_t va);

/* For the above region creation routines. Allocate virtual space at the passed in pointer. */
#define VMM_FLAG_VALLOC_SPECIFIC 0x10000

/*
 * Disable the default guard page before the region. Can be used with
 * VMM_FLAG_VALLOC_SPECIFIC if two regions need to be created with no gap.
 */
#define VMM_FLAG_NO_START_GUARD 0x20000

/*
 * Disable the default guard page after the region. Can be used with
 * VMM_FLAG_VALLOC_SPECIFIC if two regions need to be created with no gap.
 */
#define VMM_FLAG_NO_END_GUARD 0x40000

/*
 * Do not allocate physical memory, only reserve the address range.
 */
#define VMM_FLAG_NO_PHYSICAL 0x80000

/*
 * Count this allocation towards the app's memory usage quota.
 */
#define VMM_FLAG_QUOTA 0x100000


/**
 * vmm_create_aspace_with_quota() - Allocate a new address space with a size limit.
 * @aspace: a pointer to set to the new aspace.
 * @name: the name of the new aspace.
 * @size: the size limit of the new aspace, 0 meaning no limit.
 * @flags: the flags of the new aspace.
 */
status_t vmm_create_aspace_with_quota(vmm_aspace_t **aspace, const char *name, size_t size, uint flags);

static inline status_t vmm_create_aspace(vmm_aspace_t** _aspace,
                           const char* name,
                           uint flags) {
    return vmm_create_aspace_with_quota(_aspace, name, 0, flags);
}
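
/*
 * Example (illustrative sketch; the name is arbitrary): create an address
 * space for a user task with no quota, and tear it down again when the task
 * exits.
 *
 *   vmm_aspace_t *aspace;
 *   status_t rc = vmm_create_aspace(&aspace, "app", 0);
 *   if (rc != NO_ERROR)
 *       return rc;
 *   ...
 *   vmm_free_aspace(aspace);
 */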

/* destroy everything in the address space */
status_t vmm_free_aspace(vmm_aspace_t *aspace);

/* internal routine used by the scheduler to swap mmu contexts */
void vmm_context_switch(vmm_aspace_t *oldspace, vmm_aspace_t *newaspace);

/* set the current user aspace as active on the current thread.
   NULL is a valid argument, which unmaps the current user address space */
void vmm_set_active_aspace(vmm_aspace_t *aspace);

/**
 * vmm_get_address_description() - get a descriptive name for the given address
 * @vaddr: the address to get a descriptive name for
 * @name: a place to store the name
 * @name_size: the size of the output buffer
 *
 * Gets a descriptive name for the given address and returns it in the @name
 * buffer, up to @name_size bytes including the null terminator.
 * If the address falls inside a region, the name of the region will be
 * returned. If the address is not in a region, but falls just before and/or
 * after another region, it will return a string indicating the distance in
 * bytes from those region(s).
 * If the address is not in or adjacent to a region, the description will say
 * "<no region>", and if the region cannot be determined because the vmm_lock
 * is held, the returned description will say "<unavailable>".
 */
void vmm_get_address_description(vaddr_t vaddr, char *name, size_t name_size);

#define VMM_MAX_ADDRESS_DESCRIPTION_SIZE \
    ((sizeof(((vmm_region_t*)0)->name) - 1) * 2 + \
     sizeof("NNNN bytes after ") + \
     sizeof(", NNNN bytes before ") - 1)
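
/*
 * Example (illustrative sketch; dprintf and CRITICAL come from LK's debug
 * headers, not from this file): describe a faulting address when reporting
 * a fault.
 *
 *   char desc[VMM_MAX_ADDRESS_DESCRIPTION_SIZE];
 *   vmm_get_address_description(fault_addr, desc, sizeof(desc));
 *   dprintf(CRITICAL, "fault at 0x%lx (%s)\n", fault_addr, desc);
 */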

/**
 * update_relocation_entries() - Update all entries in the relocation table
 *                               by subtracting a given value from each one.
 * @relr_start: start of the relocation list.
 * @relr_end: end of the relocation list.
 * @reloc_delta: Value to subtract from each relocation entry.
 *
 * Iterates through all entries in the relocation table starting at @relr_start
 * and subtracts @reloc_delta from each entry that encodes an absolute pointer.
 * This is currently called to update the table emitted by the linker with
 * kernel virtual addresses into a table containing physical addresses, so the
 * subtractions should never underflow if @reloc_delta is the positive
 * difference between the kernel's virtual and physical addresses.
 */
void update_relocation_entries(uintptr_t* relr_start, uintptr_t* relr_end,
                               uintptr_t reloc_delta);

/**
 * relocate_kernel() - Apply the given list of relocations to the kernel.
 * @relr_start: start of the relocation list.
 * @relr_end: end of the relocation list.
 * @old_base: current base address of the kernel.
 * @new_base: target base address to relocate the kernel to.
 *
 * This function applies the given list of relative relocations to the kernel,
 * moving the base of the kernel from @old_base to @new_base.
 */
void relocate_kernel(uintptr_t* relr_start, uintptr_t* relr_end,
                     uintptr_t old_base, uintptr_t new_base);

/* allocate a buffer in early boot memory of the given size and alignment */
void *boot_alloc_memalign(size_t len, size_t alignment) __MALLOC;

/* allocate a buffer in early boot memory of the given size and an 8 byte
 * alignment
 */
void *boot_alloc_mem(size_t len) __MALLOC;

#ifdef KERNEL_BASE_ASLR
/* select a random address for KERNEL_BASE_ASLR */
vaddr_t aslr_randomize_kernel_base(vaddr_t kernel_base);
#else
static inline vaddr_t aslr_randomize_kernel_base(vaddr_t kernel_base) {
    return kernel_base;
}
#endif

__END_CDECLS

#endif // !ASSEMBLY