// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2023 Google LLC
// Author: Ard Biesheuvel <[email protected]>

#include <linux/types.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include "pi.h"

/**
 * map_range - Map a contiguous range of physical pages into virtual memory
 *
 * @pte:          Address of physical pointer to array of pages to
 *                allocate page tables from
 * @start:        Virtual address of the start of the range
 * @end:          Virtual address of the end of the range (exclusive)
 * @pa:           Physical address of the start of the range
 * @prot:         Access permissions of the range
 * @level:        Translation level for the mapping
 * @tbl:          The level @level page table to create the mappings in
 * @may_use_cont: Whether the use of the contiguous attribute is allowed
 * @va_offset:    Offset between a physical page and its current mapping
 *                in the VA space
 */
void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
                      int level, pte_t *tbl, bool may_use_cont, u64 va_offset)
{
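        /*
         * A level @level entry maps lmask + 1 bytes: lshift is the number
         * of address bits resolved by the levels below this one. cmask is
         * the alignment mask for a contiguous run of leaf PTEs; it is set
         * to U64_MAX at upper levels, leaving PTE_CONT to level 3 only.
         */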
        u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
        pteval_t protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
        int lshift = (3 - level) * (PAGE_SHIFT - 3);
        u64 lmask = (PAGE_SIZE << lshift) - 1;

        start &= PAGE_MASK;
        pa &= PAGE_MASK;

        /* Advance tbl to the entry that covers start */
        tbl += (start >> (lshift + PAGE_SHIFT)) % PTRS_PER_PTE;

        /*
         * Set the right block/page bits for this level unless we are
         * clearing the mapping
         */
        if (protval)
                protval |= (level < 3) ? PMD_TYPE_SECT : PTE_TYPE_PAGE;

        while (start < end) {
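                /* Address of the next entry boundary, capped at the (page aligned) end */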
                u64 next = min((start | lmask) + 1, PAGE_ALIGN(end));

                if (level < 3 && (start | next | pa) & lmask) {
                        /*
                         * This chunk needs a finer grained mapping. Create a
                         * table mapping if necessary and recurse.
                         */
                        if (pte_none(*tbl)) {
                                *tbl = __pte(__phys_to_pte_val(*pte) |
                                             PMD_TYPE_TABLE | PMD_TABLE_UXN);
                                *pte += PTRS_PER_PTE * sizeof(pte_t);
                        }
                        map_range(pte, start, next, pa, prot, level + 1,
                                  (pte_t *)(__pte_to_phys(*tbl) + va_offset),
                                  may_use_cont, va_offset);
                } else {
                        /*
                         * Start a contiguous range if start and pa are
                         * suitably aligned
                         */
                        if (((start | pa) & cmask) == 0 && may_use_cont)
                                protval |= PTE_CONT;

                        /*
                         * Clear the contiguous attribute if the remaining
                         * range does not cover a contiguous block
                         */
                        if ((end & ~cmask) <= start)
                                protval &= ~PTE_CONT;

                        /* Put down a block or page mapping */
                        *tbl = __pte(__phys_to_pte_val(pa) | protval);
                }
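                /* Move on to the next entry at this level */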
                pa += next - start;
                start = next;
                tbl++;
        }
}
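
/*
 * Illustrative sketch: mapping a single 2 MiB region at the root level,
 * with page table pages bump-allocated from a physical cursor. The names
 * 'ptep', 'root', 'va' and 'pa' are hypothetical; create_init_idmap()
 * below is the real caller.
 *
 *      u64 ptep = ...;         // next free page for page tables
 *      map_range(&ptep, va, va + SZ_2M, pa, PAGE_KERNEL,
 *                IDMAP_ROOT_LEVEL, (pte_t *)root, true, 0);
 */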

asmlinkage u64 __init create_init_idmap(pgd_t *pg_dir, pteval_t clrmask)
{
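        /* Page table pages are bump-allocated from the memory right after pg_dir */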
        u64 ptep = (u64)pg_dir + PAGE_SIZE;
        pgprot_t text_prot = PAGE_KERNEL_ROX;
        pgprot_t data_prot = PAGE_KERNEL;

        pgprot_val(text_prot) &= ~clrmask;
        pgprot_val(data_prot) &= ~clrmask;

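        /*
         * Map [_stext, __initdata_begin) read-only executable, and
         * [__initdata_begin, _end) writable non-executable.
         */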
        map_range(&ptep, (u64)_stext, (u64)__initdata_begin, (u64)_stext,
                  text_prot, IDMAP_ROOT_LEVEL, (pte_t *)pg_dir, false, 0);
        map_range(&ptep, (u64)__initdata_begin, (u64)_end, (u64)__initdata_begin,
                  data_prot, IDMAP_ROOT_LEVEL, (pte_t *)pg_dir, false, 0);

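        /* Return the end of the page table memory that was consumed */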
        return ptep;
}