/* SPDX-License-Identifier: GPL-2.0-only */

#include <cbfs.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/x86/cr.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/pae.h>
#include <string.h>
#include <symbols.h>
#include <types.h>

#define PDPTE_PRES (1ULL << 0)
#define PDPTE_ADDR_MASK (~((1ULL << 12) - 1))

#define PDE_PRES (1ULL << 0)
#define PDE_RW   (1ULL << 1)
#define PDE_US   (1ULL << 2)
#define PDE_PWT  (1ULL << 3)
#define PDE_PCD  (1ULL << 4)
#define PDE_A    (1ULL << 5)
#define PDE_D    (1ULL << 6) // only valid with PS=1
#define PDE_PS   (1ULL << 7)
#define PDE_G    (1ULL << 8)  // only valid with PS=1
#define PDE_PAT  (1ULL << 12) // only valid with PS=1
#define PDE_XD   (1ULL << 63)
#define PDE_ADDR_MASK (~((1ULL << 12) - 1))

#define PTE_PRES (1ULL << 0)
#define PTE_RW   (1ULL << 1)
#define PTE_US   (1ULL << 2)
#define PTE_PWT  (1ULL << 3)
#define PTE_PCD  (1ULL << 4)
#define PTE_A    (1ULL << 5)
#define PTE_D    (1ULL << 6)
#define PTE_PAT  (1ULL << 7)
#define PTE_G    (1ULL << 8)
#define PTE_XD   (1ULL << 63)

#define PDPTE_IDX_SHIFT 30
#define PDPTE_IDX_MASK  0x3

#define PDE_IDX_SHIFT 21
#define PDE_IDX_MASK  0x1ff

#define PTE_IDX_SHIFT 12
#define PTE_IDX_MASK  0x1ff

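/* True if the half-open ranges [a, b) and [s, e) intersect. */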
#define OVERLAP(a, b, s, e) ((b) > (s) && (a) < (e))

static const size_t s2MiB = 2 * MiB;
static const size_t s4KiB = 4 * KiB;

struct pde {
	uint32_t addr_lo;
	uint32_t addr_hi;
} __packed;
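
/*
 * Scratch area layout, 20 KiB total: four page directories of 512 entries
 * each (16 KiB) mapping 4 GiB with 2 MiB pages, followed by one 4 KiB page
 * whose first four entries serve as the PDPT that CR3 points at.
 */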
struct pg_table {
	struct pde pd[2048];
	struct pde pdp[512];
} __packed;

void paging_enable_pae_cr3(uintptr_t cr3)
{
	/* Load the page table address */
	write_cr3(cr3);
	paging_enable_pae();
}

void paging_enable_pae(void)
{
	CRx_TYPE cr0;
	CRx_TYPE cr4;

	/* Enable PAE */
	cr4 = read_cr4();
	cr4 |= CR4_PAE;
	write_cr4(cr4);

	/* Enable Paging */
	cr0 = read_cr0();
	cr0 |= CR0_PG;
	write_cr0(cr0);
}

void paging_disable_pae(void)
{
	CRx_TYPE cr0;
	CRx_TYPE cr4;

	/* Disable Paging */
	cr0 = read_cr0();
	cr0 &= ~(CRx_TYPE)CR0_PG;
	write_cr0(cr0);

	/* Disable PAE */
	cr4 = read_cr4();
	cr4 &= ~(CRx_TYPE)CR4_PAE;
	write_cr4(cr4);
}

/*
 * Prepare PAE pagetables that identity map the whole 32-bit address space
 * using 2 MiB pages. The PAT bits are left at zero, i.e. all cacheable, but
 * MTRRs still apply. CR3 is loaded and PAE is enabled by this function.
 *
 * Requires scratch memory for the pagetables.
 *
 * @param pgtbl     Where the pagetables reside, must be 4 KiB aligned and
 *                  20 KiB in size.
 *                  Content at the physical address isn't preserved.
 * @return 0 on success, 1 on error
 */
int init_pae_pagetables(void *pgtbl)
{
	struct pg_table *pgtbl_buf = (struct pg_table *)pgtbl;
	struct pde *pd = pgtbl_buf->pd, *pdp = pgtbl_buf->pdp;

	printk(BIOS_DEBUG, "%s: Using address %p for page tables\n",
	       __func__, pgtbl_buf);

	/* Cover some basic error conditions */
	if (!IS_ALIGNED((uintptr_t)pgtbl_buf, s4KiB)) {
		printk(BIOS_ERR, "%s: Invalid alignment\n", __func__);
		return 1;
	}

	paging_disable_pae();

	/* Point the page directory pointers at the page directories. */
	memset(pgtbl_buf->pdp, 0, sizeof(pgtbl_buf->pdp));

	pdp[0].addr_lo = ((uintptr_t)&pd[512*0]) | PDPTE_PRES;
	pdp[1].addr_lo = ((uintptr_t)&pd[512*1]) | PDPTE_PRES;
	pdp[2].addr_lo = ((uintptr_t)&pd[512*2]) | PDPTE_PRES;
	pdp[3].addr_lo = ((uintptr_t)&pd[512*3]) | PDPTE_PRES;

	/* Identity map the whole 32-bit address space */
	for (size_t i = 0; i < 2048; i++) {
		pd[i].addr_lo = (i << PDE_IDX_SHIFT) | PDE_PS | PDE_PRES | PDE_RW;
		pd[i].addr_hi = 0;
	}

	paging_enable_pae_cr3((uintptr_t)pdp);

	return 0;
}

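/*
 * Example (an illustrative sketch, not part of the original file): callers
 * typically hand in a statically allocated, page-aligned scratch buffer.
 * The symbol name below is hypothetical.
 *
 *	static char pae_pgtbl[20 * KiB] __attribute__((aligned(4 * KiB)));
 *
 *	if (init_pae_pagetables(pae_pgtbl))
 *		printk(BIOS_ERR, "PAE pagetable setup failed\n");
 */
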
/*
 * Map a single 2 MiB page in the pagetables created by
 * init_pae_pagetables().
 *
 * The function does not check if the page was already non identity mapped;
 * this allows callers to reuse one page without having to explicitly unmap
 * it between calls.
 *
 * @param pgtbl     Where the pagetables created by init_pae_pagetables()
 *                  reside.
 *                  Content at the physical address is preserved except for
 *                  the single entry corresponding to vmem_addr.
 * @param paddr     Physical memory address to map. The function prints a
 *                  warning if it isn't aligned to 2 MiB.
 * @param vmem_addr Where the virtual non identity mapped page resides, must
 *                  be at least 2 MiB in size. The function prints a warning
 *                  if it isn't aligned to 2 MiB.
 *                  Content at the physical address is preserved.
 */
void pae_map_2M_page(void *pgtbl, uint64_t paddr, void *vmem_addr)
{
	struct pg_table *pgtbl_buf = (struct pg_table *)pgtbl;
	struct pde *pd;

	if (!IS_ALIGNED(paddr, s2MiB)) {
		printk(BIOS_WARNING, "%s: Aligning physical address to 2MiB\n",
		       __func__);
		paddr = ALIGN_DOWN(paddr, s2MiB);
	}

	if (!IS_ALIGNED((uintptr_t)vmem_addr, s2MiB)) {
		printk(BIOS_WARNING, "%s: Aligning virtual address to 2MiB\n",
		       __func__);
		vmem_addr = (void *)ALIGN_DOWN((uintptr_t)vmem_addr, s2MiB);
	}

	/* Map a page using PAE at virtual address vmem_addr. */
	pd = &pgtbl_buf->pd[((uintptr_t)vmem_addr) >> PDE_IDX_SHIFT];
	pd->addr_lo = paddr | PDE_PS | PDE_PRES | PDE_RW;
	pd->addr_hi = paddr >> 32;

	/* Flush the stale TLB entry for the remapped virtual address */
	asm volatile ("invlpg (%0)" :: "b"(vmem_addr) : "memory");
}

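/*
 * Example (an illustrative sketch): after init_pae_pagetables(), a 2 MiB
 * page above the 4 GiB boundary becomes reachable through a 32-bit window.
 * The window address is hypothetical; it must be 2 MiB aligned and safe to
 * remap, reusing pae_pgtbl from the sketch above.
 *
 *	void *window = (void *)(uintptr_t)(512 * MiB);
 *
 *	pae_map_2M_page(pae_pgtbl, 0x100000000ULL, window);
 *	uint8_t first_byte = *(volatile uint8_t *)window;
 */
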
/*
 * Use PAE to map a page and then memset it with the pattern specified.
 * To do so, PAE pagetables for virtual addressing are set up, and the
 * mapping is reloaded on every 2 MiB boundary. After the function is done,
 * virtual addressing mode is disabled again. The PAT bits are left at zero,
 * i.e. all cacheable, but MTRRs still apply.
 *
 * Requires scratch memory for the pagetables and a virtual address for
 * non identity mapped memory.
 *
 * The scratch memory area containing the pagetables must overlap neither
 * the memory range to be cleared nor the virtual address used for non
 * identity mapped memory.
 *
 * @param vmem_addr Where the virtual non identity mapped page resides, must
 *                  be 2 MiB aligned and at least 2 MiB in size.
 *                  Content at the physical address is preserved.
 * @param pgtbl     Where the pagetables reside, must be 4 KiB aligned and
 *                  20 KiB in size.
 *                  Must not overlap the memory range pointed to by dest.
 *                  Must not overlap the memory range pointed to by vmem_addr.
 *                  Content at the physical address isn't preserved.
 * @param length    The length of the memory segment to memset
 * @param dest      Physical memory address to memset
 * @param pat       The pattern to write to the physical memory
 * @return 0 on success, 1 on error
 */
int memset_pae(uint64_t dest, unsigned char pat, uint64_t length, void *pgtbl,
	       void *vmem_addr)
{
	ssize_t offset;
	const uintptr_t pgtbl_s = (uintptr_t)pgtbl;
	const uintptr_t pgtbl_e = pgtbl_s + sizeof(struct pg_table);

	printk(BIOS_DEBUG, "%s: Using virtual address %p as scratchpad\n",
	       __func__, vmem_addr);

	/* Cover some basic error conditions */
	if (!IS_ALIGNED((uintptr_t)vmem_addr, s2MiB)) {
		printk(BIOS_ERR, "%s: Invalid alignment\n", __func__);
		return 1;
	}

	if (OVERLAP(dest, dest + length, pgtbl_s, pgtbl_e)) {
		printk(BIOS_ERR, "%s: destination overlaps page tables\n",
		       __func__);
		return 1;
	}

	if (OVERLAP((uintptr_t)vmem_addr, (uintptr_t)vmem_addr + s2MiB,
		    pgtbl_s, pgtbl_e)) {
		printk(BIOS_ERR, "%s: vmem address overlaps page tables\n",
		       __func__);
		return 1;
	}

	if (init_pae_pagetables(pgtbl))
		return 1;

	offset = dest - ALIGN_DOWN(dest, s2MiB);
	dest = ALIGN_DOWN(dest, s2MiB);

	do {
		const size_t len = MIN(length, s2MiB - offset);

		/*
		 * Map a page using PAE at virtual address vmem_addr.
		 * dest is already 2 MiB aligned.
		 */
		pae_map_2M_page(pgtbl, dest, vmem_addr);

		printk(BIOS_SPEW, "%s: Clearing %llx[%lx] - %zx\n", __func__,
		       dest + offset, (uintptr_t)vmem_addr + offset, len);

		memset(vmem_addr + offset, pat, len);

		dest += s2MiB;
		length -= len;
		offset = 0;
	} while (length > 0);

	paging_disable_pae();

	return 0;
}

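/*
 * Example (an illustrative sketch): zero 16 MiB of DRAM starting at 4 GiB,
 * which a flat 32-bit mapping cannot reach. The buffer and window addresses
 * are hypothetical, reusing the sketches above.
 *
 *	if (memset_pae(0x100000000ULL, 0, 16 * MiB, pae_pgtbl,
 *		       (void *)(uintptr_t)(512 * MiB)))
 *		printk(BIOS_ERR, "memset_pae failed\n");
 */
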
void paging_set_nxe(int enable)
{
	msr_t msr = rdmsr(IA32_EFER);

	if (enable)
		msr.lo |= EFER_NXE;
	else
		msr.lo &= ~EFER_NXE;

	wrmsr(IA32_EFER, msr);
}

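/*
 * Note (added for clarity): once IA32_EFER.NXE is set, the XD bit (bit 63)
 * in PDEs/PTEs becomes valid and marks a page non-executable, e.g.:
 *
 *	pd->addr_hi |= PDE_XD >> 32;
 */
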
void paging_set_pat(uint64_t pat)
{
	msr_t msr;
	msr.lo = pat;
	msr.hi = pat >> 32;
	wrmsr(IA32_PAT, msr);
}

/* PAT encoding used in util/x86/x86_page_tables.go. It matches the Linux
 * kernel settings:
 *  PTE encoding:
 *      PAT
 *      |PCD
 *      ||PWT  PAT
 *      |||    slot
 *      000    0    WB : _PAGE_CACHE_MODE_WB
 *      001    1    WC : _PAGE_CACHE_MODE_WC
 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
 *      011    3    UC : _PAGE_CACHE_MODE_UC
 *      100    4    WB : Reserved
 *      101    5    WP : _PAGE_CACHE_MODE_WP
 *      110    6    UC-: Reserved
 *      111    7    WT : _PAGE_CACHE_MODE_WT
 */
void paging_set_default_pat(void)
{
	uint64_t pat =  PAT_ENCODE(WB, 0) | PAT_ENCODE(WC, 1) |
			PAT_ENCODE(UC_MINUS, 2) | PAT_ENCODE(UC, 3) |
			PAT_ENCODE(WB, 4) | PAT_ENCODE(WP, 5) |
			PAT_ENCODE(UC_MINUS, 6) | PAT_ENCODE(WT, 7);
	paging_set_pat(pat);
}

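/*
 * Example (an illustrative sketch): with the default PAT above, a 4 KiB PTE
 * selects slot 1 (write-combining) by setting PWT while leaving PCD and the
 * PAT bit clear:
 *
 *	pte = paddr | PTE_PRES | PTE_RW | PTE_PWT;
 */
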
int paging_enable_for_car(const char *pdpt_name, const char *pt_name)
{
	if (!preram_symbols_available())
		return -1;

	if (!cbfs_load(pdpt_name, _pdpt, REGION_SIZE(pdpt))) {
		printk(BIOS_ERR, "Couldn't load pdpt\n");
		return -1;
	}

	if (!cbfs_load(pt_name, _pagetables, REGION_SIZE(pagetables))) {
		printk(BIOS_ERR, "Couldn't load page tables\n");
		return -1;
	}

	paging_enable_pae_cr3((uintptr_t)_pdpt);

	return 0;
}
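
/*
 * Example (an illustrative sketch; the CBFS file names are build-specific
 * and hypothetical here):
 *
 *	if (paging_enable_for_car("pdpt", "pt") < 0)
 *		printk(BIOS_ERR, "Failed to enable paging for CAR\n");
 */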