/* SPDX-License-Identifier: GPL-2.0-or-later */

/*
 * mtrr.c: setting MTRR to decent values for cache initialization on P6
 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System
 * Programming
 */

#include <assert.h>
#include <bootstate.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <device/device.h>
#include <device/pci_ids.h>
#include <memrange.h>
#include <string.h>
#include <types.h>

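/*
 * Per AMD's documentation, the fixed MTRRs on AMD CPUs carry extra
 * RdMem/WrMem attribute bits that route accesses to DRAM instead of
 * MMIO. They are only visible while SYSCFG[MtrrFixDramModEn] is set
 * (see fixed_mtrrs_expose_amd_rwdram() below), and write-back DRAM
 * below 1MiB needs both bits set in addition to the cache type.
 */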
#if CONFIG(X86_AMD_FIXED_MTRRS)
#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
#else
#define MTRR_FIXED_WRBACK_BITS 0
#endif

#define MIN_MTRRS	8

/*
 * Static storage size for variable MTRRs. It's sized sufficiently large to
 * handle different types of CPUs. Empirically, a CPU with more than 16
 * variable MTRRs has not yet been observed.
 */
#define NUM_MTRR_STATIC_STORAGE 16

static int total_mtrrs;

static void detect_var_mtrrs(void)
{
	total_mtrrs = get_var_mtrr_count();

	if (total_mtrrs > NUM_MTRR_STATIC_STORAGE) {
		printk(BIOS_WARNING,
			"MTRRs detected (%d) > NUM_MTRR_STATIC_STORAGE (%d)\n",
			total_mtrrs, NUM_MTRR_STATIC_STORAGE);
		total_mtrrs = NUM_MTRR_STATIC_STORAGE;
	}
}

void enable_fixed_mtrr(void)
{
	msr_t msr;

	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	msr.lo |= MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}

void fixed_mtrrs_expose_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}

void fixed_mtrrs_hide_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}

static void enable_var_mtrr(unsigned char deftype)
{
	msr_t msr;

	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	msr.lo &= ~0xff;
	msr.lo |= MTRR_DEF_TYPE_EN | deftype;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}

#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
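
/*
 * Example: PHYS_TO_RANGE_ADDR(0x100000) == 0x100 and
 * RANGE_TO_PHYS_ADDR(0x100) == 0x100000, i.e. range addresses are
 * simply physical addresses expressed in 4KiB units.
 */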

/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1ULL << 20)
#define RANGE_4GB (1ULL << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))

#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)

static inline uint64_t range_entry_base_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_base(r));
}

static inline uint64_t range_entry_end_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_end(r));
}

static inline int range_entry_mtrr_type(struct range_entry *r)
{
	return range_entry_tag(r) & MTRR_TAG_MASK;
}

static int filter_vga_wrcomb(struct device *dev, struct resource *res)
{
	/* Only handle PCI devices. */
	if (dev->path.type != DEVICE_PATH_PCI)
		return 0;

	/* Only handle VGA class devices. */
	if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
		return 0;

	/* Add resource as write-combining in the address space. */
	return 1;
}

static void print_physical_address_space(const struct memranges *addr_space,
					const char *identifier)
{
	const struct range_entry *r;

	if (identifier)
		printk(BIOS_DEBUG, "MTRR: %s Physical address space:\n",
			identifier);
	else
		printk(BIOS_DEBUG, "MTRR: Physical address space:\n");

	memranges_each_entry(r, addr_space)
		printk(BIOS_DEBUG,
		       "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
		       range_entry_base(r), range_entry_end(r) - 1,
		       range_entry_size(r), range_entry_tag(r));
}

static struct memranges *get_physical_address_space(void)
{
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle chipsets that are unable to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time, remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources are appropriate for this MTRR type. */
		match = IORESOURCE_PREFETCH;
		mask |= match;
		memranges_add_resources_filter(addr_space, mask, match,
					MTRR_TYPE_WRCOMB, filter_vga_wrcomb);

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable. */
		memranges_fill_holes_up_to(addr_space,
					   RANGE_TO_PHYS_ADDR(RANGE_4GB),
					   MTRR_TYPE_UNCACHEABLE);

		print_physical_address_space(addr_space, NULL);
	}

	return addr_space;
}

/* Fixed MTRR descriptor. This structure defines the step size and the begin
 * and end (exclusive) addresses covered by a set of fixed MTRR MSRs.
 * It also gives the starting index at which the calculated MTRR types for
 * this block are stored in the fixed_mtrr_types array. */
struct fixed_mtrr_desc {
	uint32_t begin;
	uint32_t end;
	uint32_t step;
	int range_index;
	int msr_index_base;
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
};
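
/*
 * Example: the legacy VGA window at 0xa0000 falls into the 16KiB
 * descriptor: range_index 8 + (0xa0000 - 0x80000) / 16KiB = 16, so its
 * type ends up in fixed_mtrr_types[16].
 */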

static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		if (begin >= last_desc->end)
			break;

		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
			       "MTRR addr 0x%x-0x%x set to %d type @ %d\n",
			       begin, begin + desc->step - 1, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}

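/*
 * Each fixed MTRR MSR packs eight consecutive range types, one byte each,
 * low byte to high byte. E.g. MTRR_FIX_64K_00000 holds the types for the
 * eight 64KiB blocks covering 0-512KiB, with fixed_mtrr_types[0] landing
 * in the least significant byte of msr.lo.
 */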
static void commit_fixed_mtrrs(void)
{
	int i;
	int j;
	int msr_num;
	int type_index;
	const unsigned int lapic_id = lapicid();
	/* 8 ranges per msr. */
	msr_t fixed_msrs[NUM_FIXED_MTRRS];
	unsigned long msr_index[NUM_FIXED_MTRRS];

	fixed_mtrrs_expose_amd_rwdram();

	memset(&fixed_msrs, 0, sizeof(fixed_msrs));

	msr_num = 0;
	type_index = 0;
	for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
		const struct fixed_mtrr_desc *desc;
		int num_ranges;

		desc = &fixed_mtrr_desc[i];
		num_ranges = (desc->end - desc->begin) / desc->step;
		for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
			msr_index[msr_num] = desc->msr_index_base +
				(j / RANGES_PER_FIXED_MTRR);
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 24;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 24;
			msr_num++;
		}
	}

	/* Ensure that both arrays were fully initialized */
	ASSERT(msr_num == NUM_FIXED_MTRRS)

	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
		printk(BIOS_DEBUG, "apic_id 0x%x: MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
		       lapic_id, msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);

	disable_cache();
	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
		wrmsr(msr_index[i], fixed_msrs[i]);
	enable_cache();
	fixed_mtrrs_hide_amd_rwdram();
}

static void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}

static void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "apic_id 0x%x call enable_fixed_mtrr()\n", lapicid());
	enable_fixed_mtrr();
}

struct var_mtrr_regs {
	msr_t base;
	msr_t mask;
};

struct var_mtrr_solution {
	int mtrr_default_type;
	int num_used;
	struct var_mtrr_regs regs[NUM_MTRR_STATIC_STORAGE];
};

/* Global storage for variable MTRR solution. */
static struct var_mtrr_solution mtrr_global_solution;

struct var_mtrr_state {
	struct memranges *addr_space;
	int above4gb;
	int address_bits;
	int prepare_msrs;
	int mtrr_index;
	int def_mtrr_type;
	struct var_mtrr_regs *regs;
};

static void clear_var_mtrr(int index)
{
	msr_t msr = { .lo = 0, .hi = 0 };

	wrmsr(MTRR_PHYS_BASE(index), msr);
	wrmsr(MTRR_PHYS_MASK(index), msr);
}

static int get_os_reserved_mtrrs(void)
{
	return CONFIG(RESERVE_MTRRS_FOR_OS) ? 2 : 0;
}

static void prep_var_mtrr(struct var_mtrr_state *var_state,
			  uint64_t base, uint64_t size, int mtrr_type)
{
	struct var_mtrr_regs *regs;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "Not enough MTRRs available! MTRR index is %d with %d MTRRs in total.\n",
		       var_state->mtrr_index, total_mtrrs);
		return;
	}

	/*
	 * If desired, we try to save 2 variable MTRRs for the OS to use.
	 * However, mapping the full address space properly takes priority,
	 * so the reserved MTRRs may still be consumed.
	 */
	if (var_state->mtrr_index >= total_mtrrs - get_os_reserved_mtrrs())
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");

	rbase = base;
	rsize = size;

	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
	       var_state->mtrr_index, rbase, rsize, mtrr_type);

	regs = &var_state->regs[var_state->mtrr_index];

	regs->base.lo = rbase;
	regs->base.lo |= mtrr_type;
	regs->base.hi = rbase >> 32;

	regs->mask.lo = rsize;
	regs->mask.lo |= MTRR_PHYS_MASK_VALID;
	regs->mask.hi = rsize >> 32;
}
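
/*
 * Example (assuming 36 physical address bits): a 64MiB WRCOMB range at
 * 0xd0000000 yields MTRR_PHYS_BASE = 0xd0000001 (base | type) and
 * MTRR_PHYS_MASK = 0xffc000000 | MTRR_PHYS_MASK_VALID, since
 * -0x4000000 & ((1ULL << 36) - 1) == 0xffc000000.
 */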

/*
 * fls64: find least significant bit set in a 64-bit word
 * As samples, fls64(0x0) = 64; fls64(0x4400) = 10;
 * fls64(0x40400000000) = 34.
 */
static uint32_t fls64(uint64_t x)
{
	uint32_t lo = (uint32_t)x;
	if (lo)
		return fls(lo);
	uint32_t hi = x >> 32;
	return fls(hi) + 32;
}

/*
 * fms64: find most significant bit set in a 64-bit word
 * As samples, fms64(0x0) = 0; fms64(0x4400) = 14;
 * fms64(0x40400000000) = 42.
 */
static uint32_t fms64(uint64_t x)
{
	uint32_t hi = (uint32_t)(x >> 32);
	if (!hi)
		return fms((uint32_t)x);
	return fms(hi) + 32;
}

static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
				uint64_t base, uint64_t size, int mtrr_type)
{
	while (size != 0) {
		uint32_t addr_lsb;
		uint32_t size_msb;
		uint64_t mtrr_size;

		addr_lsb = fls64(base);
		size_msb = fms64(size);

		/* All MTRR entries need to have their base aligned to the mask
		 * size. The maximum size is calculated by a function of the
		 * min base bit set and maximum size bit set. */
		if (addr_lsb > size_msb)
			mtrr_size = 1ULL << size_msb;
		else
			mtrr_size = 1ULL << addr_lsb;

		if (var_state->prepare_msrs)
			prep_var_mtrr(var_state, base, mtrr_size, mtrr_type);

		size -= mtrr_size;
		base += mtrr_size;
		var_state->mtrr_index++;
	}
}
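
/*
 * Example: a 96MiB write-back range starting at base 0 is not a power of
 * two, so it is split greedily into a 64MiB and then a 32MiB MTRR. Each
 * chunk is limited both by the remaining size and by the natural
 * alignment of the current base.
 */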

static uint64_t optimize_var_mtrr_hole(const uint64_t base,
				       const uint64_t hole,
				       const uint64_t limit,
				       const int carve_hole)
{
	/*
	 * With default type UC, we can potentially optimize a WB
	 * range with unaligned upper end, by aligning it up and
	 * carving the added "hole" out again.
	 *
	 * To optimize the upper end of the hole, we will test
	 * how many MTRRs calc_var_mtrr_range() will spend for any
	 * alignment of the hole's upper end.
	 *
	 * We take four parameters, the lower end of the WB range
	 * `base`, upper end of the WB range as start of the `hole`,
	 * a `limit` how far we may align the upper end of the hole
	 * up and a flag `carve_hole` whether we should count MTRRs
	 * for carving the hole out. We return the optimal upper end
	 * for the hole (which may be the same as the end of the WB
	 * range in case we don't gain anything by aligning up).
	 */
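
	/*
	 * Worked example: with base = 0 and a WB range ending at
	 * 0xcf800000, plain decomposition needs 7 MTRRs (one per set
	 * bit: 2G + 1G + 128M + 64M + 32M + 16M + 8M). Aligning the
	 * end up to 0xd0000000 instead needs 3 MTRRs (2G + 1G + 256M)
	 * plus 1 MTRR to carve the 8MiB hole back out, 4 in total.
	 */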

	const int dont_care = 0;
	struct var_mtrr_state var_state = { 0, };

	unsigned int align, best_count;
	uint32_t best_end = hole;

	/* calculate MTRR count for the WB range alone (w/o a hole) */
	calc_var_mtrr_range(&var_state, base, hole - base, dont_care);
	best_count = var_state.mtrr_index;
	var_state.mtrr_index = 0;

	for (align = fls(hole) + 1; align <= fms(hole); ++align) {
		const uint64_t hole_end = ALIGN_UP((uint64_t)hole, 1 << align);
		if (hole_end > limit)
			break;

		/* calculate MTRR count for this alignment */
		calc_var_mtrr_range(
			&var_state, base, hole_end - base, dont_care);
		if (carve_hole)
			calc_var_mtrr_range(
				&var_state, hole, hole_end - hole, dont_care);

		if (var_state.mtrr_index < best_count) {
			best_count = var_state.mtrr_index;
			best_end = hole_end;
		}
		var_state.mtrr_index = 0;
	}

	return best_end;
}

static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
				     struct range_entry *r)
{
	uint64_t a1, a2, b1, b2;
	int mtrr_type, carve_hole;

	/*
	 * Determine MTRRs based on the following algorithm for the given entry:
	 * +------------------+ b2 = ALIGN_UP(end)
	 * |  0 or more bytes | <-- hole is carved out between b1 and b2
	 * +------------------+ a2 = b1 = original end
	 * |                  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are up to 2 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	a2 = range_entry_end_mtrr_addr(r);

	/* The end address is within the first 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (a2 <= RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts at or below 1MiB. */
	if (a1 <= RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && a2 > RANGE_4GB)
		a2 = RANGE_4GB;

	b1 = a2;
	b2 = a2;
	carve_hole = 0;

	/* We only consider WB type ranges for hole-carving. */
	if (mtrr_type == MTRR_TYPE_WRBACK) {
		struct range_entry *next;
		uint64_t b2_limit;
		/*
		 * Depending on the type of the next range, there are three
		 * different situations to handle:
		 *
		 * 1. WB range is last in address space:
		 *    Aligning up, up to the next power of 2, may gain us
		 *    something.
		 *
		 * 2. The next range is of type UC:
		 *    We may align up, up to the _end_ of the next range. If
		 *    there is a gap between the current and the next range,
		 *    it would have been covered by the default type UC anyway.
		 *
		 * 3. The next range is not of type UC:
		 *    We may align up, up to the _base_ of the next range. This
		 *    may either be the end of the current range (if the next
		 *    range follows immediately) or the end of the gap between
		 *    the ranges.
		 */
		next = memranges_next_entry(var_state->addr_space, r);
		if (next == NULL) {
			b2_limit = ALIGN_UP((uint64_t)b1, 1 << fms(b1));
			/* If it's the last range above 4GiB, we won't carve
			   the hole out. If an OS wanted to move MMIO there,
			   it would have to override the MTRR setting using
			   PAT just like it would with WB as default type. */
			carve_hole = a1 < RANGE_4GB;
		} else if (range_entry_mtrr_type(next)
				== MTRR_TYPE_UNCACHEABLE) {
			b2_limit = range_entry_end_mtrr_addr(next);
			carve_hole = 1;
		} else {
			b2_limit = range_entry_base_mtrr_addr(next);
			carve_hole = 1;
		}
		b2 = optimize_var_mtrr_hole(a1, b1, b2_limit, carve_hole);
	}

	calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
	if (carve_hole && b2 != b1) {
		calc_var_mtrr_range(var_state, b1, b2 - b1,
				    MTRR_TYPE_UNCACHEABLE);
	}
}

static void __calc_var_mtrrs(struct memranges *addr_space,
			     int above4gb, int address_bits,
			     int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it was the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	var_state.prepare_msrs = 0;

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 2 calculations:
	 *   1. UC as default type with possible holes at top of range.
	 *   2. WB as default.
	 * The lowest count is then used as default after totaling all
	 * MTRRs. UC takes precedence in the MTRR architecture. Therefore,
	 * only holes can be used when the type of the region is
	 * MTRR_TYPE_WRBACK with MTRR_TYPE_UNCACHEABLE as the default
	 * type.
	 */
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			calc_var_mtrrs_with_hole(&var_state, r);
			uc_deftype_count += var_state.mtrr_index;
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_with_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}

	*num_def_wb_mtrrs = wb_deftype_count;
	*num_def_uc_mtrrs = uc_deftype_count;
}

static int calc_var_mtrrs(struct memranges *addr_space,
			  int above4gb, int address_bits)
{
	int wb_deftype_count = 0;
	int uc_deftype_count = 0;

	__calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
			 &uc_deftype_count);

	const int bios_mtrrs = total_mtrrs - get_os_reserved_mtrrs();
	if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
		printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
		       "WB/UC MTRR counts: %d/%d > %d.\n",
		       wb_deftype_count, uc_deftype_count, bios_mtrrs);
		memranges_update_tag(addr_space, MTRR_TYPE_WRCOMB,
				     MTRR_TYPE_UNCACHEABLE);
		__calc_var_mtrrs(addr_space, above4gb, address_bits,
				 &wb_deftype_count, &uc_deftype_count);
	}

	printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
	       wb_deftype_count, uc_deftype_count);

	if (wb_deftype_count < uc_deftype_count) {
		printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
		return MTRR_TYPE_WRBACK;
	}
	printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
	return MTRR_TYPE_UNCACHEABLE;
}
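
/*
 * Note: whichever default type needs fewer variable MTRRs wins, with ties
 * going to UC. With a UC default only the cacheable (WB/WC) ranges consume
 * variable MTRRs; with a WB default every uncacheable MMIO hole would need
 * one instead.
 */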

static void prepare_var_mtrrs(struct memranges *addr_space, int def_type,
				int above4gb, int address_bits,
				struct var_mtrr_solution *sol)
{
	struct range_entry *r;
	struct var_mtrr_state var_state;

	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	/* Prepare the MSRs. */
	var_state.prepare_msrs = 1;
	var_state.mtrr_index = 0;
	var_state.def_mtrr_type = def_type;
	var_state.regs = &sol->regs[0];

	memranges_each_entry(r, var_state.addr_space) {
		if (range_entry_mtrr_type(r) == def_type)
			continue;
		calc_var_mtrrs_with_hole(&var_state, r);
	}

	/* Update the solution. */
	sol->num_used = var_state.mtrr_index;
}

static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
{
	int i;

	if (sol->num_used > total_mtrrs) {
		printk(BIOS_WARNING, "Not enough MTRRs: %d vs %d\n",
			sol->num_used, total_mtrrs);
		return -1;
	}

	/* Write out the variable MTRRs. */
	disable_cache();
	for (i = 0; i < sol->num_used; i++) {
		wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
		wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
	}
	/* Clear the ones that are unused. */
	for (; i < total_mtrrs; i++)
		clear_var_mtrr(i);
	enable_var_mtrr(sol->mtrr_default_type);
	enable_cache();

	return 0;
}

void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
{
	static struct var_mtrr_solution *sol = NULL;
	struct memranges *addr_space;

	addr_space = get_physical_address_space();

	if (sol == NULL) {
		sol = &mtrr_global_solution;
		sol->mtrr_default_type =
			calc_var_mtrrs(addr_space, !!above4gb, address_bits);
		prepare_var_mtrrs(addr_space, sol->mtrr_default_type,
				  !!above4gb, address_bits, sol);
	}

	commit_var_mtrrs(sol);
}

static void _x86_setup_mtrrs(unsigned int above4gb)
{
	int address_size;

	enable_lapic();

	x86_setup_fixed_mtrrs();
	address_size = cpu_phys_address_size();
	printk(BIOS_DEBUG, "apic_id 0x%x setup mtrr for CPU physical address size: %d bits\n",
				lapicid(), address_size);
	x86_setup_var_mtrrs(address_size, above4gb);
}

void x86_setup_mtrrs(void)
{
	/* Without detect, assume the minimum */
	total_mtrrs = MIN_MTRRS;
	/* Always handle addresses above 4GiB. */
	_x86_setup_mtrrs(1);
}

void x86_setup_mtrrs_with_detect(void)
{
	detect_var_mtrrs();
	/* Always handle addresses above 4GiB. */
	_x86_setup_mtrrs(1);
}

void x86_setup_mtrrs_with_detect_no_above_4gb(void)
{
	detect_var_mtrrs();
	_x86_setup_mtrrs(0);
}

void x86_mtrr_check(void)
{
	/* Only Pentium Pro and later have MTRR */
	msr_t msr;
	printk(BIOS_DEBUG, "\nMTRR check\n");

	msr = rdmsr(MTRR_DEF_TYPE_MSR);

	printk(BIOS_DEBUG, "Fixed MTRRs   : ");
	if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "Variable MTRRs: ");
	if (msr.lo & MTRR_DEF_TYPE_EN)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "\n");

	post_code(0x93);
}

static bool put_back_original_solution;

void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
{
	const struct range_entry *r;
	const struct memranges *orig;
	struct var_mtrr_solution sol;
	struct memranges addr_space;
	const int above4gb = 1; /* Cover above 4GiB by default. */
	int address_bits;
	static struct temp_range {
		uintptr_t begin;
		size_t size;
		int type;
	} temp_ranges[10];

	if (size == 0)
		return;

	int i;
	for (i = 0; i < ARRAY_SIZE(temp_ranges); i++) {
		if (temp_ranges[i].size == 0) {
			temp_ranges[i].begin = begin;
			temp_ranges[i].size = size;
			temp_ranges[i].type = type;
			break;
		}
	}
	if (i == ARRAY_SIZE(temp_ranges)) {
		printk(BIOS_ERR, "Out of temporary ranges for MTRR use\n");
		return;
	}

	/* Make a copy of the original address space and tweak it with the
	 * provided range. */
	memranges_init_empty(&addr_space, NULL, 0);
	orig = get_physical_address_space();
	memranges_each_entry(r, orig) {
		unsigned long tag = range_entry_tag(r);

		/* Remove any write combining MTRRs from the temporary
		 * solution as it just fragments the address space. */
		if (tag == MTRR_TYPE_WRCOMB)
			tag = MTRR_TYPE_UNCACHEABLE;

		memranges_insert(&addr_space, range_entry_base(r),
				range_entry_size(r), tag);
	}

	/* Place new range into the address space. */
	for (i = 0; i < ARRAY_SIZE(temp_ranges); i++) {
		if (temp_ranges[i].size != 0)
			memranges_insert(&addr_space, temp_ranges[i].begin,
					 temp_ranges[i].size, temp_ranges[i].type);
	}

	print_physical_address_space(&addr_space, "TEMPORARY");

	/* Calculate a new solution with the updated address space. */
	address_bits = cpu_phys_address_size();
	memset(&sol, 0, sizeof(sol));
	sol.mtrr_default_type =
		calc_var_mtrrs(&addr_space, above4gb, address_bits);
	prepare_var_mtrrs(&addr_space, sol.mtrr_default_type,
				above4gb, address_bits, &sol);

	if (commit_var_mtrrs(&sol) < 0)
		printk(BIOS_WARNING, "Unable to insert temporary MTRR range: 0x%016llx - 0x%016llx size 0x%08llx type %d\n",
			(long long)begin, (long long)begin + size - 1,
			(long long)size, type);
	else
		put_back_original_solution = true;

	memranges_teardown(&addr_space);
}

static void remove_temp_solution(void *unused)
{
	if (put_back_original_solution)
		commit_var_mtrrs(&mtrr_global_solution);
}

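/* Restore the original MTRR solution before the payload boots or the OS
   resumes, so temporary ranges don't leak past coreboot. */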
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, remove_temp_solution, NULL);
BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_BOOT, BS_ON_ENTRY, remove_temp_solution, NULL);
948