/* SPDX-License-Identifier: GPL-2.0-only */

#include <types.h>
#include <string.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ops.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/intel/em64t101_save_state.h>
#include <cpu/intel/smm_reloc.h>
#include <console/console.h>
#include <northbridge/intel/haswell/haswell.h>
#include <southbridge/intel/lynxpoint/pch.h>
#include <smp/node.h>
#include "haswell.h"

static void update_save_state(int cpu, uintptr_t curr_smbase,
				uintptr_t staggered_smbase,
				struct smm_relocation_params *relo_params)
{
	u32 smbase;
	u32 iedbase;

	/* The relocated handler runs with all CPUs concurrently. Therefore
	 * stagger the entry points adjusting SMBASE downwards by save state
	 * size * CPU num. */
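	/* For example, with a relocated SMBASE of B and a save state size of
	 * S, CPU 0 would typically end up at B, CPU 1 at B - S, CPU 2 at
	 * B - 2*S, and so on; the exact values are computed by the caller and
	 * passed in here as staggered_smbase. */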
	smbase = staggered_smbase;
	iedbase = relo_params->ied_base;

	printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
	       smbase, iedbase);

	/* All threads need to set IEDBASE and SMBASE to the relocated
	 * handler region. However, the save state location depends on the
	 * smm_save_state_in_msrs field in the relocation parameters. If
	 * smm_save_state_in_msrs is non-zero then the CPUs are relocating
	 * the SMM handler in parallel, and each CPU's save state area is
	 * located in its respective MSR space. If smm_save_state_in_msrs
	 * is zero then the SMM relocation is happening serially, so the
	 * save state is at the same default location for all CPUs. */
	if (relo_params->smm_save_state_in_msrs) {
		msr_t smbase_msr;
		msr_t iedbase_msr;

		smbase_msr.lo = smbase;
		smbase_msr.hi = 0;

		/* According to the BWG the IEDBASE MSR is in bits 63:32. It's
		 * not clear why it differs from the SMBASE MSR. */
		iedbase_msr.lo = 0;
		iedbase_msr.hi = iedbase;

		wrmsr(SMBASE_MSR, smbase_msr);
		wrmsr(IEDBASE_MSR, iedbase_msr);
	} else {
		em64t101_smm_state_save_area_t *save_state;

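		/* Without MSR save state, the hardware save state map sits at
		 * the very top of the SMM_DEFAULT_SIZE window above the
		 * current SMBASE, so derive its address from curr_smbase. */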
		save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
				      sizeof(*save_state));

		save_state->smbase = smbase;
		save_state->iedbase = iedbase;
	}
}

/* Returns 1 if SMM MSR save state was set. */
static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
{
	msr_t smm_mca_cap;

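	/* The upper half of SMM_MCA_CAP advertises whether the CPU can keep
	 * its SMM save state in MSRs rather than in the SMRAM save state map.
	 * If it can, opt in via SMM_FEATURE_CONTROL so that the subsequent
	 * relocation can be done on all CPUs in parallel. */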
	smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
	if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
		msr_t smm_feature_control;

		smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
		smm_feature_control.hi = 0;
		smm_feature_control.lo |= SMM_CPU_SAVE_EN;
		wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
		relo_params->smm_save_state_in_msrs = 1;
	}
	return relo_params->smm_save_state_in_msrs;
}

/* The relocation work is actually performed in SMM context, but the code
 * resides in the ramstage module. This occurs by trampolining from the default
 * SMRAM entry point to here. */
void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
				uintptr_t staggered_smbase)
{
	msr_t mtrr_cap;
	struct smm_relocation_params *relo_params = &smm_reloc_params;

	printk(BIOS_DEBUG, "In relocation handler: CPU %d\n", cpu);

	/* Determine if the processor supports saving state in MSRs. If so,
	 * enable it before the non-BSPs run so that SMM relocation can occur
	 * in parallel in the non-BSP CPUs. */
	if (cpu == 0) {
		/* If smm_save_state_in_msrs is 1 then that means this is the
		 * 2nd time through the relocation handler for the BSP.
		 * Parallel SMM handler relocation is taking place. However,
		 * it is desired to access other CPUs' save state in the real
		 * SMM handler. Therefore, disable the SMM save state in MSRs
		 * feature. */
		if (relo_params->smm_save_state_in_msrs) {
			msr_t smm_feature_control;

			smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
			smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
			wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
		} else if (bsp_setup_msr_save_state(relo_params))
			/* Just return from relocation handler if MSR save
			 * state is enabled. In that case the BSP will come
			 * back into the relocation handler to set up the new
			 * SMBASE as well as disable SMM save state in MSRs. */
			return;
	}

	/* Make appropriate changes to the save state map. */
	update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);

	/* Write PRMRR and SMRR MSRs based on indicated support. */
	mtrr_cap = rdmsr(MTRR_CAP_MSR);
	if (mtrr_cap.lo & SMRR_SUPPORTED)
		write_smrr(relo_params);

	if (mtrr_cap.lo & PRMRR_SUPPORTED) {
		write_prmrr(relo_params);
		/* UNCORE_PRMRR MSRs are package level. Therefore, only
		 * configure these MSRs on the BSP. */
		if (cpu == 0)
			write_uncore_prmrr(relo_params);
	}
}

static void fill_in_relocation_params(struct smm_relocation_params *params)
{
	uintptr_t tseg_base;
	size_t tseg_size;
	u32 prmrr_base;
	u32 prmrr_size;
	int phys_bits;
	/* All range registers are aligned to 4KiB */
	const u32 rmask = ~((1 << 12) - 1);
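	/* i.e. rmask == 0xfffff000: the low 12 address bits are masked off */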

	/* Some of the range registers are dependent on the number of physical
	 * address bits supported. */
	phys_bits = cpuid_eax(0x80000008) & 0xff;

	/* The range bounded by the TSEGMB and BGSM registers encompasses the
	 * SMRAM range as well as the IED range. However, the SMRAM available
	 * to the handler is 4MiB since the IEDRAM lives at TSEGMB + 4MiB.
	 */
	smm_region(&tseg_base, &tseg_size);

	/* SMRR has 32 bits of valid address aligned to 4KiB. */
	params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
	params->smrr_base.hi = 0;
	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
	params->smrr_mask.hi = 0;
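	/* Note that a base/mask pair like this can only describe a region
	 * whose size is a power of two and whose base is aligned to that
	 * size, which TSEG is expected to satisfy. */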

	smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);

	/* The PRMRR and UNCORE_PRMRR are at IEDBASE + 2MiB */
	prmrr_base = (params->ied_base + (2 << 20)) & rmask;
	prmrr_size = params->ied_size - (2 << 20);

	/* PRMRR has 46 bits of valid address aligned to 4KiB. It's dependent
	 * on the number of physical address bits supported. */
	params->prmrr_base.lo = prmrr_base | MTRR_TYPE_WRBACK;
	params->prmrr_base.hi = 0;
	params->prmrr_mask.lo = (~(prmrr_size - 1) & rmask)
		| MTRR_PHYS_MASK_VALID;
	params->prmrr_mask.hi = (1 << (phys_bits - 32)) - 1;
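	/* The .hi value above sets every mask bit from bit 32 up to the
	 * highest supported physical address bit. */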

	/* UNCORE_PRMRR has 39 bits of valid address aligned to 4KiB. */
	params->uncore_prmrr_base.lo = prmrr_base;
	params->uncore_prmrr_base.hi = 0;
	params->uncore_prmrr_mask.lo = (~(prmrr_size - 1) & rmask) |
					MTRR_PHYS_MASK_VALID;
	params->uncore_prmrr_mask.hi = (1 << (39 - 32)) - 1;
}

static void setup_ied_area(struct smm_relocation_params *params)
{
	char *ied_base;

	struct ied_header ied = {
		.signature = "INTEL RSVD",
		.size = params->ied_size,
		.reserved = {0},
	};

	ied_base = (void *)params->ied_base;

	/* Place IED header at IEDBASE. */
	memcpy(ied_base, &ied, sizeof(ied));

	/* Zero out 32KiB at IEDBASE + 1MiB */
	memset(ied_base + (1 << 20), 0, (32 << 10));

	/* According to the BWG MP init section, 2MiB of memory at IEDBASE +
	 * 2MiB should be zeroed as well. However, I suspect what is intended
	 * is to clear the memory covered by PRMRR. TODO(adurbin): figure out if
	 * this is really required.
	 */
	//memset(ied_base + (2 << 20), 0, (2 << 20));
}

void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
				size_t *smm_save_state_size)
{
	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");

	fill_in_relocation_params(&smm_reloc_params);

	smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);

	setup_ied_area(&smm_reloc_params);

	*smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
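	/* The reported save state size is presumably what the caller uses to
	 * stagger each CPU's entry point by one save state area, as the
	 * comment in update_save_state() describes. */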
}

void smm_initialize(void)
{
	/* Clear the SMM state in the southbridge. */
	smm_southbridge_clear_state();

	/*
	 * Run the relocation handler on the BSP to check and set up
	 * parallel SMM relocation.
	 */
	smm_initiate_relocation();

	if (smm_reloc_params.smm_save_state_in_msrs)
		printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
}

/* The default SMM entry can happen in parallel or serially. If the
 * default SMM entry is done in parallel the BSP has already set up
 * the save state in each CPU's MSRs. At least one save state size
 * is required for the initial SMM entry for the BSP to determine if
 * parallel SMM relocation is even feasible. */
void smm_relocate(void)
{
	/*
	 * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
	 * shall take place. Run the relocation handler a second time on the
	 * BSP to do the final move. For APs, a relocation handler always
	 * needs to be run.
	 */
	if (smm_reloc_params.smm_save_state_in_msrs)
		smm_initiate_relocation_parallel();
	else if (!boot_cpu())
		smm_initiate_relocation();
}

void smm_lock(void)
{
	/* LOCK the SMM memory window and enable normal SMM.
	 * After running this function, only a full reset can
	 * make the SMM registers writable again.
	 */
	printk(BIOS_DEBUG, "Locking SMM.\n");
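	/* In the host bridge's SMRAM register: G_SMRAME keeps SMRAM enabled,
	 * C_BASE_SEG selects the legacy A-segment base for compatible SMRAM,
	 * and D_LCK locks the register until the next full reset. */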
	pci_write_config8(pcidev_on_root(0, 0), SMRAM, D_LCK | G_SMRAME | C_BASE_SEG);
}