/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/common/common.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/msr.h>
#include <intelblocks/sgx.h>
#include <intelblocks/systemagent.h>
#include <soc/cpu.h>
#include <soc/pci_devs.h>

void prmrr_core_configure(void)
{
	msr_t prmrr_base, prmrr_mask;

	/*
	 * Software Developer's Manual Volume 4:
	 * Order Number: 335592-068US
	 * Chapter 2.16.1
	 * MSR_PRMRR_PHYS_MASK is in scope "Core"
	 * MSR_PRMRR_PHYS_BASE is in scope "Core"
	 * Return if Hyper-Threading is enabled and not thread 0
	 */
	if (!is_sgx_supported() || intel_ht_sibling())
		return;

	/* PRMRR_PHYS_MASK is in scope "Core" */
	prmrr_mask = rdmsr(MSR_PRMRR_PHYS_MASK);
	/* If it is locked don't attempt to write PRMRR MSRs. */
	if (prmrr_mask.lo & PRMRR_PHYS_MASK_LOCK)
		return;

	/* PRMRR base and mask are read from the UNCORE PRMRR MSRs
	 * that are already set in FSP-M. */
	if (soc_get_uncore_prmmr_base_and_mask(&prmrr_base.raw,
						&prmrr_mask.raw) < 0) {
		printk(BIOS_ERR, "SGX: Failed to get PRMRR base and mask\n");
		return;
	}

	if (!prmrr_base.lo) {
		printk(BIOS_ERR, "SGX Error: Uncore PRMRR is not set!\n");
		return;
	}

	printk(BIOS_INFO, "SGX: prmrr_base = 0x%llx\n", prmrr_base.raw);
	printk(BIOS_INFO, "SGX: prmrr_mask = 0x%llx\n", prmrr_mask.raw);

	/* Program core PRMRR MSRs.
	 * - Set cache writeback mem attrib in PRMRR base MSR
	 * - Clear the valid bit in PRMRR mask MSR
	 * - Lock PRMRR MASK MSR */
	prmrr_base.lo |= MTRR_TYPE_WRBACK;
	wrmsr(MSR_PRMRR_PHYS_BASE, prmrr_base);
	prmrr_mask.lo &= ~PRMRR_PHYS_MASK_VALID;
	prmrr_mask.lo |= PRMRR_PHYS_MASK_LOCK;
	wrmsr(MSR_PRMRR_PHYS_MASK, prmrr_mask);
}

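/*
 * Illustrative sketch, compiled out: the mask programmed above is assumed
 * to follow the variable-MTRR convention, i.e. it selects the physical
 * address bits that must match PRMRR_PHYS_BASE for an access to fall
 * inside the protected range, while the VALID and LOCK control bits live
 * in the low bits of the mask MSR and are handled separately (as done in
 * prmrr_core_configure() above). The helper below is hypothetical and only
 * shows how a raw mask could be derived from a power-of-two size.
 */
#if 0	/* example only */
static uint64_t example_prmrr_raw_mask(uint64_t size_bytes, unsigned int paddr_bits)
{
	/* e.g. a 128 MiB PRMRR with 39 physical address bits -> 0x7ff8000000 */
	return ~(size_bytes - 1) & ((1ULL << paddr_bits) - 1);
}
#endif
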
static int is_prmrr_set(void)
{
	msr_t prmrr_base, prmrr_mask;
	prmrr_base = rdmsr(MSR_PRMRR_PHYS_BASE);
	prmrr_mask = rdmsr(MSR_PRMRR_PHYS_MASK);

	/* If PRMRR base is zero and PRMRR mask is locked
	 * then PRMRR is not set */
	if ((prmrr_base.hi == 0) && (prmrr_base.lo == 0)
		&& (prmrr_mask.lo & PRMRR_PHYS_MASK_LOCK))
		return 0;
	return 1;
}

static void enable_sgx(void)
{
	msr_t msr;

	/*
	 * Intel 64 and IA-32 Architectures Software Developer's Manual Volume 3C
	 * Order Number: 326019-060US
	 * Chapter 35.10.2 "Additional MSRs Supported by Intel"
	 * IA32_FEATURE_CONTROL is in scope "Thread"
	 */
	msr = rdmsr(IA32_FEATURE_CONTROL);
	/* Only enable it when it is not locked */
	if ((msr.lo & FEATURE_CONTROL_LOCK_BIT) == 0) {
		msr.lo |= SGX_GLOBAL_ENABLE; /* Enable it */
		wrmsr(IA32_FEATURE_CONTROL, msr);
	}
}

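/*
 * Reference, per the Intel SDM: in IA32_FEATURE_CONTROL, bit 0 is the
 * lock bit (FEATURE_CONTROL_LOCK_BIT) and bit 18 is the SGX global
 * enable (SGX_GLOBAL_ENABLE) set by enable_sgx() above. Once the lock
 * bit is set, the MSR cannot be modified until the next reset, which is
 * why both enable_sgx() and lock_sgx() below test the lock bit first.
 */
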
static void lock_sgx(void)
{
	msr_t msr;

	/*
	 * Intel 64 and IA-32 Architectures Software Developer's Manual Volume 3C
	 * Order Number: 326019-060US
	 * Chapter 35.10.2 "Additional MSRs Supported by Intel"
	 * IA32_FEATURE_CONTROL is in scope "Thread"
	 */
	msr = rdmsr(IA32_FEATURE_CONTROL);
	/* If it is locked don't attempt to lock it again. */
	if ((msr.lo & FEATURE_CONTROL_LOCK_BIT) == 0) {
		msr.lo |= FEATURE_CONTROL_LOCK_BIT; /* Lock it */
		wrmsr(IA32_FEATURE_CONTROL, msr);
	}
}

static int owner_epoch_update(void)
{
	/* TODO - the Owner Epoch update mechanism is not determined yet,
	 * for PoC just write '0's to the MSRs. */
	msr_t msr = { .raw = 0 };

	/* SGX_OWNEREPOCH is in scope "Package" */
	wrmsr(MSR_SGX_OWNEREPOCH0, msr);
	wrmsr(MSR_SGX_OWNEREPOCH1, msr);
	return 0;
}

static void activate_sgx(void)
{
	msr_t msr;

	/* Activate SGX feature by writing 1b to MSR 0x7A on all threads.
	 * BIOS must ensure bit 0 is set prior to writing to it, then read it
	 * back and verify the bit is cleared to confirm SGX activation. */
	msr = rdmsr(MSR_BIOS_UPGD_TRIG);
	if (msr.lo & SGX_ACTIVATE_BIT) {
		wrmsr(MSR_BIOS_UPGD_TRIG,
			(msr_t) {.lo = SGX_ACTIVATE_BIT, .hi = 0});
		/* Read back to verify it is activated */
		msr = rdmsr(MSR_BIOS_UPGD_TRIG);
		if (msr.lo & SGX_ACTIVATE_BIT)
			printk(BIOS_ERR, "SGX activation failed.\n");
		else
			printk(BIOS_INFO, "SGX activation was successful.\n");
	} else {
		printk(BIOS_ERR, "SGX feature is deactivated.\n");
	}
}

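/*
 * Illustrative sketch, compiled out: once SGX is successfully activated,
 * the protected EPC memory becomes enumerable through CPUID leaf 0x12.
 * The example below is hypothetical; it assumes coreboot's cpuid_ext()
 * helper and the EPC sub-leaf register layout documented in the Intel
 * SDM (sub-leaf 2 describes the first EPC section).
 */
#if 0	/* example only */
static void example_report_epc(void)
{
	struct cpuid_result r = cpuid_ext(0x12, 0x2);

	/* EAX[3:0] == 1 marks a valid EPC section. */
	if ((r.eax & 0xf) != 0x1)
		return;

	/* Base/size: bits 31:12 come from EAX/ECX, bits 51:32 from EBX/EDX. */
	uint64_t base = ((uint64_t)(r.ebx & 0xfffff) << 32) | (r.eax & 0xfffff000);
	uint64_t size = ((uint64_t)(r.edx & 0xfffff) << 32) | (r.ecx & 0xfffff000);

	printk(BIOS_INFO, "SGX: EPC base = 0x%llx, size = 0x%llx\n", base, size);
}
#endif
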
static int is_prmrr_approved(void)
{
	msr_t msr;
	msr = rdmsr(MSR_PRMRR_PHYS_MASK);
	if (msr.lo & PRMRR_PHYS_MASK_VALID) {
		printk(BIOS_INFO, "SGX: MCHECK approved SGX PRMRR\n");
		return 1;
	}

	printk(BIOS_INFO, "SGX: MCHECK did not approve SGX PRMRR\n");
	return 0;
}

/*
 * Configures SGX according to "Intel Software Guard Extensions Technology"
 * Document Number: 565432
 */
void sgx_configure(void *unused)
{
	if (!is_sgx_supported() || !is_prmrr_set()) {
		printk(BIOS_ERR, "SGX: not supported or pre-conditions not met\n");
		return;
	}

	/* Enable the SGX feature on all threads. */
	enable_sgx();

	/* Update the owner epoch value */
	if (owner_epoch_update() < 0)
		return;

	/* Ensure to lock memory before reloading microcode patch */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_LOCK_MEMORY))
		cpu_lt_lock_memory();

	/*
	 * Update just on the first CPU in the core. Other siblings
	 * get the update automatically according to Document: 253668-060US
	 * Intel SDM Chapter 9.11.6.3
	 * "Update in a System Supporting Intel Hyper-Threading Technology"
	 * Intel Hyper-Threading Technology has implications on the loading of the
	 * microcode update. The update must be loaded for each core in a physical
	 * processor. Thus, for a processor supporting Intel Hyper-Threading
	 * Technology, only one logical processor per core is required to load the
	 * microcode update. Each individual logical processor can independently
	 * load the update. However, MP initialization must provide some mechanism
	 * (e.g. a software semaphore) to force serialization of microcode update
	 * loads and to prevent simultaneous load attempts to the same core.
	 */
	if (!intel_ht_sibling()) {
		const void *microcode_patch = intel_microcode_find();
		intel_microcode_load_unlocked(microcode_patch);
	}

	/* Lock the SGX feature on all threads. */
	lock_sgx();

	/* Activate the SGX feature, if PRMRR config was approved by MCHECK */
	if (is_prmrr_approved())
		activate_sgx();
}
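
/*
 * Usage sketch, compiled out: this block does not register the entry
 * points itself. Typically the SoC code runs prmrr_core_configure() from
 * its per-core init path and, after MP init, runs sgx_configure() on
 * every thread. The hook name below is hypothetical and the Kconfig
 * symbol is assumed; only mp_run_on_all_cpus() (<cpu/x86/mp.h>) is taken
 * as given.
 */
#if 0	/* example only */
static void example_soc_post_mp_init(void)
{
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE))
		mp_run_on_all_cpus(sgx_configure, NULL);
}
#endif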