1 /* SPDX-License-Identifier: GPL-2.0-only */
2
3 #define __SIMPLE_DEVICE__
4
5 #include <assert.h>
6 #include <commonlib/helpers.h>
7 #include <console/console.h>
8 #include <device/mmio.h>
9 #include <delay.h>
10 #include <device/pci.h>
11 #include <device/pci_ids.h>
12 #include <device/pci_ops.h>
13 #include <intelblocks/cse.h>
14 #include <intelblocks/me.h>
15 #include <intelblocks/pmclib.h>
16 #include <intelblocks/post_codes.h>
17 #include <option.h>
18 #include <security/vboot/misc.h>
19 #include <security/vboot/vboot_common.h>
20 #include <soc/intel/common/reset.h>
21 #include <soc/iomap.h>
22 #include <soc/pci_devs.h>
23 #include <string.h>
24 #include <timer.h>
25 #include <types.h>
26
27 #define HECI_BASE_SIZE (4 * KiB)
28
29 #define MAX_HECI_MESSAGE_RETRY_COUNT 5
30
31 /* Wait up to 15 sec for HECI to get ready */
32 #define HECI_DELAY_READY_MS (15 * 1000)
33 /* Wait up to 100 usec between circular buffer polls */
34 #define HECI_DELAY_US 100
35 /* Wait up to 5 sec for CSE to chew something we sent */
36 #define HECI_SEND_TIMEOUT_MS (5 * 1000)
37 /* Wait up to 5 sec for CSE to blurp a reply */
38 #define HECI_READ_TIMEOUT_MS (5 * 1000)
39 /* Wait up to 1 ms for CSE CIP */
40 #define HECI_CIP_TIMEOUT_US 1000
41 /* Wait up to 5 seconds for CSE to boot from RO(BP1) */
42 #define CSE_DELAY_BOOT_TO_RO_MS (5 * 1000)
43
44 #define SLOT_SIZE sizeof(uint32_t)
45
46 #define MMIO_CSE_CB_WW 0x00
47 #define MMIO_HOST_CSR 0x04
48 #define MMIO_CSE_CB_RW 0x08
49 #define MMIO_CSE_CSR 0x0c
50 #define MMIO_CSE_DEVIDLE 0x800
51 #define CSE_DEV_IDLE (1 << 2)
52 #define CSE_DEV_CIP (1 << 0)
53
54 #define CSR_IE (1 << 0)
55 #define CSR_IS (1 << 1)
56 #define CSR_IG (1 << 2)
57 #define CSR_READY (1 << 3)
58 #define CSR_RESET (1 << 4)
59 #define CSR_RP_START 8
60 #define CSR_RP (((1 << 8) - 1) << CSR_RP_START)
61 #define CSR_WP_START 16
62 #define CSR_WP (((1 << 8) - 1) << CSR_WP_START)
63 #define CSR_CBD_START 24
64 #define CSR_CBD (((1 << 8) - 1) << CSR_CBD_START)
65
66 #define MEI_HDR_IS_COMPLETE (1 << 31)
67 #define MEI_HDR_LENGTH_START 16
68 #define MEI_HDR_LENGTH_SIZE 9
69 #define MEI_HDR_LENGTH (((1 << MEI_HDR_LENGTH_SIZE) - 1) \
70 << MEI_HDR_LENGTH_START)
71 #define MEI_HDR_HOST_ADDR_START 8
72 #define MEI_HDR_HOST_ADDR (((1 << 8) - 1) << MEI_HDR_HOST_ADDR_START)
73 #define MEI_HDR_CSE_ADDR_START 0
74 #define MEI_HDR_CSE_ADDR (((1 << 8) - 1) << MEI_HDR_CSE_ADDR_START)
75
76 /* Get HECI BAR 0 from PCI configuration space */
static uintptr_t get_cse_bar(pci_devfn_t dev)
{
	uintptr_t base = pci_read_config32(dev, PCI_BASE_ADDRESS_0);

	assert(base != 0);
	/*
	 * Per the EDS only bits 31-12 carry the base address;
	 * mask off the low attribute bits 0-11.
	 */
	return base & ~PCI_BASE_ADDRESS_MEM_ATTR_MASK;
}
89
static void heci_assign_resource(pci_devfn_t dev, uintptr_t tempbar)
{
	uint16_t cmd;

	/* Disable memory decode and bus mastering while the BAR changes */
	cmd = pci_read_config16(dev, PCI_COMMAND);
	cmd &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
	pci_write_config16(dev, PCI_COMMAND, cmd);

	/* Program a temporary BAR for the HECI device */
	pci_write_config32(dev, PCI_BASE_ADDRESS_0, tempbar);
	pci_write_config32(dev, PCI_BASE_ADDRESS_1, 0x0);

	/* Re-enable bus mastering and MMIO decoding */
	pci_or_config16(dev, PCI_COMMAND, PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
}
107
108 /*
109 * Initialize the CSE device with provided temporary BAR. If BAR is 0 use a
110 * default. This is intended for pre-mem usage only where BARs haven't been
111 * assigned yet and devices are not enabled.
112 */
cse_init(uintptr_t tempbar)113 void cse_init(uintptr_t tempbar)
114 {
115 pci_devfn_t dev = PCH_DEV_CSE;
116
117 /* Check if device enabled */
118 if (!is_cse_enabled())
119 return;
120
121 /* Assume it is already initialized, nothing else to do */
122 if (get_cse_bar(dev))
123 return;
124
125 /* Use default pre-ram bar */
126 if (!tempbar)
127 tempbar = HECI1_BASE_ADDRESS;
128
129 /* Assign HECI resource and enable the resource */
130 heci_assign_resource(dev, tempbar);
131
132 /* Trigger HECI Reset and make Host ready for communication with CSE */
133 heci_reset();
134 }
135
static uint32_t read_bar(pci_devfn_t dev, uint32_t offset)
{
	/* 32-bit MMIO read relative to the device's HECI BAR */
	uintptr_t base = get_cse_bar(dev);

	return read32p(base + offset);
}
140
/* 32-bit MMIO write relative to the device's HECI BAR. */
static void write_bar(pci_devfn_t dev, uint32_t offset, uint32_t val)
{
	/*
	 * Do not `return` the void expression: C11 6.8.6.4 forbids a
	 * return-with-expression in a function returning void.
	 */
	write32p(get_cse_bar(dev) + offset, val);
}
145
/* Read the CSE-side control & status register (CSE CSR). */
static uint32_t read_cse_csr(void)
{
	return read_bar(PCH_DEV_CSE, MMIO_CSE_CSR);
}
150
/* Read the host-side control & status register (host CSR). */
static uint32_t read_host_csr(void)
{
	return read_bar(PCH_DEV_CSE, MMIO_HOST_CSR);
}
155
/* Write the host-side control & status register (host CSR). */
static void write_host_csr(uint32_t data)
{
	write_bar(PCH_DEV_CSE, MMIO_HOST_CSR, data);
}
160
filled_slots(uint32_t data)161 static size_t filled_slots(uint32_t data)
162 {
163 uint8_t wp, rp;
164 rp = data >> CSR_RP_START;
165 wp = data >> CSR_WP_START;
166 return (uint8_t)(wp - rp);
167 }
168
/* Number of slots the CSE has queued for the host to read. */
static size_t cse_filled_slots(void)
{
	return filled_slots(read_cse_csr());
}
173
host_empty_slots(void)174 static size_t host_empty_slots(void)
175 {
176 uint32_t csr;
177 csr = read_host_csr();
178
179 return ((csr & CSR_CBD) >> CSR_CBD_START) - filled_slots(csr);
180 }
181
clear_int(void)182 static void clear_int(void)
183 {
184 uint32_t csr;
185 csr = read_host_csr();
186 csr |= CSR_IS;
187 write_host_csr(csr);
188 }
189
/* Pop one 4-byte slot from the CSE-to-host circular buffer. */
static uint32_t read_slot(void)
{
	return read_bar(PCH_DEV_CSE, MMIO_CSE_CB_RW);
}
194
/* Push one 4-byte slot into the host-to-CSE circular buffer. */
static void write_slot(uint32_t val)
{
	write_bar(PCH_DEV_CSE, MMIO_CSE_CB_WW, val);
}
199
wait_write_slots(size_t cnt)200 static int wait_write_slots(size_t cnt)
201 {
202 struct stopwatch sw;
203
204 stopwatch_init_msecs_expire(&sw, HECI_SEND_TIMEOUT_MS);
205 while (host_empty_slots() < cnt) {
206 udelay(HECI_DELAY_US);
207 if (stopwatch_expired(&sw)) {
208 printk(BIOS_ERR, "HECI: timeout, buffer not drained\n");
209 return 0;
210 }
211 }
212 return 1;
213 }
214
wait_read_slots(size_t cnt)215 static int wait_read_slots(size_t cnt)
216 {
217 struct stopwatch sw;
218
219 stopwatch_init_msecs_expire(&sw, HECI_READ_TIMEOUT_MS);
220 while (cse_filled_slots() < cnt) {
221 udelay(HECI_DELAY_US);
222 if (stopwatch_expired(&sw)) {
223 printk(BIOS_ERR, "HECI: timed out reading answer!\n");
224 return 0;
225 }
226 }
227 return 1;
228 }
229
230 /* get number of full 4-byte slots */
bytes_to_slots(size_t bytes)231 static size_t bytes_to_slots(size_t bytes)
232 {
233 return ALIGN_UP(bytes, SLOT_SIZE) / SLOT_SIZE;
234 }
235
cse_ready(void)236 static int cse_ready(void)
237 {
238 uint32_t csr;
239 csr = read_cse_csr();
240 return csr & CSR_READY;
241 }
242
cse_check_hfs1_com(int mode)243 static bool cse_check_hfs1_com(int mode)
244 {
245 union me_hfsts1 hfs1;
246 hfs1.data = me_read_config32(PCI_ME_HFSTS1);
247 return hfs1.fields.operation_mode == mode;
248 }
249
cse_is_hfs1_fw_init_complete(void)250 static bool cse_is_hfs1_fw_init_complete(void)
251 {
252 union me_hfsts1 hfs1;
253 hfs1.data = me_read_config32(PCI_ME_HFSTS1);
254 if (hfs1.fields.fw_init_complete)
255 return true;
256 return false;
257 }
258
cse_is_hfs1_cws_normal(void)259 bool cse_is_hfs1_cws_normal(void)
260 {
261 union me_hfsts1 hfs1;
262 hfs1.data = me_read_config32(PCI_ME_HFSTS1);
263 if (hfs1.fields.working_state == ME_HFS1_CWS_NORMAL)
264 return true;
265 return false;
266 }
267
/* True when HFSTS1's current operation mode is Normal. */
bool cse_is_hfs1_com_normal(void)
{
	return cse_check_hfs1_com(ME_HFS1_COM_NORMAL);
}
272
/* True when the operation mode is Security Override (via MEI message). */
bool cse_is_hfs1_com_secover_mei_msg(void)
{
	return cse_check_hfs1_com(ME_HFS1_COM_SECOVER_MEI_MSG);
}
277
/* True when the operation mode is Soft Temporary Disable. */
bool cse_is_hfs1_com_soft_temp_disable(void)
{
	return cse_check_hfs1_com(ME_HFS1_COM_SOFT_TEMP_DISABLE);
}
282
283 /*
284 * Starting from TGL platform, HFSTS1.spi_protection_mode replaces mfg_mode to indicate
285 * SPI protection status as well as end-of-manufacturing(EOM) status where EOM flow is
286 * triggered in single staged operation (either through first boot with required MFIT
 * configuration or FPT /CLOSEMANUF).
288 * In staged manufacturing flow, spi_protection_mode alone doesn't indicate the EOM status.
289 *
290 * HFSTS1.spi_protection_mode description:
291 * mfg_mode = 0 means SPI protection is on.
292 * mfg_mode = 1 means SPI is unprotected.
293 */
cse_is_hfs1_spi_protected(void)294 bool cse_is_hfs1_spi_protected(void)
295 {
296 union me_hfsts1 hfs1;
297 hfs1.data = me_read_config32(PCI_ME_HFSTS1);
298 return !hfs1.fields.mfg_mode;
299 }
300
cse_is_hfs3_fw_sku_lite(void)301 bool cse_is_hfs3_fw_sku_lite(void)
302 {
303 union me_hfsts3 hfs3;
304 hfs3.data = me_read_config32(PCI_ME_HFSTS3);
305 return hfs3.fields.fw_sku == ME_HFS3_FW_SKU_LITE;
306 }
307
308 /* Makes the host ready to communicate with CSE */
cse_set_host_ready(void)309 void cse_set_host_ready(void)
310 {
311 uint32_t csr;
312 csr = read_host_csr();
313 csr &= ~CSR_RESET;
314 csr |= (CSR_IG | CSR_READY);
315 write_host_csr(csr);
316 }
317
318 /* Polls for ME mode ME_HFS1_COM_SECOVER_MEI_MSG for 15 seconds */
cse_wait_sec_override_mode(void)319 uint8_t cse_wait_sec_override_mode(void)
320 {
321 struct stopwatch sw;
322 stopwatch_init_msecs_expire(&sw, HECI_DELAY_READY_MS);
323 while (!cse_is_hfs1_com_secover_mei_msg()) {
324 udelay(HECI_DELAY_US);
325 if (stopwatch_expired(&sw)) {
326 printk(BIOS_ERR, "HECI: Timed out waiting for SEC_OVERRIDE mode!\n");
327 return 0;
328 }
329 }
330 printk(BIOS_DEBUG, "HECI: CSE took %lld ms to enter security override mode\n",
331 stopwatch_duration_msecs(&sw));
332 return 1;
333 }
334
335 /*
336 * Polls for CSE's current operation mode 'Soft Temporary Disable'.
337 * The CSE enters the current operation mode when it boots from RO(BP1).
338 */
cse_wait_com_soft_temp_disable(void)339 uint8_t cse_wait_com_soft_temp_disable(void)
340 {
341 struct stopwatch sw;
342 stopwatch_init_msecs_expire(&sw, CSE_DELAY_BOOT_TO_RO_MS);
343 while (!cse_is_hfs1_com_soft_temp_disable()) {
344 udelay(HECI_DELAY_US);
345 if (stopwatch_expired(&sw)) {
346 printk(BIOS_ERR, "HECI: Timed out waiting for CSE to boot from RO!\n");
347 return 0;
348 }
349 }
350 printk(BIOS_SPEW, "HECI: CSE took %lld ms to boot from RO\n",
351 stopwatch_duration_msecs(&sw));
352 return 1;
353 }
354
wait_heci_ready(void)355 static int wait_heci_ready(void)
356 {
357 struct stopwatch sw;
358
359 stopwatch_init_msecs_expire(&sw, HECI_DELAY_READY_MS);
360 while (!cse_ready()) {
361 udelay(HECI_DELAY_US);
362 if (stopwatch_expired(&sw))
363 return 0;
364 }
365
366 return 1;
367 }
368
host_gen_interrupt(void)369 static void host_gen_interrupt(void)
370 {
371 uint32_t csr;
372 csr = read_host_csr();
373 csr |= CSR_IG;
374 write_host_csr(csr);
375 }
376
/* Extract the 9-bit payload length field from a MEI message header. */
static size_t hdr_get_length(uint32_t hdr)
{
	return (hdr & MEI_HDR_LENGTH) >> MEI_HDR_LENGTH_START;
}
381
/*
 * Transmit a single MEI fragment: the 32-bit header followed by the
 * payload, written in whole 4-byte slots (the final partial slot is
 * padded from a local temporary, never reading past the caller's buffer).
 * Returns the number of payload bytes sent, or 0 on timeout/CSE failure.
 */
static int
send_one_message(uint32_t hdr, const void *buff)
{
	size_t pend_len, pend_slots, remainder, i;
	uint32_t tmp;
	const uint32_t *p = buff;

	/* Get space for the header */
	if (!wait_write_slots(1))
		return 0;

	/* First, write header */
	write_slot(hdr);

	/* Payload length is encoded in the header we just sent */
	pend_len = hdr_get_length(hdr);
	pend_slots = bytes_to_slots(pend_len);

	if (!wait_write_slots(pend_slots))
		return 0;

	/* Write the body in whole slots */
	i = 0;
	while (i < ALIGN_DOWN(pend_len, SLOT_SIZE)) {
		write_slot(*p++);
		i += SLOT_SIZE;
	}

	remainder = pend_len % SLOT_SIZE;
	/* Pad to 4 bytes not touching caller's buffer */
	if (remainder) {
		memcpy(&tmp, p, remainder);
		write_slot(tmp);
	}

	/* Notify the CSE that new data is available */
	host_gen_interrupt();

	/* Make sure nothing bad happened during transmission */
	if (!cse_ready())
		return 0;

	return pend_len;
}
424
/*
 * Send `len` bytes at `msg` to the CSE client `client_addr`, fragmenting
 * the message into MEI packets that fit the circular buffer (one slot is
 * reserved for each packet header; the 9-bit header length field further
 * caps the fragment size). The last fragment carries MEI_HDR_IS_COMPLETE.
 * The whole transfer is retried up to MAX_HECI_MESSAGE_RETRY_COUNT times;
 * on persistent failure the HECI interface is reset.
 */
enum cse_tx_rx_status
heci_send(const void *msg, size_t len, uint8_t host_addr, uint8_t client_addr)
{
	uint8_t retry;
	uint32_t csr, hdr;
	size_t sent, remaining, cb_size, max_length;
	const uint8_t *p;

	if (!msg || !len)
		return CSE_TX_ERR_INPUT;

	clear_int();

	for (retry = 0; retry < MAX_HECI_MESSAGE_RETRY_COUNT; retry++) {
		p = msg;

		if (!wait_heci_ready()) {
			printk(BIOS_ERR, "HECI: not ready\n");
			continue;
		}

		/* Circular buffer depth (in bytes) from the host CSR */
		csr = read_host_csr();
		cb_size = ((csr & CSR_CBD) >> CSR_CBD_START) * SLOT_SIZE;
		/*
		 * Reserve one slot for the header. Limit max message
		 * length by 9 bits that are available in the header.
		 */
		max_length = MIN(cb_size, (1 << MEI_HDR_LENGTH_SIZE) - 1)
				- SLOT_SIZE;
		remaining = len;

		/*
		 * Fragment the message into smaller messages not exceeding
		 * useful circular buffer length. Mark last message complete.
		 */
		do {
			hdr = MIN(max_length, remaining)
				<< MEI_HDR_LENGTH_START;
			hdr |= client_addr << MEI_HDR_CSE_ADDR_START;
			hdr |= host_addr << MEI_HDR_HOST_ADDR_START;
			hdr |= (MIN(max_length, remaining) == remaining) ?
						MEI_HDR_IS_COMPLETE : 0;
			sent = send_one_message(hdr, p);
			p += sent;
			remaining -= sent;
		} while (remaining > 0 && sent != 0);

		/* Everything went out: done; otherwise retry from scratch */
		if (!remaining)
			return CSE_TX_RX_SUCCESS;
	}

	printk(BIOS_DEBUG, "HECI: Trigger HECI reset\n");
	heci_reset();
	return CSE_TX_ERR_CSE_NOT_READY;
}
480
481 static enum cse_tx_rx_status
recv_one_message(uint32_t * hdr,void * buff,size_t maxlen,size_t * recv_len)482 recv_one_message(uint32_t *hdr, void *buff, size_t maxlen, size_t *recv_len)
483 {
484 uint32_t reg, *p = buff;
485 size_t recv_slots, remainder, i;
486
487 /* first get the header */
488 if (!wait_read_slots(1))
489 return CSE_RX_ERR_TIMEOUT;
490
491 *hdr = read_slot();
492 *recv_len = hdr_get_length(*hdr);
493
494 if (!*recv_len)
495 printk(BIOS_WARNING, "HECI: message is zero-sized\n");
496
497 recv_slots = bytes_to_slots(*recv_len);
498
499 i = 0;
500 if (*recv_len > maxlen) {
501 printk(BIOS_ERR, "HECI: response is too big\n");
502 return CSE_RX_ERR_RESP_LEN_MISMATCH;
503 }
504
505 /* wait for the rest of messages to arrive */
506 wait_read_slots(recv_slots);
507
508 /* fetch whole slots first */
509 while (i < ALIGN_DOWN(*recv_len, SLOT_SIZE)) {
510 *p++ = read_slot();
511 i += SLOT_SIZE;
512 }
513
514 /*
515 * If ME is not ready, something went wrong and
516 * we received junk
517 */
518 if (!cse_ready())
519 return CSE_RX_ERR_CSE_NOT_READY;
520
521 remainder = *recv_len % SLOT_SIZE;
522
523 if (remainder) {
524 reg = read_slot();
525 memcpy(p, ®, remainder);
526 }
527 return CSE_TX_RX_SUCCESS;
528 }
529
/*
 * Receive one complete HECI message into `buff`. On entry *maxlen is the
 * buffer capacity; on success it is updated to the number of bytes
 * actually received. Multiple MEI packets are reassembled until one is
 * marked MEI_HDR_IS_COMPLETE or the buffer runs out. Retries up to
 * MAX_HECI_MESSAGE_RETRY_COUNT times, resetting HECI on failure.
 */
enum cse_tx_rx_status heci_receive(void *buff, size_t *maxlen)
{
	uint8_t retry;
	size_t left, received;
	uint32_t hdr = 0;
	uint8_t *p;
	enum cse_tx_rx_status ret = CSE_RX_ERR_TIMEOUT;

	if (!buff || !maxlen || !*maxlen)
		return CSE_RX_ERR_INPUT;

	clear_int();

	for (retry = 0; retry < MAX_HECI_MESSAGE_RETRY_COUNT; retry++) {
		/* Restart at the beginning of the caller's buffer */
		p = buff;
		left = *maxlen;

		if (!wait_heci_ready()) {
			printk(BIOS_ERR, "HECI: not ready\n");
			continue;
		}

		/*
		 * Receive multiple packets until we meet one marked
		 * complete or we run out of space in caller-provided buffer.
		 */
		do {
			ret = recv_one_message(&hdr, p, left, &received);
			if (ret) {
				printk(BIOS_ERR, "HECI: Failed to receive!\n");
				goto CSE_RX_ERR_HANDLE;
			}
			left -= received;
			p += received;
			/* If we read out everything ping to send more */
			if (!(hdr & MEI_HDR_IS_COMPLETE) && !cse_filled_slots())
				host_gen_interrupt();
		} while (received && !(hdr & MEI_HDR_IS_COMPLETE) && left > 0);

		if ((hdr & MEI_HDR_IS_COMPLETE) && received) {
			/* Report back how many bytes were actually consumed */
			*maxlen = p - (uint8_t *)buff;
			if (CONFIG(SOC_INTEL_CSE_SERVER_SKU))
				clear_int();
			return CSE_TX_RX_SUCCESS;
		}
	}

CSE_RX_ERR_HANDLE:
	printk(BIOS_DEBUG, "HECI: Trigger HECI Reset\n");
	heci_reset();
	return CSE_RX_ERR_CSE_NOT_READY;
}
582
heci_send_receive(const void * snd_msg,size_t snd_sz,void * rcv_msg,size_t * rcv_sz,uint8_t cse_addr)583 enum cse_tx_rx_status heci_send_receive(const void *snd_msg, size_t snd_sz, void *rcv_msg,
584 size_t *rcv_sz, uint8_t cse_addr)
585 {
586 enum cse_tx_rx_status ret;
587
588 ret = heci_send(snd_msg, snd_sz, BIOS_HOST_ADDR, cse_addr);
589 if (ret) {
590 printk(BIOS_ERR, "HECI: send Failed\n");
591 return ret;
592 }
593
594 if (rcv_msg != NULL) {
595 ret = heci_receive(rcv_msg, rcv_sz);
596 if (ret) {
597 printk(BIOS_ERR, "HECI: receive Failed\n");
598 return ret;
599 }
600 }
601 return ret;
602 }
603
604 /*
605 * Attempt to reset the device. This is useful when host and ME are out
606 * of sync during transmission or ME didn't understand the message.
607 */
heci_reset(void)608 int heci_reset(void)
609 {
610 uint32_t csr;
611
612 /* Clear post code to prevent eventlog entry from unknown code. */
613 post_code(POSTCODE_CODE_ZERO);
614
615 /* Send reset request */
616 csr = read_host_csr();
617 csr |= (CSR_RESET | CSR_IG);
618 write_host_csr(csr);
619
620 if (wait_heci_ready()) {
621 /* Device is back on its imaginary feet, clear reset */
622 cse_set_host_ready();
623 return 1;
624 }
625
626 printk(BIOS_CRIT, "HECI: reset failed\n");
627
628 return 0;
629 }
630
is_cse_devfn_visible(unsigned int devfn)631 bool is_cse_devfn_visible(unsigned int devfn)
632 {
633 int slot = PCI_SLOT(devfn);
634 int func = PCI_FUNC(devfn);
635
636 if (!is_devfn_enabled(devfn)) {
637 printk(BIOS_WARNING, "HECI: CSE device %02x.%01x is disabled\n", slot, func);
638 return false;
639 }
640
641 if (pci_read_config16(PCI_DEV(0, slot, func), PCI_VENDOR_ID) == 0xFFFF) {
642 printk(BIOS_WARNING, "HECI: CSE device %02x.%01x is hidden\n", slot, func);
643 return false;
644 }
645
646 return true;
647 }
648
/* True when the primary CSE device is enabled and visible on PCI. */
bool is_cse_enabled(void)
{
	return is_cse_devfn_visible(PCH_DEVFN_CSE);
}
653
/* Read a 32-bit register from the CSE device's PCI config space. */
uint32_t me_read_config32(int offset)
{
	return pci_read_config32(PCH_DEV_CSE, offset);
}
658
static bool cse_is_global_reset_allowed(void)
{
	/*
	 * GLOBAL_RESET may be sent only when:
	 * - the working state is Normal and the operation mode is Normal, or
	 * - the working state is Normal, the firmware SKU is Lite, and the
	 *   operation mode is Soft Temp Disable or Security Override.
	 */
	if (!cse_is_hfs1_cws_normal())
		return false;

	if (cse_is_hfs1_com_normal())
		return true;

	return cse_is_hfs3_fw_sku_lite() &&
	       (cse_is_hfs1_com_soft_temp_disable() || cse_is_hfs1_com_secover_mei_msg());
}
680
681 /*
682 * Sends GLOBAL_RESET_REQ cmd to CSE with reset type GLOBAL_RESET.
683 * Returns 0 on failure and 1 on success.
684 */
cse_request_reset(enum rst_req_type rst_type)685 static int cse_request_reset(enum rst_req_type rst_type)
686 {
687 int status;
688 struct mkhi_hdr reply;
689 struct reset_message {
690 struct mkhi_hdr hdr;
691 uint8_t req_origin;
692 uint8_t reset_type;
693 } __packed;
694 struct reset_message msg = {
695 .hdr = {
696 .group_id = MKHI_GROUP_ID_CBM,
697 .command = MKHI_CBM_GLOBAL_RESET_REQ,
698 },
699 .req_origin = GR_ORIGIN_BIOS_POST,
700 .reset_type = rst_type
701 };
702 size_t reply_size;
703
704 printk(BIOS_DEBUG, "HECI: Global Reset(Type:%d) Command\n", rst_type);
705
706 if (!(rst_type == GLOBAL_RESET || rst_type == CSE_RESET_ONLY)) {
707 printk(BIOS_ERR, "HECI: Unsupported reset type is requested\n");
708 return 0;
709 }
710
711 if (!cse_is_global_reset_allowed() || !is_cse_enabled()) {
712 printk(BIOS_ERR, "HECI: CSE does not meet required prerequisites\n");
713 return 0;
714 }
715
716 heci_reset();
717
718 reply_size = sizeof(reply);
719 memset(&reply, 0, reply_size);
720
721 if (rst_type == CSE_RESET_ONLY)
722 status = heci_send(&msg, sizeof(msg), BIOS_HOST_ADDR, HECI_MKHI_ADDR);
723 else
724 status = heci_send_receive(&msg, sizeof(msg), &reply, &reply_size,
725 HECI_MKHI_ADDR);
726
727 printk(BIOS_DEBUG, "HECI: Global Reset %s!\n", !status ? "success" : "failure");
728 return status;
729 }
730
/* Request a host+CSE global reset; see cse_request_reset() for semantics. */
int cse_request_global_reset(void)
{
	return cse_request_reset(GLOBAL_RESET);
}
735
static bool cse_is_hmrfpo_enable_allowed(void)
{
	/*
	 * HMRFPO ENABLE may be sent only when:
	 * - the working state is Normal and the operation mode is Normal, or
	 * - the working state is Normal, the firmware SKU is Lite, and the
	 *   operation mode is Soft Temp Disable.
	 */
	if (!cse_is_hfs1_cws_normal())
		return false;

	if (cse_is_hfs1_com_normal())
		return true;

	return cse_is_hfs3_fw_sku_lite() && cse_is_hfs1_com_soft_temp_disable();
}
755
756 /* Sends HMRFPO Enable command to CSE */
/*
 * Send the MKHI HMRFPO_ENABLE command, putting the CSE into security
 * override mode so the host can write the ME region of SPI flash.
 * Returns CB_SUCCESS when the CSE is already in (or enters) security
 * override mode, CB_ERR on prerequisite or transport failure.
 */
enum cb_err cse_hmrfpo_enable(void)
{
	struct hmrfpo_enable_msg {
		struct mkhi_hdr hdr;
		uint32_t nonce[2];
	} __packed;

	/* HMRFPO Enable message */
	struct hmrfpo_enable_msg msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_HMRFPO,
			.command = MKHI_HMRFPO_ENABLE,
		},
		.nonce = {0},
	};

	/* HMRFPO Enable response */
	struct hmrfpo_enable_resp {
		struct mkhi_hdr hdr;
		/* Base addr for factory data area, not relevant for client SKUs */
		uint32_t fct_base;
		/* Length of factory data area, not relevant for client SKUs */
		uint32_t fct_limit;
		uint8_t status;
		uint8_t reserved[3];
	} __packed;

	struct hmrfpo_enable_resp resp;
	size_t resp_size = sizeof(struct hmrfpo_enable_resp);

	if (cse_is_hfs1_com_secover_mei_msg()) {
		printk(BIOS_DEBUG, "HECI: CSE is already in security override mode, "
			"skip sending HMRFPO_ENABLE command to CSE\n");
		return CB_SUCCESS;
	}

	printk(BIOS_DEBUG, "HECI: Send HMRFPO Enable Command\n");

	if (!cse_is_hmrfpo_enable_allowed()) {
		printk(BIOS_ERR, "HECI: CSE does not meet required prerequisites\n");
		return CB_ERR;
	}

	if (heci_send_receive(&msg, sizeof(struct hmrfpo_enable_msg),
				&resp, &resp_size, HECI_MKHI_ADDR))
		return CB_ERR;

	/* MKHI-level result code in the reply header */
	if (resp.hdr.result) {
		printk(BIOS_ERR, "HECI: Resp Failed:%d\n", resp.hdr.result);
		return CB_ERR;
	}

	/* Command-specific status byte */
	if (resp.status) {
		printk(BIOS_ERR, "HECI: HMRFPO_Enable Failed (resp status: %d)\n", resp.status);
		return CB_ERR;
	}

	return CB_SUCCESS;
}
816
817 /*
818 * Sends HMRFPO Get Status command to CSE to get the HMRFPO status.
819 * The status can be DISABLED/LOCKED/ENABLED
820 */
/*
 * Send the MKHI HMRFPO_GET_STATUS command.
 * Returns the HMRFPO status byte (DISABLED/LOCKED/ENABLED) on success,
 * or -1 on prerequisite, transport, or MKHI-result failure.
 */
int cse_hmrfpo_get_status(void)
{
	struct hmrfpo_get_status_msg {
		struct mkhi_hdr hdr;
	} __packed;

	struct hmrfpo_get_status_resp {
		struct mkhi_hdr hdr;
		uint8_t status;
		uint8_t reserved[3];
	} __packed;

	struct hmrfpo_get_status_msg msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_HMRFPO,
			.command = MKHI_HMRFPO_GET_STATUS,
		},
	};
	struct hmrfpo_get_status_resp resp;
	size_t resp_size = sizeof(struct hmrfpo_get_status_resp);

	printk(BIOS_INFO, "HECI: Sending Get HMRFPO Status Command\n");

	/* Prerequisite: the CSE working state must be Normal */
	if (!cse_is_hfs1_cws_normal()) {
		printk(BIOS_ERR, "HECI: CSE's current working state is not Normal\n");
		return -1;
	}

	if (heci_send_receive(&msg, sizeof(struct hmrfpo_get_status_msg),
				&resp, &resp_size, HECI_MKHI_ADDR)) {
		printk(BIOS_ERR, "HECI: HMRFPO send/receive fail\n");
		return -1;
	}

	if (resp.hdr.result) {
		printk(BIOS_ERR, "HECI: HMRFPO Resp Failed:%d\n",
				resp.hdr.result);
		return -1;
	}

	return resp.status;
}
863
864 /* Queries and gets ME firmware version */
/*
 * Query the ME firmware version via MKHI GET_FW_VERSION into *resp.
 * Returns CB_SUCCESS on success, CB_ERR on bad input, disabled CSE,
 * unmet state prerequisites, or a failed HECI transaction.
 */
static enum cb_err get_me_fw_version(struct me_fw_ver_resp *resp)
{
	const struct mkhi_hdr fw_ver_msg = {
		.group_id = MKHI_GROUP_ID_GEN,
		.command = MKHI_GEN_GET_FW_VERSION,
	};

	if (resp == NULL) {
		printk(BIOS_ERR, "%s failed, null pointer parameter\n", __func__);
		return CB_ERR;
	}
	size_t resp_size = sizeof(*resp);

	/* Ignore if CSE is disabled */
	if (!is_cse_enabled())
		return CB_ERR;

	/*
	 * Prerequisites:
	 * 1) HFSTS1 Current Working State is Normal
	 * 2) HFSTS1 Current Operation Mode is Normal
	 * 3) It's after DRAM INIT DONE message (taken care of by calling it
	 *    during ramstage
	 */
	if (!cse_is_hfs1_cws_normal() || !cse_is_hfs1_com_normal())
		return CB_ERR;

	/* Resync the interface before the query */
	heci_reset();

	if (heci_send_receive(&fw_ver_msg, sizeof(fw_ver_msg), resp, &resp_size,
					HECI_MKHI_ADDR))
		return CB_ERR;

	/* MKHI-level result code must be zero */
	if (resp->hdr.result)
		return CB_ERR;


	return CB_SUCCESS;
}
904
print_me_fw_version(void * unused)905 void print_me_fw_version(void *unused)
906 {
907 struct me_fw_ver_resp resp = {0};
908
909 /* Ignore if UART debugging is disabled */
910 if (!CONFIG(CONSOLE_SERIAL))
911 return;
912
913 /*
914 * Skip if ME firmware is Lite SKU, as RO/RW versions are
915 * already logged by `cse_print_boot_partition_info()`
916 */
917 if (cse_is_hfs3_fw_sku_lite())
918 return;
919
920 if (get_me_fw_version(&resp) == CB_SUCCESS) {
921 printk(BIOS_DEBUG, "ME: Version: %d.%d.%d.%d\n", resp.code.major,
922 resp.code.minor, resp.code.hotfix, resp.code.build);
923 return;
924 }
925 printk(BIOS_DEBUG, "ME: Version: Unavailable\n");
926 }
927
/* Log CSE status registers and force a vboot recovery reboot; never returns. */
void cse_trigger_vboot_recovery(enum csme_failure_reason reason)
{
	/* Dump HFSTS1-3 first to aid post-mortem debugging */
	printk(BIOS_DEBUG, "cse: CSE status registers: HFSTS1: 0x%x, HFSTS2: 0x%x "
			"HFSTS3: 0x%x\n", me_read_config32(PCI_ME_HFSTS1),
			me_read_config32(PCI_ME_HFSTS2), me_read_config32(PCI_ME_HFSTS3));

	if (CONFIG(VBOOT))
		vboot_fail_and_reboot(vboot_get_context(), VB2_RECOVERY_INTEL_CSE_LITE_SKU,
				      reason);

	/* NOTE(review): presumably vboot_fail_and_reboot() reboots and does
	   not return; if control reaches here (or VBOOT is off), halt. */
	die("cse: Failed to trigger recovery mode(recovery subcode:%d)\n", reason);
}
940
static bool disable_cse_idle(pci_devfn_t dev)
{
	struct stopwatch sw;
	uint32_t ctrl = read_bar(dev, MMIO_CSE_DEVIDLE);

	/* Clear DEVIDLE to request that the device leave its idle state */
	ctrl &= ~CSE_DEV_IDLE;
	write_bar(dev, MMIO_CSE_DEVIDLE, ctrl);

	/* Wait (up to HECI_CIP_TIMEOUT_US) for the CIP acknowledgement */
	stopwatch_init_usecs_expire(&sw, HECI_CIP_TIMEOUT_US);
	do {
		ctrl = read_bar(dev, MMIO_CSE_DEVIDLE);
		if (ctrl & CSE_DEV_CIP)
			return true;
		udelay(HECI_DELAY_US);
	} while (!stopwatch_expired(&sw));

	return false;
}
958
static void enable_cse_idle(pci_devfn_t dev)
{
	/* Set DEVIDLE so the device may enter its idle (D0i3) state */
	uint32_t ctrl = read_bar(dev, MMIO_CSE_DEVIDLE);

	write_bar(dev, MMIO_CSE_DEVIDLE, ctrl | CSE_DEV_IDLE);
}
965
get_cse_device_state(unsigned int devfn)966 enum cse_device_state get_cse_device_state(unsigned int devfn)
967 {
968 pci_devfn_t dev = PCI_DEV(0, PCI_SLOT(devfn), PCI_FUNC(devfn));
969 uint32_t dev_idle_ctrl = read_bar(dev, MMIO_CSE_DEVIDLE);
970 if ((dev_idle_ctrl & CSE_DEV_IDLE) == CSE_DEV_IDLE)
971 return DEV_IDLE;
972
973 return DEV_ACTIVE;
974 }
975
static enum cse_device_state ensure_cse_active(pci_devfn_t dev)
{
	/* Stay idle if the device refuses to wake up */
	if (!disable_cse_idle(dev))
		return DEV_IDLE;

	/* Re-enable MMIO decode and bus mastering for the awake device */
	pci_or_config32(dev, PCI_COMMAND, PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	return DEV_ACTIVE;
}
984
static void ensure_cse_idle(pci_devfn_t dev)
{
	enable_cse_idle(dev);

	/* Drop MMIO decode and bus mastering while the device idles */
	pci_and_config32(dev, PCI_COMMAND, ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER));
}
991
set_cse_device_state(unsigned int devfn,enum cse_device_state requested_state)992 bool set_cse_device_state(unsigned int devfn, enum cse_device_state requested_state)
993 {
994 enum cse_device_state current_state = get_cse_device_state(devfn);
995 pci_devfn_t dev = PCI_DEV(0, PCI_SLOT(devfn), PCI_FUNC(devfn));
996
997 if (current_state == requested_state)
998 return true;
999
1000 if (requested_state == DEV_ACTIVE)
1001 return ensure_cse_active(dev) == requested_state;
1002 else
1003 ensure_cse_idle(dev);
1004
1005 return true;
1006 }
1007
cse_set_to_d0i3(void)1008 void cse_set_to_d0i3(void)
1009 {
1010 if (!is_cse_devfn_visible(PCH_DEVFN_CSE))
1011 return;
1012
1013 set_cse_device_state(PCH_DEVFN_CSE, DEV_IDLE);
1014 }
1015
1016 /* Function to set D0I3 for all HECI devices */
heci_set_to_d0i3(void)1017 void heci_set_to_d0i3(void)
1018 {
1019 for (int i = 0; i < CONFIG_MAX_HECI_DEVICES; i++) {
1020 unsigned int devfn = PCI_DEVFN(PCH_DEV_SLOT_CSE, i);
1021 if (!is_cse_devfn_visible(devfn))
1022 continue;
1023
1024 set_cse_device_state(devfn, DEV_IDLE);
1025 }
1026 }
1027
1028 /* Initialize the HECI devices. */
void heci_init(void)
{
	for (int i = 0; i < CONFIG_MAX_HECI_DEVICES; i++) {
		unsigned int devfn = PCI_DEVFN(PCH_DEV_SLOT_CSE, i);
		pci_devfn_t dev = PCI_DEV(0, PCI_SLOT(devfn), PCI_FUNC(devfn));

		if (!is_cse_devfn_visible(devfn))
			continue;

		/*
		 * Assume it is already initialized, nothing else to do.
		 * NOTE(review): this `return` bails out of the whole init,
		 * skipping later HECI functions and the final heci_reset();
		 * confirm `continue` was not intended here.
		 */
		if (get_cse_bar(dev))
			return;

		/* Each HECI function gets its own 4 KiB window */
		heci_assign_resource(dev, HECI1_BASE_ADDRESS + (i * HECI_BASE_SIZE));

		ensure_cse_active(dev);
	}
	/* Trigger HECI Reset and make Host ready for communication with CSE */
	heci_reset();
}
1049
cse_control_global_reset_lock(void)1050 void cse_control_global_reset_lock(void)
1051 {
1052 /*
1053 * As per ME BWG recommendation the BIOS should not lock down CF9GR bit during
1054 * manufacturing and re-manufacturing environment if HFSTS1 [4] is set. Note:
1055 * this recommendation is not applicable for CSE-Lite SKUs where BIOS should set
1056 * CF9LOCK bit irrespectively.
1057 *
1058 * Other than that, make sure payload/OS can't trigger global reset.
1059 *
1060 * BIOS must also ensure that CF9GR is cleared and locked (Bit31 of ETR3)
1061 * prior to transferring control to the OS.
1062 */
1063 if (CONFIG(SOC_INTEL_CSE_LITE_SKU) || cse_is_hfs1_spi_protected())
1064 pmc_global_reset_disable_and_lock();
1065 else
1066 pmc_global_reset_enable(false);
1067 }
1068
/*
 * Query the CSE firmware feature state (FWCAPS rule
 * ME_FEATURE_STATE_RULE_ID) into *feature_state.
 * Returns CB_SUCCESS on success; CB_ERR when the CSE is disabled, the
 * input pointer is NULL, state prerequisites are unmet, the transaction
 * fails, or the reply's rule data length is unexpected.
 */
enum cb_err cse_get_fw_feature_state(uint32_t *feature_state)
{
	struct fw_feature_state_msg {
		struct mkhi_hdr hdr;
		uint32_t rule_id;
	} __packed;

	/* Get Firmware Feature State message */
	struct fw_feature_state_msg msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_FWCAPS,
			.command = MKHI_FWCAPS_GET_FW_FEATURE_STATE,
		},
		.rule_id = ME_FEATURE_STATE_RULE_ID
	};

	/* Get Firmware Feature State response */
	struct fw_feature_state_resp {
		struct mkhi_hdr hdr;
		uint32_t rule_id;
		uint8_t rule_len;
		uint32_t fw_runtime_status;
	} __packed;

	struct fw_feature_state_resp resp;
	size_t resp_size = sizeof(struct fw_feature_state_resp);

	/* Ignore if CSE is disabled or input buffer is invalid */
	if (!is_cse_enabled() || !feature_state)
		return CB_ERR;

	/*
	 * Prerequisites:
	 * 1) HFSTS1 Current Working State is Normal
	 * 2) HFSTS1 Current Operation Mode is Normal
	 * 3) It's after DRAM INIT DONE message (taken care of by calling it
	 *    during ramstage)
	 */
	if (!cse_is_hfs1_cws_normal() || !cse_is_hfs1_com_normal() || !ENV_RAMSTAGE)
		return CB_ERR;

	printk(BIOS_DEBUG, "HECI: Send GET FW FEATURE STATE Command\n");

	if (heci_send_receive(&msg, sizeof(struct fw_feature_state_msg),
				&resp, &resp_size, HECI_MKHI_ADDR))
		return CB_ERR;

	/* MKHI-level result code must be zero */
	if (resp.hdr.result) {
		printk(BIOS_ERR, "HECI: Resp Failed:%d\n", resp.hdr.result);
		return CB_ERR;
	}

	/* Sanity-check the variable-length rule payload */
	if (resp.rule_len != sizeof(resp.fw_runtime_status)) {
		printk(BIOS_ERR, "HECI: GET FW FEATURE STATE has invalid rule data length\n");
		return CB_ERR;
	}

	*feature_state = resp.fw_runtime_status;

	return CB_SUCCESS;
}
1130
cse_enable_ptt(bool state)1131 void cse_enable_ptt(bool state)
1132 {
1133 struct fw_feature_shipment_override_msg {
1134 struct mkhi_hdr hdr;
1135 uint32_t enable_mask;
1136 uint32_t disable_mask;
1137 } __packed;
1138
1139 /* FW Feature Shipment Time State Override message */
1140 struct fw_feature_shipment_override_msg msg = {
1141 .hdr = {
1142 .group_id = MKHI_GROUP_ID_GEN,
1143 .command = MKHI_GEN_FW_FEATURE_SHIPMENT_OVER,
1144 },
1145 .enable_mask = 0,
1146 .disable_mask = 0
1147 };
1148
1149 /* FW Feature Shipment Time State Override response */
1150 struct fw_feature_shipment_override_resp {
1151 struct mkhi_hdr hdr;
1152 uint32_t data;
1153 } __packed;
1154
1155 struct fw_feature_shipment_override_resp resp;
1156 size_t resp_size = sizeof(struct fw_feature_shipment_override_resp);
1157 uint32_t feature_status;
1158
1159 /* Ignore if CSE is disabled */
1160 if (!is_cse_enabled())
1161 return;
1162
1163 printk(BIOS_DEBUG, "Requested to change PTT state to %sabled\n", state ? "en" : "dis");
1164
1165 /*
1166 * Prerequisites:
1167 * 1) HFSTS1 Current Working State is Normal
1168 * 2) HFSTS1 Current Operation Mode is Normal
1169 * 3) It's after DRAM INIT DONE message (taken care of by calling it
1170 * during ramstage
1171 * 4) HFSTS1 FW Init Complete is set
1172 * 5) Before EOP issued to CSE
1173 */
1174 if (!cse_is_hfs1_cws_normal() || !cse_is_hfs1_com_normal() ||
1175 !cse_is_hfs1_fw_init_complete() || !ENV_RAMSTAGE) {
1176 printk(BIOS_ERR, "HECI: Unmet prerequisites for"
1177 "FW FEATURE SHIPMENT TIME STATE OVERRIDE\n");
1178 return;
1179 }
1180
1181 if (cse_get_fw_feature_state(&feature_status) != CB_SUCCESS) {
1182 printk(BIOS_ERR, "HECI: Cannot determine current feature status\n");
1183 return;
1184 }
1185
1186 if (!!(feature_status & ME_FW_FEATURE_PTT) == state) {
1187 printk(BIOS_DEBUG, "HECI: PTT is already in the requested state\n");
1188 return;
1189 }
1190
1191 printk(BIOS_DEBUG, "HECI: Send FW FEATURE SHIPMENT TIME STATE OVERRIDE Command\n");
1192
1193 if (state)
1194 msg.enable_mask |= ME_FW_FEATURE_PTT;
1195 else
1196 msg.disable_mask |= ME_FW_FEATURE_PTT;
1197
1198 if (heci_send_receive(&msg, sizeof(struct fw_feature_shipment_override_msg),
1199 &resp, &resp_size, HECI_MKHI_ADDR))
1200 return;
1201
1202 if (resp.hdr.result) {
1203 printk(BIOS_ERR, "HECI: Resp Failed:%d\n", resp.hdr.result);
1204 return;
1205 }
1206
1207 /* Global reset is required after acceptance of the command */
1208 if (resp.data == 0) {
1209 printk(BIOS_DEBUG, "HECI: FW FEATURE SHIPMENT TIME STATE OVERRIDE success\n");
1210 do_global_reset();
1211 } else {
1212 printk(BIOS_ERR, "HECI: FW FEATURE SHIPMENT TIME STATE OVERRIDE error (%x)\n",
1213 resp.data);
1214 }
1215 }
1216
1217 #if ENV_RAMSTAGE
1218
1219 /*
1220 * Disable the Intel (CS)Management Engine via HECI based on a cmos value
1221 * of `me_state`. A value of `0` will result in a (CS)ME state of `0` (working)
1222 * and value of `1` will result in a (CS)ME state of `3` (disabled).
1223 *
1224 * It isn't advised to use this in combination with me_cleaner.
1225 *
1226 * It is advisable to have a second cmos option called `me_state_counter`.
 * Whilst not essential, it avoids reboot loops if the (CS)ME fails to
1228 * change states after 3 attempts. Some versions of the (CS)ME need to be
1229 * reset 3 times.
1230 *
1231 * Ideal cmos values would be:
1232 *
1233 * # coreboot config options: cpu
1234 * 432 1 e 5 me_state
1235 * 440 4 h 0 me_state_counter
1236 *
1237 * #ID value text
1238 * 5 0 Enable
1239 * 5 1 Disable
1240 */
1241
me_reset_with_count(void)1242 static void me_reset_with_count(void)
1243 {
1244 unsigned int cmos_me_state_counter = get_uint_option("me_state_counter", UINT_MAX);
1245
1246 if (cmos_me_state_counter != UINT_MAX) {
1247 printk(BIOS_DEBUG, "CMOS: me_state_counter = %u\n", cmos_me_state_counter);
1248 /* Avoid boot loops by only trying a state change 3 times */
1249 if (cmos_me_state_counter < ME_DISABLE_ATTEMPTS) {
1250 cmos_me_state_counter++;
1251 set_uint_option("me_state_counter", cmos_me_state_counter);
1252 printk(BIOS_DEBUG, "ME: Reset attempt %u/%u.\n", cmos_me_state_counter,
1253 ME_DISABLE_ATTEMPTS);
1254 do_global_reset();
1255 } else {
1256 /*
1257 * If the (CS)ME fails to change states after 3 attempts, it will
1258 * likely need a cold boot, or recovering.
1259 */
1260 printk(BIOS_ERR, "Failed to change ME state in %u attempts!\n",
1261 ME_DISABLE_ATTEMPTS);
1262 }
1263 } else {
1264 printk(BIOS_DEBUG, "ME: Resetting");
1265 do_global_reset();
1266 }
1267 }
1268
cse_set_state(struct device * dev)1269 static void cse_set_state(struct device *dev)
1270 {
1271 /* (CS)ME Disable Command */
1272 struct me_disable_command {
1273 struct mkhi_hdr hdr;
1274 uint32_t rule_id;
1275 uint8_t rule_len;
1276 uint32_t rule_data;
1277 } __packed me_disable = {
1278 .hdr = {
1279 .group_id = MKHI_GROUP_ID_FWCAPS,
1280 .command = MKHI_SET_ME_DISABLE,
1281 },
1282 .rule_id = ME_DISABLE_RULE_ID,
1283 .rule_len = ME_DISABLE_RULE_LENGTH,
1284 .rule_data = ME_DISABLE_COMMAND,
1285 };
1286
1287 struct me_disable_reply {
1288 struct mkhi_hdr hdr;
1289 uint32_t rule_id;
1290 } __packed;
1291
1292 struct me_disable_reply disable_reply;
1293
1294 size_t disable_reply_size;
1295
1296 /* (CS)ME Enable Command */
1297 struct me_enable_command {
1298 struct mkhi_hdr hdr;
1299 } me_enable = {
1300 .hdr = {
1301 .group_id = MKHI_GROUP_ID_BUP_COMMON,
1302 .command = MKHI_SET_ME_ENABLE,
1303 },
1304 };
1305
1306 struct me_enable_reply {
1307 struct mkhi_hdr hdr;
1308 } __packed;
1309
1310 struct me_enable_reply enable_reply;
1311
1312 size_t enable_reply_size;
1313
1314 /* Function Start */
1315
1316 int send;
1317 int result;
1318 /*
1319 * Check if the CMOS value "me_state" exists, if it doesn't, then
1320 * don't do anything.
1321 */
1322 const unsigned int cmos_me_state = get_uint_option("me_state", UINT_MAX);
1323
1324 if (cmos_me_state == UINT_MAX)
1325 return;
1326
1327 printk(BIOS_DEBUG, "CMOS: me_state = %u\n", cmos_me_state);
1328
1329 /*
1330 * We only take action if the me_state doesn't match the CS(ME) working state
1331 */
1332
1333 const unsigned int soft_temp_disable = cse_is_hfs1_com_soft_temp_disable();
1334
1335 if (cmos_me_state && !soft_temp_disable) {
1336 /* me_state should be disabled, but it's enabled */
1337 printk(BIOS_DEBUG, "ME needs to be disabled.\n");
1338 send = heci_send_receive(&me_disable, sizeof(me_disable),
1339 &disable_reply, &disable_reply_size, HECI_MKHI_ADDR);
1340 result = disable_reply.hdr.result;
1341 } else if (!cmos_me_state && soft_temp_disable) {
1342 /* me_state should be enabled, but it's disabled */
1343 printk(BIOS_DEBUG, "ME needs to be enabled.\n");
1344 send = heci_send_receive(&me_enable, sizeof(me_enable),
1345 &enable_reply, &enable_reply_size, HECI_MKHI_ADDR);
1346 result = enable_reply.hdr.result;
1347 } else {
1348 printk(BIOS_DEBUG, "ME is %s.\n", cmos_me_state ? "disabled" : "enabled");
1349 unsigned int cmos_me_state_counter = get_uint_option("me_state_counter",
1350 UINT_MAX);
1351 /* set me_state_counter to 0 */
1352 if ((cmos_me_state_counter != UINT_MAX && cmos_me_state_counter != 0))
1353 set_uint_option("me_state_counter", 0);
1354 return;
1355 }
1356
1357 printk(BIOS_DEBUG, "HECI: ME state change send %s!\n",
1358 !send ? "success" : "failure");
1359 printk(BIOS_DEBUG, "HECI: ME state change result %s!\n",
1360 result ? "success" : "failure");
1361
1362 /*
1363 * Reset if the result was successful, or if the send failed as some older
1364 * version of the Intel (CS)ME won't successfully receive the message unless reset
1365 * twice.
1366 */
1367 if (send || !result)
1368 me_reset_with_count();
1369 }
1370
1371 /*
1372 * `cse_final_ready_to_boot` function is native implementation of equivalent events
1373 * performed by FSP NotifyPhase(Ready To Boot) API invocations.
1374 *
1375 * Operations are:
1376 * 1. Perform global reset lock.
1377 * 2. Put HECI1 to D0i3 and disable the HECI1 if the user selects
1378 * DISABLE_HECI1_AT_PRE_BOOT config or CSE HFSTS1 Operation Mode is
1379 * `Software Temporary Disable`.
1380 */
cse_final_ready_to_boot(void)1381 static void cse_final_ready_to_boot(void)
1382 {
1383 cse_control_global_reset_lock();
1384
1385 if (CONFIG(DISABLE_HECI1_AT_PRE_BOOT) || cse_is_hfs1_com_soft_temp_disable()) {
1386 cse_set_to_d0i3();
1387 heci1_disable();
1388 }
1389 }
1390
1391 /*
1392 * `cse_final_end_of_firmware` function is native implementation of equivalent events
1393 * performed by FSP NotifyPhase(End of Firmware) API invocations.
1394 *
1395 * Operations are:
1396 * 1. Set D0I3 for all HECI devices.
1397 */
cse_final_end_of_firmware(void)1398 static void cse_final_end_of_firmware(void)
1399 {
1400 heci_set_to_d0i3();
1401 }
1402
1403 /*
1404 * This function to perform essential post EOP cse related operations
1405 * upon SoC selecting `SOC_INTEL_CSE_SEND_EOP_LATE` config
1406 */
cse_late_finalize(void)1407 void cse_late_finalize(void)
1408 {
1409 if (!CONFIG(SOC_INTEL_CSE_SEND_EOP_LATE) &&
1410 !CONFIG(SOC_INTEL_CSE_SEND_EOP_ASYNC))
1411 return;
1412
1413 if (!CONFIG(USE_FSP_NOTIFY_PHASE_READY_TO_BOOT))
1414 cse_final_ready_to_boot();
1415
1416 if (!CONFIG(USE_FSP_NOTIFY_PHASE_END_OF_FIRMWARE))
1417 cse_final_end_of_firmware();
1418 }
1419
intel_cse_get_rw_version(void)1420 static void intel_cse_get_rw_version(void)
1421 {
1422 if (CONFIG(SOC_INTEL_CSE_LITE_SYNC_BY_PAYLOAD))
1423 return;
1424
1425 struct cse_specific_info *info = cbmem_find(CBMEM_ID_CSE_INFO);
1426 if (info == NULL)
1427 return;
1428
1429 printk(BIOS_DEBUG, "CSE RW Firmware Version: %d.%d.%d.%d\n",
1430 info->cse_fwp_version.cur_cse_fw_version.major,
1431 info->cse_fwp_version.cur_cse_fw_version.minor,
1432 info->cse_fwp_version.cur_cse_fw_version.hotfix,
1433 info->cse_fwp_version.cur_cse_fw_version.build);
1434 }
1435
1436 /*
1437 * `cse_final` function is native implementation of equivalent events performed by
1438 * each FSP NotifyPhase() API invocations. It also displays CSE firmware version
1439 * if stored in CBMEM region.
1440 */
cse_final(struct device * dev)1441 static void cse_final(struct device *dev)
1442 {
1443 if (CONFIG(SOC_INTEL_STORE_CSE_FW_VERSION))
1444 intel_cse_get_rw_version();
1445 /*
1446 * SoC user can have three options for sending EOP:
1447 * 1. Choose to send EOP late
1448 * 2. Choose to send EOP cmd asynchronously
1449 * 3. Choose to send EOP cmd from payload i.e. skip here
1450 *
1451 * In case of sending EOP in asynchronous mode, the EOP command
1452 * has most likely not been completed yet. The finalization steps
1453 * will be run once the EOP command has successfully been completed.
1454 */
1455 if (CONFIG(SOC_INTEL_CSE_SEND_EOP_LATE) ||
1456 CONFIG(SOC_INTEL_CSE_SEND_EOP_BY_PAYLOAD) ||
1457 CONFIG(SOC_INTEL_CSE_SEND_EOP_ASYNC))
1458 return;
1459
1460 /* 1. Send EOP to CSE if not done.*/
1461 if (CONFIG(SOC_INTEL_CSE_SET_EOP))
1462 cse_send_end_of_post();
1463
1464 if (!CONFIG(USE_FSP_NOTIFY_PHASE_READY_TO_BOOT))
1465 cse_final_ready_to_boot();
1466
1467 if (!CONFIG(USE_FSP_NOTIFY_PHASE_END_OF_FIRMWARE))
1468 cse_final_end_of_firmware();
1469 }
1470
1471 struct device_operations cse_ops = {
1472 .set_resources = pci_dev_set_resources,
1473 .read_resources = pci_dev_read_resources,
1474 .enable_resources = pci_dev_enable_resources,
1475 .init = pci_dev_init,
1476 .ops_pci = &pci_dev_ops_pci,
1477 .enable = cse_set_state,
1478 .final = cse_final,
1479 };
1480
/*
 * PCI device IDs of the CSE (HECI) functions this driver binds to, one set
 * per supported SoC generation (identified by the macro-name prefixes,
 * e.g. PTL, LNL, MTL, ADP). Zero-terminated for the PCI driver matcher.
 */
static const unsigned short pci_device_ids[] = {
	PCI_DID_INTEL_PTL_CSE0,
	PCI_DID_INTEL_LNL_CSE0,
	PCI_DID_INTEL_MTL_CSE0,
	PCI_DID_INTEL_APL_CSE0,
	PCI_DID_INTEL_GLK_CSE0,
	PCI_DID_INTEL_CNL_CSE0,
	PCI_DID_INTEL_LWB_CSE0,
	PCI_DID_INTEL_LWB_CSE0_SUPER,
	PCI_DID_INTEL_CNP_H_CSE0,
	PCI_DID_INTEL_CMP_CSE0,
	PCI_DID_INTEL_CMP_H_CSE0,
	PCI_DID_INTEL_TGL_CSE0,
	PCI_DID_INTEL_TGL_H_CSE0,
	PCI_DID_INTEL_MCC_CSE0,
	PCI_DID_INTEL_MCC_CSE1,
	PCI_DID_INTEL_MCC_CSE2,
	PCI_DID_INTEL_MCC_CSE3,
	PCI_DID_INTEL_JSP_CSE0,
	PCI_DID_INTEL_JSP_CSE1,
	PCI_DID_INTEL_JSP_CSE2,
	PCI_DID_INTEL_JSP_CSE3,
	PCI_DID_INTEL_ADP_P_CSE0,
	PCI_DID_INTEL_ADP_P_CSE1,
	PCI_DID_INTEL_ADP_P_CSE2,
	PCI_DID_INTEL_ADP_P_CSE3,
	PCI_DID_INTEL_ADP_S_CSE0,
	PCI_DID_INTEL_ADP_S_CSE1,
	PCI_DID_INTEL_ADP_S_CSE2,
	PCI_DID_INTEL_ADP_S_CSE3,
	PCI_DID_INTEL_ADP_M_CSE0,
	PCI_DID_INTEL_ADP_M_CSE1,
	PCI_DID_INTEL_ADP_M_CSE2,
	PCI_DID_INTEL_ADP_M_CSE3,
	PCI_DID_INTEL_RPP_S_CSE0,
	PCI_DID_INTEL_RPP_S_CSE1,
	PCI_DID_INTEL_RPP_S_CSE2,
	PCI_DID_INTEL_RPP_S_CSE3,
	0, /* list terminator */
};
1521
1522 static const struct pci_driver cse_driver __pci_driver = {
1523 .ops = &cse_ops,
1524 .vendor = PCI_VID_INTEL,
1525 /* SoC/chipset needs to provide PCI device ID */
1526 .devices = pci_device_ids
1527 };
1528
1529 #endif
1530