xref: /aosp_15_r20/external/coreboot/src/soc/amd/common/block/spi/fch_spi_ctrl.c (revision b9411a12aaaa7e1e6a6fb7c5e057f44ee179a49c)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 
3 #include <console/console.h>
4 #include <spi_flash.h>
5 #include <soc/pci_devs.h>
6 #include <amdblocks/lpc.h>
7 #include <amdblocks/spi.h>
8 #include <device/pci_ops.h>
9 #include <lib.h>
10 #include <timer.h>
11 #include <types.h>
12 
13 #define GRANULARITY_TEST_4k		0x0000f000		/* bits 15-12 */
14 #define WORD_TO_DWORD_UPPER(x)		((x << 16) & 0xffff0000)
15 
16 /* SPI MMIO registers */
17 #define SPI_RESTRICTED_CMD1		0x04
18 #define SPI_RESTRICTED_CMD2		0x08
19 #define SPI_CNTRL1			0x0c
20 #define SPI_CMD_CODE			0x45
21 #define SPI_CMD_TRIGGER			0x47
22 #define   SPI_CMD_TRIGGER_EXECUTE	BIT(7)
23 #define SPI_TX_BYTE_COUNT		0x48
24 #define SPI_RX_BYTE_COUNT		0x4b
25 #define SPI_STATUS			0x4c
26 #define   SPI_DONE_BYTE_COUNT_SHIFT	0
27 #define   SPI_DONE_BYTE_COUNT_MASK	0xff
28 #define   SPI_FIFO_WR_PTR_SHIFT		8
29 #define   SPI_FIFO_WR_PTR_MASK		0x7f
30 #define   SPI_FIFO_RD_PTR_SHIFT		16
31 #define   SPI_FIFO_RD_PTR_MASK		0x7f
32 
33 enum spi_dump_state_phase {
34 	SPI_DUMP_STATE_BEFORE_CMD,
35 	SPI_DUMP_STATE_AFTER_CMD,
36 };
37 
dump_state(enum spi_dump_state_phase phase)38 static void dump_state(enum spi_dump_state_phase phase)
39 {
40 	u8 dump_size;
41 	uintptr_t addr;
42 	u32 status;
43 
44 	if (!CONFIG(SOC_AMD_COMMON_BLOCK_SPI_DEBUG))
45 		return;
46 
47 	switch (phase) {
48 	case SPI_DUMP_STATE_BEFORE_CMD:
49 		printk(BIOS_DEBUG, "SPI: Before execute\n");
50 		break;
51 	case SPI_DUMP_STATE_AFTER_CMD:
52 		printk(BIOS_DEBUG, "SPI: Transaction finished\n");
53 		break;
54 	default: /* We shouldn't reach this */
55 		return;
56 	}
57 
58 	printk(BIOS_DEBUG, "Cntrl0: %x\n", spi_read32(SPI_CNTRL0));
59 
60 	status = spi_read32(SPI_STATUS);
61 	printk(BIOS_DEBUG, "Status: %x\n", status);
62 	printk(BIOS_DEBUG,
63 	       "  Busy: %u, FIFO Read Pointer: %u, FIFO Write Pointer: %u, Done Bytes: %u\n",
64 	       !!(status & SPI_BUSY),
65 	       (status >> SPI_FIFO_RD_PTR_SHIFT) & SPI_FIFO_RD_PTR_MASK,
66 	       (status >> SPI_FIFO_WR_PTR_SHIFT) & SPI_FIFO_WR_PTR_MASK,
67 	       (status >> SPI_DONE_BYTE_COUNT_SHIFT) & SPI_DONE_BYTE_COUNT_MASK);
68 
69 	printk(BIOS_DEBUG, "CmdCode: %x\n", spi_read8(SPI_CMD_CODE));
70 
71 	addr = spi_get_bar() + SPI_FIFO;
72 
73 	switch (phase) {
74 	case SPI_DUMP_STATE_BEFORE_CMD:
75 		dump_size = spi_read8(SPI_TX_BYTE_COUNT);
76 		printk(BIOS_DEBUG, "TxByteCount: %x\n", dump_size);
77 		break;
78 	case SPI_DUMP_STATE_AFTER_CMD:
79 		dump_size = spi_read8(SPI_RX_BYTE_COUNT);
80 		printk(BIOS_DEBUG, "RxByteCount: %x\n", dump_size);
81 		addr += spi_read8(SPI_TX_BYTE_COUNT);
82 		break;
83 	}
84 
85 	if (dump_size > 0)
86 		hexdump((void *)addr, dump_size);
87 }
88 
wait_for_ready(void)89 static int wait_for_ready(void)
90 {
91 	const uint32_t timeout_ms = 500;
92 	struct stopwatch sw;
93 
94 	stopwatch_init_msecs_expire(&sw, timeout_ms);
95 
96 	do {
97 		if (!(spi_read32(SPI_STATUS) & SPI_BUSY))
98 			return 0;
99 	} while (!stopwatch_expired(&sw));
100 
101 	return -1;
102 }
103 
execute_command(void)104 static int execute_command(void)
105 {
106 	dump_state(SPI_DUMP_STATE_BEFORE_CMD);
107 
108 	spi_write8(SPI_CMD_TRIGGER, SPI_CMD_TRIGGER_EXECUTE);
109 
110 	if (wait_for_ready()) {
111 		printk(BIOS_ERR, "FCH SPI Error: Timeout executing command\n");
112 		return -1;
113 	}
114 
115 	dump_state(SPI_DUMP_STATE_AFTER_CMD);
116 
117 	return 0;
118 }
119 
spi_init(void)120 void spi_init(void)
121 {
122 	printk(BIOS_DEBUG, "%s: SPI BAR at 0x%08lx\n", __func__, spi_get_bar());
123 }
124 
/*
 * Execute one SPI transaction: the first byte of dout is the opcode (sent
 * via the command register), the remaining bytesout-1 bytes go through the
 * FIFO, and bytesin response bytes are read back out of the FIFO after the
 * transmitted bytes.  Returns 0 on success, -1 on error.
 */
static int spi_ctrlr_xfer(const struct spi_slave *slave, const void *dout,
			size_t bytesout, void *din, size_t bytesin)
{
	size_t count;
	uint8_t cmd;
	uint8_t *bufin = din;
	const uint8_t *bufout = dout;

	if (CONFIG(SOC_AMD_COMMON_BLOCK_SPI_DEBUG))
		printk(BIOS_DEBUG, "%s(%zx, %zx)\n", __func__, bytesout, bytesin);

	/*
	 * A transaction must carry at least the opcode byte.  Without this
	 * check, an empty out vector would dereference dout[0] and the
	 * decrement below would wrap bytesout around to SIZE_MAX, defeating
	 * the FIFO depth check.
	 */
	if (bytesout == 0) {
		printk(BIOS_ERR, "FCH SPI: No opcode to transfer, code error!\n");
		return -1;
	}

	/* First byte is cmd which cannot be sent through FIFO */
	cmd = bufout[0];
	bufout++;
	bytesout--;

	/*
	 * Check if this is a write command attempting to transfer more bytes
	 * than the controller can handle.  Iterations for writes are not
	 * supported here because each SPI write command needs to be preceded
	 * and followed by other SPI commands.
	 */
	if (bytesout + bytesin > SPI_FIFO_DEPTH) {
		printk(BIOS_WARNING, "FCH SPI: Too much to transfer, code error!\n");
		return -1;
	}

	if (wait_for_ready()) {
		printk(BIOS_ERR, "FCH SPI: Failed to acquire the SPI bus\n");
		return -1;
	}

	/* Program opcode and byte counts, then fill the TX part of the FIFO */
	spi_write8(SPI_CMD_CODE, cmd);
	spi_write8(SPI_TX_BYTE_COUNT, bytesout);
	spi_write8(SPI_RX_BYTE_COUNT, bytesin);

	for (count = 0; count < bytesout; count++)
		spi_write8(SPI_FIFO + count, bufout[count]);

	if (execute_command())
		return -1;

	/* RX data follows the transmitted bytes in the FIFO */
	for (count = 0; count < bytesin; count++)
		bufin[count] = spi_read8(SPI_FIFO + count + bytesout);

	return 0;
}
172 
/*
 * Run a list of SPI operation vectors while holding the hardware mutex so
 * concurrent threads cannot interleave FIFO accesses.
 */
static int xfer_vectors(const struct spi_slave *slave,
			struct spi_op vectors[], size_t count)
{
	int ret;

	thread_mutex_lock(&spi_hw_mutex);
	ret = spi_flash_vector_helper(slave, vectors, count, spi_ctrlr_xfer);
	thread_mutex_unlock(&spi_hw_mutex);

	return ret;
}
184 
/*
 * Program 'value' into the first unused (zero) ROM protection range
 * register.  Returns 0 on success, -1 when every range is already in use.
 */
static int protect_a_range(u32 value)
{
	u8 idx;

	for (idx = 0; idx < MAX_ROM_PROTECT_RANGES; idx++) {
		if (pci_read_config32(SOC_LPC_DEV, ROM_PROTECT_RANGE_REG(idx)) == 0) {
			pci_write_config32(SOC_LPC_DEV, ROM_PROTECT_RANGE_REG(idx), value);
			return 0;
		}
	}

	return -1; /* no free range */
}
202 
/*
 * Protect range of SPI flash defined by region using the SPI flash controller.
 *
 * Note: Up to 4 ranges can be protected, though if a particular region requires more than one
 * range, total number of regions decreases accordingly. Each range can be programmed to 4KiB or
 * 64KiB granularity.
 *
 * Warning: If more than 1 region needs protection, and they need mixed protections (read/write)
 * than start with the region that requires the most protection. After the restricted commands
 * have been written, they can't be changed (write once). So if first region is write protection
 * and second region is read protection, it's best to define first region as read and write
 * protection.
 */
static int fch_spi_flash_protect(const struct spi_flash *flash, const struct region *region,
				 const enum ctrlr_prot_type type)
{
	int ret;
	u32 reg32, rom_base, range_base;
	size_t addr, len, gran_value, total_ranges, range;
	bool granularity_64k = true; /* assume 64k granularity */

	addr = region_offset(region);
	len = region_sz(region);

	/* ROM base: upper 16 bits of the LPC ROM address range 2 start register */
	reg32 = pci_read_config32(SOC_LPC_DEV, ROM_ADDRESS_RANGE2_START);
	rom_base = WORD_TO_DWORD_UPPER(reg32);
	/* The region must lie inside the ROM window */
	if (addr < rom_base)
		return -1;
	/* NOTE(review): offset of the region within the ROM window; presumably
	   rom_base is a power of two so '%' acts as a mask — confirm on target. */
	range_base = addr % rom_base;

	/* Define granularity to be used */
	/* If either the range start or length is not 64 KiB aligned
	   (any of bits 15-12 set), fall back to 4 KiB granularity. */
	if (GRANULARITY_TEST_4k & range_base)
		granularity_64k = false; /* use 4K granularity */
	if (GRANULARITY_TEST_4k & len)
		granularity_64k = false; /* use 4K granularity */

	/* Define the first range and total number of ranges required */
	if (granularity_64k) {
		gran_value = 0x00010000; /* 64 KiB */
		range_base = range_base >> 16; /* range field counts 64 KiB units */
	} else {
		gran_value = 0x00001000; /* 4 KiB */
		range_base = range_base >> 12; /* range field counts 4 KiB units */
	}
	total_ranges = len / gran_value;
	range_base &= RANGE_ADDR_MASK;

	/* Create reg32 to be written into a range register and program required ranges */
	reg32 = rom_base & ROM_BASE_MASK;
	reg32 |= range_base;
	if (granularity_64k)
		reg32 |= RANGE_UNIT;
	if (type & WRITE_PROTECT)
		reg32 |= ROM_RANGE_WP;
	if (type & READ_PROTECT)
		reg32 |= ROM_RANGE_RP;

	/* One protection register per granularity-sized chunk of the region */
	for (range = 0; range < total_ranges; range++) {
		ret = protect_a_range(reg32);
		if (ret)
			return ret;
		/*
		 * Next range (lower 8 bits). Range points to the start address of a region.
		 * The range value must be multiplied by the granularity (which is also the
		 * size of the region) to get the actual offset from the SPI start address.
		 */
		reg32++;
	}

	/* define commands to be blocked if in range */
	reg32 = 0;
	if (type & WRITE_PROTECT) {
		/* FIXME */
		printk(BIOS_INFO, "%s: Write Enable and Write Cmd not blocked\n", __func__);
		reg32 |= (flash->erase_cmd << 8); /* only the erase opcode is restricted */
	}
	if (type & READ_PROTECT) {
		/* FIXME */
		printk(BIOS_INFO, "%s: READ_PROTECT not supported.\n", __func__);
	}

	/* Final steps to protect region */
	/* Restricted command registers are write-once (see warning above) */
	spi_write32(SPI_RESTRICTED_CMD1, reg32);
	reg32 = spi_read32(SPI_CNTRL0);
	reg32 &= ~SPI_ACCESS_MAC_ROM_EN;
	spi_write32(SPI_CNTRL0, reg32);

	return 0;
}
292 
/* FCH SPI flash controller ops: all transfers funnel through xfer_vectors,
   which serializes hardware access; transfer size is capped at the FIFO depth
   with cmd/opcode length deducted by the SPI core. */
static const struct spi_ctrlr fch_spi_flash_ctrlr = {
	.xfer_vector = xfer_vectors,
	.max_xfer_size = SPI_FIFO_DEPTH,
	.flags = SPI_CNTRLR_DEDUCT_CMD_LEN | SPI_CNTRLR_DEDUCT_OPCODE_LEN,
	.flash_protect = fch_spi_flash_protect,
};

/* The FCH controller handles exactly one SPI bus (bus 0). */
const struct spi_ctrlr_buses spi_ctrlr_bus_map[] = {
	{
		.ctrlr = &fch_spi_flash_ctrlr,
		.bus_start = 0,
		.bus_end = 0,
	},
};

const size_t spi_ctrlr_bus_map_count = ARRAY_SIZE(spi_ctrlr_bus_map);
309