/* SPDX-License-Identifier: GPL-2.0-only */

#include <amdblocks/lpc.h>
#include <amdblocks/spi.h>
#include <assert.h>
#include <boot_device.h>
#include <cbfs.h>
#include <commonlib/bsd/helpers.h>
#include <commonlib/region.h>
#include <console/console.h>
#include <delay.h>
#include <device/pci_ops.h>
#include <soc/pci_devs.h>
#include <spi_flash.h>
#include <string.h>
#include <thread.h>
#include <types.h>

/* The ROM is memory mapped just below 4GiB. Form a pointer for the base. */
#define rom_base ((void *)(uintptr_t)(0x100000000ULL - CONFIG_ROM_SIZE))

struct spi_dma_transaction {
	uint8_t *destination;
	size_t source;
	size_t size;
	size_t transfer_size;
	size_t remaining;
};

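/* Plain memory-mapped read; used when DMA can't be used and to copy the unaligned tail of a
   DMA transfer. */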
static ssize_t spi_dma_readat_mmap(const struct region_device *rd, void *b, size_t offset,
				   size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(b, &mdev->base[offset], size);

	return size;
}

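/*
 * Both helpers poll LPC_ROM_DMA_EC_HOST_CONTROL: the START bit remains set while a transfer
 * is in flight and the ERROR bit reports a failed transfer.
 */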
static bool spi_dma_is_busy(void)
{
	return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL)
	       & LPC_ROM_DMA_CTRL_START;
}

static bool spi_dma_has_error(void)
{
	return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL)
	       & LPC_ROM_DMA_CTRL_ERROR;
}

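/*
 * DMA is only attempted when the transfer is at least LPC_ROM_DMA_MIN_ALIGNMENT bytes long
 * and both the destination buffer and the flash offset are aligned to that boundary.
 */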
static bool can_use_dma(void *destination, size_t source, size_t size)
{
	/*
	 * Print a notice if reading more than 1024 bytes using mmap. This makes
	 * it easier to debug why the SPI DMA wasn't used.
	 */
	const size_t warning_size = 1024;

	if (size < LPC_ROM_DMA_MIN_ALIGNMENT)
		return false;

	if (!IS_ALIGNED((uintptr_t)destination, LPC_ROM_DMA_MIN_ALIGNMENT)) {
		if (size > warning_size)
			printk(BIOS_DEBUG, "Target %p is unaligned\n", destination);
		return false;
	}

	if (!IS_ALIGNED(source, LPC_ROM_DMA_MIN_ALIGNMENT)) {
		if (size > warning_size)
			printk(BIOS_DEBUG, "Source %#zx is unaligned\n", source);
		return false;
	}

	return true;
}

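/*
 * Program the source, destination and length registers and kick off a single DMA transfer of
 * up to LPC_ROM_DMA_CTRL_MAX_BYTES. The SPI hardware mutex is taken here and released in
 * continue_spi_dma_transaction() once the transfer has finished.
 */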
static void start_spi_dma_transaction(struct spi_dma_transaction *transaction)
{
	uint32_t ctrl;

	printk(BIOS_SPEW, "%s: dest: %p, source: %#zx, remaining: %zu\n", __func__,
	       transaction->destination, transaction->source, transaction->remaining);

	/*
	 * We should have complete control over the DMA controller, so there shouldn't
	 * be any outstanding transactions.
	 */
	assert(!spi_dma_is_busy());
	assert(IS_ALIGNED((uintptr_t)transaction->destination, LPC_ROM_DMA_MIN_ALIGNMENT));
	assert(IS_ALIGNED(transaction->source, LPC_ROM_DMA_MIN_ALIGNMENT));
	assert(transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT);

	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR, transaction->source);
	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_DST_ADDR,
			   (uintptr_t)transaction->destination);

	ctrl = pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL);
	ctrl &= ~LPC_ROM_DMA_CTRL_DW_COUNT_MASK;

	transaction->transfer_size =
		MIN(LPC_ROM_DMA_CTRL_MAX_BYTES,
		    ALIGN_DOWN(transaction->remaining, LPC_ROM_DMA_MIN_ALIGNMENT));

	ctrl |= LPC_ROM_DMA_CTRL_DW_COUNT(transaction->transfer_size);
	ctrl |= LPC_ROM_DMA_CTRL_ERROR; /* Clear error */
	ctrl |= LPC_ROM_DMA_CTRL_START;

	/*
	 * Ensure we have exclusive access to the SPI controller before starting the LPC SPI DMA
	 * transaction.
	 */
	thread_mutex_lock(&spi_hw_mutex);

	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL, ctrl);
}

/* Returns true if transaction is still in progress. */
static bool continue_spi_dma_transaction(const struct region_device *rd,
					 struct spi_dma_transaction *transaction)
{
	/* Verify we are looking at the correct transaction */
	assert(pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR) == transaction->source);

	if (spi_dma_is_busy())
		return true;

	/*
	 * Unlock the SPI mutex between DMA transactions to allow other users of the SPI
	 * controller to interleave their transactions.
	 */
	thread_mutex_unlock(&spi_hw_mutex);

	if (spi_dma_has_error()) {
		printk(BIOS_ERR, "SPI DMA failure: dest: %p, source: %#zx, size: %zu\n",
		       transaction->destination, transaction->source,
		       transaction->transfer_size);
		return false;
	}

	transaction->destination += transaction->transfer_size;
	transaction->source += transaction->transfer_size;
	transaction->remaining -= transaction->transfer_size;

	if (transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT) {
		start_spi_dma_transaction(transaction);
		return true;
	}

	if (transaction->remaining > 0) {
		/* Use mmap to finish off the transfer */
		spi_dma_readat_mmap(rd, transaction->destination, transaction->source,
				    transaction->remaining);

		transaction->destination += transaction->remaining;
		transaction->source += transaction->remaining;
		transaction->remaining -= transaction->remaining;
	}

	return false;
}

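/*
 * Serializes entire spi_dma_readat_dma() operations so only one caller drives the DMA engine
 * at a time; the shared spi_hw_mutex only protects the individual hardware transfers.
 */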
static struct thread_mutex spi_dma_hw_mutex;

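/*
 * Read 'size' bytes from flash offset 'source' into 'destination' using the DMA engine,
 * polling until the transfer completes. Returns the number of bytes read, or -1 on error.
 */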
static ssize_t spi_dma_readat_dma(const struct region_device *rd, void *destination,
				  size_t source, size_t size)
{
	struct spi_dma_transaction transaction = {
		.destination = destination,
		.source = source,
		.size = size,
		.remaining = size,
	};

	printk(BIOS_SPEW, "%s: start: dest: %p, source: %#zx, size: %zu\n", __func__,
	       destination, source, size);

	thread_mutex_lock(&spi_dma_hw_mutex);

	start_spi_dma_transaction(&transaction);

	do {
		udelay(2);
	} while (continue_spi_dma_transaction(rd, &transaction));

	thread_mutex_unlock(&spi_dma_hw_mutex);

	printk(BIOS_SPEW, "%s: end: dest: %p, source: %#zx, remaining: %zu\n",
	       __func__, destination, source, transaction.remaining);

	/* Allow queued up transactions to continue */
	thread_yield();

	if (transaction.remaining)
		return -1;

	return transaction.size;
}

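/* readat() hook: use DMA when the request qualifies, otherwise fall back to the memory-mapped
   copy. */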
static ssize_t spi_dma_readat(const struct region_device *rd, void *b, size_t offset,
			      size_t size)
{
	if (can_use_dma(b, offset, size))
		return spi_dma_readat_dma(rd, b, offset, size);
	else
		return spi_dma_readat_mmap(rd, b, offset, size);
}

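/*
 * mmap() hook: when the CBFS cache is enabled, back the mapping with a buffer from the cache
 * and fill it using DMA; otherwise hand out a pointer directly into the memory-mapped ROM.
 */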
static void *spi_dma_mmap(const struct region_device *rd, size_t offset, size_t size)
{
	const struct mem_region_device *mdev;
	void *mapping;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	if (!CONFIG_CBFS_CACHE_SIZE)
		return &mdev->base[offset];

	mapping = mem_pool_alloc(&cbfs_cache, size);
	if (!mapping) {
		printk(BIOS_INFO, "%s: Could not allocate %zu bytes from memory pool\n",
		       __func__, size);
		/* Fall back to the memory map */
		return &mdev->base[offset];
	}

	if (spi_dma_readat(rd, mapping, offset, size) != size) {
		printk(BIOS_ERR, "%s: Error reading into mmap buffer\n", __func__);
		mem_pool_free(&cbfs_cache, mapping);
		/* Fall back to a memory-mapped read - not expected to fail, at least for now */
		spi_dma_readat_mmap(rd, mapping, offset, size);
	}

	return mapping;
}

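/* munmap() hook: return the mapping's buffer to the CBFS cache when the cache is in use. */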
static int spi_dma_munmap(const struct region_device *rd __always_unused, void *mapping)
{
	if (CONFIG_CBFS_CACHE_SIZE)
		mem_pool_free(&cbfs_cache, mapping);
	return 0;
}

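/* Read-only boot device backed by the memory-mapped ROM, with reads accelerated by SPI DMA. */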
const struct region_device_ops spi_dma_rdev_ro_ops = {
	.mmap = spi_dma_mmap,
	.munmap = spi_dma_munmap,
	.readat = spi_dma_readat,
};

static const struct mem_region_device boot_dev = {
	.base = rom_base,
	.rdev = REGION_DEV_INIT(&spi_dma_rdev_ro_ops, 0, CONFIG_ROM_SIZE),
};

const struct region_device *boot_device_ro(void)
{
	return &boot_dev.rdev;
}

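/* The boot flash is exposed through a single MMIO window covering the whole ROM just below
   4GiB. */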
uint32_t spi_flash_get_mmap_windows(struct flash_mmap_window *table)
{
	table->flash_base = 0;
	table->host_base = (uint32_t)(uintptr_t)rom_base;
	table->size = CONFIG_ROM_SIZE;

	return 1;
}

/*
 * Without this magic bit, the SPI DMA controller will write 0s into the destination if an MMAP
 * read happens while a DMA transaction is in progress (e.g., the PSP reading from SPI). The
 * bit that fixes this was added to Cezanne, Renoir and later SoCs, so the SPI DMA controller
 * is not reliable on any earlier generations.
 */
static void spi_dma_fix(void)
{
	/* Internal only registers */
	uint8_t val = spi_read8(0xfc);
	val |= BIT(6);
	spi_write8(0xfc, val);
}

void boot_device_init(void)
{
	spi_dma_fix();
}