/*
 * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/cadence/cdns_nand.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/utils.h>
#include <platform_def.h>

/* NAND flash device information struct */
static cnf_dev_info_t dev_info;

/*
 * Scratch buffer for read and write operations
 * DMA transfers on the Cadence NAND controller require the buffer
 * address programmed into its registers to be 8-byte aligned
 */
static uint8_t scratch_buff[PLATFORM_MTD_MAX_PAGE_SIZE] __aligned(8);

/* Wait for controller to be in idle state */
static inline void cdns_nand_wait_idle(void)
{
	uint32_t reg = 0U;

	do {
		udelay(CNF_DEF_DELAY_US);
		reg = mmio_read_32(CNF_CMDREG(CTRL_STATUS));
	} while (CNF_GET_CTRL_BUSY(reg) != 0U);
}

/* Wait for given thread to be in ready state */
static inline void cdns_nand_wait_thread_ready(uint8_t thread_id)
{
	uint32_t reg = 0U;

	do {
		udelay(CNF_DEF_DELAY_US);
		reg = mmio_read_32(CNF_CMDREG(TRD_STATUS));
		reg &= (1U << (uint32_t)thread_id);
	} while (reg != 0U);
}
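
/*
 * Each bit of TRD_STATUS reflects one thread: for example, a readback
 * of 0x4 means thread 2 is still busy, so the loop above spins until
 * that thread's bit clears.
 */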

/* Check if the last operation/command in selected thread is completed */
static int cdns_nand_last_opr_status(uint8_t thread_id)
{
	uint8_t nthreads = 0U;
	uint32_t reg = 0U;

	/* Get number of threads */
	reg = mmio_read_32(CNF_CTRLPARAM(FEATURE));
	nthreads = CNF_GET_NTHREADS(reg);

	if (thread_id >= nthreads) {
		ERROR("%s: Invalid thread ID\n", __func__);
		return -EINVAL;
	}

	/* Select thread */
	mmio_write_32(CNF_CMDREG(CMD_STAT_PTR), (uint32_t)thread_id);

	uint32_t err_mask = CNF_ECMD | CNF_EECC | CNF_EDEV | CNF_EDQS | CNF_EFAIL |
				CNF_EBUS | CNF_EDI | CNF_EPAR | CNF_ECTX | CNF_EPRO;

	do {
		udelay(CNF_DEF_DELAY_US * 2);
		reg = mmio_read_32(CNF_CMDREG(CMD_STAT));
	} while ((reg & CNF_CMPLT) == 0U);

	/* Last operation is completed; make sure no error bits are set */
	if ((reg & err_mask) != 0U) {
		ERROR("%s, CMD_STATUS:0x%x\n", __func__, reg);
		return -EIO;
	}

	return 0;
}

/* Set feature command */
int cdns_nand_set_feature(uint8_t feat_addr, uint8_t feat_val, uint8_t thread_id)
{
	/* Wait for thread to be ready */
	cdns_nand_wait_thread_ready(thread_id);

	/* Set feature address */
	mmio_write_32(CNF_CMDREG(CMD_REG1), (uint32_t)feat_addr);
	/* Set feature value */
	mmio_write_32(CNF_CMDREG(CMD_REG2), (uint32_t)feat_val);

	/* Set feature command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);

	reg |= (thread_id << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_CT_SET_FEATURE << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	return cdns_nand_last_opr_status(thread_id);
}
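
/*
 * Example (illustrative, device-specific): per ONFI, feature address
 * 0x01 selects the timing mode, so cdns_nand_set_feature(0x01, 0x00, 0)
 * would request timing mode 0 on thread 0.
 */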

/* Reset command to the selected device */
int cdns_nand_reset(uint8_t thread_id)
{
	/* Operation is executed in selected thread */
	cdns_nand_wait_thread_ready(thread_id);

	/* Select memory */
	mmio_write_32(CNF_CMDREG(CMD_REG4),
			(CNF_DEF_DEVICE << CNF_CMDREG4_MEM));

	/* Issue reset command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);

	reg |= (thread_id << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_CT_RESET_ASYNC << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	return cdns_nand_last_opr_status(thread_id);
}
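
/*
 * Minimal usage sketch (assumes thread 0 is free): platform code is
 * expected to issue a reset before the first device access, e.g.
 * (void)cdns_nand_reset(0);
 */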

/* Set operation work mode */
static void cdns_nand_set_opr_mode(uint8_t opr_mode)
{
	/* Wait for controller to be in idle state */
	cdns_nand_wait_idle();

	/* Reset DLL PHY */
	uint32_t reg = mmio_read_32(CNF_MINICTRL(DLL_PHY_CTRL));

	reg &= ~(1 << CNF_DLL_PHY_RST_N);
	mmio_write_32(CNF_MINICTRL(DLL_PHY_CTRL), reg);

	if (opr_mode == CNF_OPR_WORK_MODE_SDR) {
		/* Combo PHY Control Timing Block register settings */
		mmio_write_32(CP_CTB(CTRL_REG), CP_CTRL_REG_SDR);
		mmio_write_32(CP_CTB(TSEL_REG), CP_TSEL_REG_SDR);

		/* Combo PHY DLL register settings */
		mmio_write_32(CP_DLL(DQ_TIMING_REG), CP_DQ_TIMING_REG_SDR);
		mmio_write_32(CP_DLL(DQS_TIMING_REG), CP_DQS_TIMING_REG_SDR);
		mmio_write_32(CP_DLL(GATE_LPBK_CTRL_REG), CP_GATE_LPBK_CTRL_REG_SDR);
		mmio_write_32(CP_DLL(MASTER_CTRL_REG), CP_DLL_MASTER_CTRL_REG_SDR);

		/* Async mode timing settings */
		mmio_write_32(CNF_MINICTRL(ASYNC_TOGGLE_TIMINGS),
				(2 << CNF_ASYNC_TIMINGS_TRH) |
				(4 << CNF_ASYNC_TIMINGS_TRP) |
				(2 << CNF_ASYNC_TIMINGS_TWH) |
				(4 << CNF_ASYNC_TIMINGS_TWP));

		/* Set extended read and write mode */
		reg |= (1 << CNF_DLL_PHY_EXT_RD_MODE);
		reg |= (1 << CNF_DLL_PHY_EXT_WR_MODE);

		/* Set operation work mode in common settings */
		mmio_clrsetbits_32(CNF_MINICTRL(CMN_SETTINGS),
				CNF_CMN_SETTINGS_OPR_MASK,
				CNF_OPR_WORK_MODE_SDR);
	} else if (opr_mode == CNF_OPR_WORK_MODE_NVDDR) {
		; /* TODO: add DDR mode settings once available on SIMICS */
	} else {
		;
	}

	reg |= (1 << CNF_DLL_PHY_RST_N);
	mmio_write_32(CNF_MINICTRL(DLL_PHY_CTRL), reg);
}
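
/*
 * Note: the ASYNC_TOGGLE_TIMINGS value programmed above encodes the
 * asynchronous tRH/tRP/tWH/tWP parameters (presumably in controller
 * clock cycles): hold times of 2 and pulse widths of 4 are
 * conservative defaults for SDR mode.
 */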

/* Data transfer configuration */
static void cdns_nand_transfer_config(void)
{
	/* Wait for controller to be in idle state */
	cdns_nand_wait_idle();

	/* Configure data transfer parameters */
	mmio_write_32(CNF_CTRLCFG(TRANS_CFG0), 1);

	/* ECC is disabled */
	mmio_write_32(CNF_CTRLCFG(ECC_CFG0), 0);

	/* DMA burst select */
	mmio_write_32(CNF_CTRLCFG(DMA_SETTINGS),
			(CNF_DMA_BURST_SIZE_MAX << CNF_DMA_SETTINGS_BURST) |
			(1 << CNF_DMA_SETTINGS_OTE));

	/* Enable pre-fetching for 1K */
	mmio_write_32(CNF_CTRLCFG(FIFO_TLEVEL),
			(CNF_DMA_PREFETCH_SIZE << CNF_FIFO_TLEVEL_POS) |
			(CNF_DMA_PREFETCH_SIZE << CNF_FIFO_TLEVEL_DMA_SIZE));

	/* Select access type */
	mmio_write_32(CNF_CTRLCFG(MULTIPLANE_CFG), 0);
	mmio_write_32(CNF_CTRLCFG(CACHE_CFG), 0);
}

/* Update the NAND flash device info */
static int cdns_nand_update_dev_info(void)
{
	uint32_t reg = 0U;

	/* Read the device type and number of LUNs */
	reg = mmio_read_32(CNF_CTRLPARAM(DEV_PARAMS0));
	dev_info.type = CNF_GET_DEV_TYPE(reg);
	if (dev_info.type == CNF_DT_UNKNOWN) {
		ERROR("%s: device type unknown\n", __func__);
		return -ENXIO;
	}
	dev_info.nluns = CNF_GET_NLUNS(reg);

	/* Pages per block */
	reg = mmio_read_32(CNF_CTRLCFG(DEV_LAYOUT));
	dev_info.npages_per_block = CNF_GET_NPAGES_PER_BLOCK(reg);

	/* Sector size and last sector size */
	reg = mmio_read_32(CNF_CTRLCFG(TRANS_CFG1));
	dev_info.sector_size = CNF_GET_SCTR_SIZE(reg);
	dev_info.last_sector_size = CNF_GET_LAST_SCTR_SIZE(reg);

	/* Page size and spare size */
	reg = mmio_read_32(CNF_CTRLPARAM(DEV_AREA));
	dev_info.page_size = CNF_GET_PAGE_SIZE(reg);
	dev_info.spare_size = CNF_GET_SPARE_SIZE(reg);

	/* Device blocks per LUN */
	dev_info.nblocks_per_lun = mmio_read_32(CNF_CTRLPARAM(DEV_BLOCKS_PLUN));

	/* Calculate block size and total device size */
	dev_info.block_size = (dev_info.npages_per_block * dev_info.page_size);
	dev_info.total_size = ((unsigned long long)dev_info.block_size *
				(unsigned long long)dev_info.nblocks_per_lun *
				dev_info.nluns);
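
	/*
	 * Example: a device with 2 KiB pages, 64 pages per block, 1024
	 * blocks per LUN and a single LUN gives block_size = 128 KiB
	 * and total_size = 128 MiB.
	 */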

	VERBOSE("CNF params: page_size %d, spare_size %d, block_size %u, total_size %llu\n",
		dev_info.page_size, dev_info.spare_size,
		dev_info.block_size, dev_info.total_size);

	return 0;
}

/* NAND Flash Controller/Host initialization */
int cdns_nand_host_init(void)
{
	uint32_t reg = 0U;
	int ret = 0;

	do {
		/* Read controller status register for init complete */
		reg = mmio_read_32(CNF_CMDREG(CTRL_STATUS));
	} while (CNF_GET_INIT_COMP(reg) == 0);

	ret = cdns_nand_update_dev_info();
	if (ret != 0) {
		return ret;
	}

	INFO("CNF: device discovery completed, device type %d\n",
			dev_info.type);

	/* Enable data integrity, enable CRC and parity */
	reg = mmio_read_32(CNF_DI(CONTROL));
	reg |= (1 << CNF_DI_PAR_EN);
	reg |= (1 << CNF_DI_CRC_EN);
	mmio_write_32(CNF_DI(CONTROL), reg);

	/* Status polling mode, device control and status register */
	cdns_nand_wait_idle();
	reg = mmio_read_32(CNF_CTRLCFG(DEV_STAT));
	reg = reg & ~1;
	mmio_write_32(CNF_CTRLCFG(DEV_STAT), reg);

	/* Set operation work mode */
	cdns_nand_set_opr_mode(CNF_OPR_WORK_MODE_SDR);

	/* Set data transfer configuration parameters */
	cdns_nand_transfer_config();

	return 0;
}

/* erase: Block erase command */
int cdns_nand_erase(uint32_t offset, uint32_t size)
{
	/* Determine the starting row address from the block offset */
	uint32_t row_address = dev_info.npages_per_block * offset;
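
	/*
	 * Example: with 64 pages per block, erasing block offset 5
	 * starts at row address 320 (0x140).
	 */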

	/* Wait for thread to be in ready state */
	cdns_nand_wait_thread_ready(CNF_DEF_TRD);

	/* Set row address */
	mmio_write_32(CNF_CMDREG(CMD_REG1), row_address);

	/* Operation bank number */
	mmio_write_32(CNF_CMDREG(CMD_REG4), (CNF_DEF_DEVICE << CNF_CMDREG4_MEM));

	/* Block erase command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);

	reg |= (CNF_DEF_TRD << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_CT_ERASE << CNF_CMDREG0_CMD);
	reg |= (((size - 1) & 0xFF) << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	/* Wait for erase operation to complete */
	return cdns_nand_last_opr_status(CNF_DEF_TRD);
}

/* io mtd functions */
int cdns_nand_init_mtd(unsigned long long *size, unsigned int *erase_size)
{
	*size = dev_info.total_size;
	*erase_size = dev_info.block_size;

	return 0;
}
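
/*
 * Typical call sequence from the platform MTD glue (sketch; the
 * callers are platform-specific and not part of this file):
 *
 *	unsigned long long size;
 *	unsigned int erase_size;
 *
 *	if (cdns_nand_host_init() == 0) {
 *		(void)cdns_nand_init_mtd(&size, &erase_size);
 *	}
 */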

static uint32_t cdns_nand_get_row_address(uint32_t page, uint32_t block)
{
	uint32_t row_address = 0U;
	uint32_t req_bits = 0U;

	/* The device info is not populated yet. */
	if (dev_info.npages_per_block == 0U) {
		return 0;
	}

	for (uint32_t i = 0U; i < sizeof(uint32_t) * 8; i++) {
		if (((1U << i) & dev_info.npages_per_block) != 0U) {
			req_bits = i;
		}
	}

	row_address = ((page & GENMASK_32((req_bits - 1), 0)) |
			(block << req_bits));

	return row_address;
}
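
/*
 * Example: with 64 pages per block, req_bits = 6, so block 10, page 3
 * maps to row address (10 << 6) | 3 = 0x283.
 */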

/* NAND Flash page read */
static int cdns_nand_read_page(uint32_t block, uint32_t page, uintptr_t buffer)
{
	/* Wait for thread to be ready */
	cdns_nand_wait_thread_ready(CNF_DEF_TRD);

	/* Select device */
	mmio_write_32(CNF_CMDREG(CMD_REG4),
			(CNF_DEF_DEVICE << CNF_CMDREG4_MEM));

	/* Set host memory address for DMA transfers */
	mmio_write_32(CNF_CMDREG(CMD_REG2), (buffer & UINT32_MAX));
	mmio_write_32(CNF_CMDREG(CMD_REG3), ((buffer >> 32) & UINT32_MAX));

	/* Set row address */
	mmio_write_32(CNF_CMDREG(CMD_REG1),
			cdns_nand_get_row_address(page, block));

	/* Page read command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);

	reg |= (CNF_DEF_TRD << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_DMA_MASTER_SEL << CNF_CMDREG0_DMA);
	reg |= (CNF_CT_PAGE_READ << CNF_CMDREG0_CMD);
	reg |= (((CNF_READ_SINGLE_PAGE - 1) & 0xFF) << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	/* Wait for read operation to complete */
	if (cdns_nand_last_opr_status(CNF_DEF_TRD) != 0) {
		ERROR("%s: Page read failed\n", __func__);
		return -EIO;
	}

	return 0;
}

int cdns_nand_read(unsigned int offset, uintptr_t buffer, size_t length,
					size_t *out_length)
{
	uint32_t block = offset / dev_info.block_size;
	uint32_t end_block = (offset + length - 1U) / dev_info.block_size;
	uint32_t page_start = (offset % dev_info.block_size) / dev_info.page_size;
	uint32_t start_offset = offset % dev_info.page_size;
	uint32_t nb_pages = dev_info.block_size / dev_info.page_size;
	uint32_t bytes_read = 0U;
	uint32_t page = 0U;
	int result = 0;
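
	/*
	 * Example: with 128 KiB blocks and 2 KiB pages, offset = 0x21800
	 * and length = 0x1000 decompose to block 1, end_block 1,
	 * page_start 3 and start_offset 0.
	 */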

	INFO("CNF: %s: block %u-%u, page_start %u, len %zu, offset %u\n",
		__func__, block, end_block, page_start, length, offset);

	if ((offset >= dev_info.total_size) ||
		((offset + length - 1U) >= dev_info.total_size) ||
		(length == 0U)) {
		ERROR("CNF: Invalid read parameters\n");
		return -EINVAL;
	}

	*out_length = 0UL;

	while (block <= end_block) {
		for (page = page_start; page < nb_pages; page++) {
			if ((start_offset != 0U) || (length < dev_info.page_size)) {
				/* Partial page read */
				result = cdns_nand_read_page(block, page,
							(uintptr_t)scratch_buff);
				if (result != 0) {
					return result;
				}

				bytes_read = MIN((size_t)(dev_info.page_size - start_offset),
								length);

				memcpy((uint8_t *)buffer, scratch_buff + start_offset,
						bytes_read);
				start_offset = 0U;
			} else {
				/* Full page read */
				result = cdns_nand_read_page(block, page,
							(uintptr_t)scratch_buff);
				if (result != 0) {
					return result;
				}

				bytes_read = dev_info.page_size;
				memcpy((uint8_t *)buffer, scratch_buff, bytes_read);
			}

			length -= bytes_read;
			buffer += bytes_read;
			*out_length += bytes_read;

			/* All the bytes have been read */
			if (length == 0U) {
				break;
			}

			udelay(CNF_READ_INT_DELAY_US);
		} /* for */

		page_start = 0U;
		block++;
	} /* while */

	return 0;
}