// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom_adm.h>
#include <linux/dma/qcom_bam_dma.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mtd/nand-qpic-common.h>

/**
 * qcom_free_bam_transaction() - Frees the BAM transaction memory
 * @nandc: qpic nand controller
 *
 * This function frees the BAM transaction memory
 */
void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	kfree(bam_txn);
}
EXPORT_SYMBOL(qcom_free_bam_transaction);

/**
 * qcom_alloc_bam_transaction() - allocate BAM transaction
 * @nandc: qpic nand controller
 *
 * This function will allocate and initialize the BAM transaction structure
 */
struct bam_transaction *
qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn;
	size_t bam_txn_size;
	unsigned int num_cw = nandc->max_cwperpage;
	void *bam_txn_buf;

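	/*
	 * A single allocation holds the bam_transaction structure followed by
	 * the per-codeword command elements, command SGLs and data SGLs; the
	 * buffer is carved up into those regions below.
	 */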
	bam_txn_size =
		sizeof(*bam_txn) + num_cw *
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

	bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
	if (!bam_txn_buf)
		return NULL;

	bam_txn = bam_txn_buf;
	bam_txn_buf += sizeof(*bam_txn);

	bam_txn->bam_ce = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

	bam_txn->data_sgl = bam_txn_buf;

	init_completion(&bam_txn->txn_done);

	return bam_txn;
}
EXPORT_SYMBOL(qcom_alloc_bam_transaction);

/**
 * qcom_clear_bam_transaction() - Clears the BAM transaction
 * @nandc: qpic nand controller
 *
 * This function will clear the BAM transaction indexes.
 */
void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->supports_bam)
		return;

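	/* reset position bookkeeping and scatterlists for the next transaction */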
	memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
	bam_txn->last_data_desc = NULL;

	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_CMD_SGL);
	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_DATA_SGL);

	reinit_completion(&bam_txn->txn_done);
}
EXPORT_SYMBOL(qcom_clear_bam_transaction);

/**
 * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
 * @data: data pointer
 *
 * This function is a callback for DMA descriptor completion
 */
void qcom_qpic_bam_dma_done(void *data)
{
	struct bam_transaction *bam_txn = data;

	complete(&bam_txn->txn_done);
}
EXPORT_SYMBOL(qcom_qpic_bam_dma_done);

/**
 * qcom_nandc_dev_to_mem() - Sync the register read buffer for CPU or device
 * @nandc: qpic nand controller
 * @is_cpu: true to sync for the CPU, false to sync for the device
 *
 * This function performs a DMA sync of the register read buffer for either
 * the CPU or the device.
 */
inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
{
	if (!nandc->props->supports_bam)
		return;

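	/* sync the register read buffer for whichever side accesses it next */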
	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
}
EXPORT_SYMBOL(qcom_nandc_dev_to_mem);

/**
 * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
 * @nandc: qpic nand controller
 * @chan: dma channel
 * @flags: flags to control DMA descriptor preparation
 *
 * This function maps the scatter gather list for DMA transfer and forms the
 * DMA descriptor for BAM. This descriptor will be added to the NAND DMA
 * descriptor queue which will be submitted to the DMA engine.
 */
int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				struct dma_chan *chan, unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

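	/*
	 * Pick the scatterlist region and transfer direction based on the
	 * channel: command and tx transfers go to the device, rx transfers
	 * come from it.
	 */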
	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	/* update last data/command descriptor */
	if (chan == nandc->cmd_chan)
		bam_txn->last_cmd_desc = dma_desc;
	else
		bam_txn->last_data_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}
EXPORT_SYMBOL(qcom_prepare_bam_async_desc);

/**
 * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
 * @nandc: qpic nand controller
 * @read: read or write type
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to write to
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares the command descriptor for BAM DMA
 * which will be used for NAND register reads and writes.
 */
int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
			       int reg_off, const void *vaddr,
			       int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	/* fill the command desc */
	for (i = 0; i < size; i++) {
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    nandc_reg_phys(nandc, reg_off + 4 * i),
				    BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 nandc_reg_phys(nandc, reg_off + 4 * i),
					 BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	/* use the separate sgl after this command */
	if (flags & NAND_BAM_NEXT_SGL) {
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
							  DMA_PREP_FENCE | DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(qcom_prep_bam_dma_desc_cmd);

/**
 * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
 * @nandc: qpic nand controller
 * @read: read or write type
 * @vaddr: virtual address of the buffer we want to write to
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares the data descriptor for BAM DMA which
 * will be used for NAND data reads and writes.
 */
int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				const void *vaddr, int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

		/*
		 * BAM will only set EOT for DMA_PREP_INTERRUPT, so if this flag
		 * is not set, form the DMA descriptor
		 */
		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
							  DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(qcom_prep_bam_dma_desc_data);

/**
 * qcom_prep_adm_dma_desc() - Prepare a descriptor for ADM DMA
 * @nandc: qpic nand controller
 * @read: read or write type
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to write to
 * @size: ADM DMA transaction size in bytes
 * @flow_control: enable flow control
 *
 * This function prepares a descriptor for the ADM DMA engine.
 */
int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			   int reg_off, const void *vaddr, int size,
			   bool flow_control)
{
	struct qcom_adm_peripheral_config periph_conf = {};
	struct dma_async_tx_descriptor *dma_desc;
	struct dma_slave_config slave_conf = {0};
	enum dma_transfer_direction dir_eng;
	struct desc_info *desc;
	struct scatterlist *sgl;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (!ret) {
		ret = -ENOMEM;
		goto err;
	}

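	/*
	 * Point the slave config at the controller address for this transfer;
	 * the CRCI is passed along when one is provided so the ADM engine can
	 * apply flow control.
	 */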
	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		if (nandc->data_crci) {
			periph_conf.crci = nandc->data_crci;
			slave_conf.peripheral_config = &periph_conf;
			slave_conf.peripheral_size = sizeof(periph_conf);
		}
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		if (nandc->cmd_crci) {
			periph_conf.crci = nandc->cmd_crci;
			slave_conf.peripheral_config = &periph_conf;
			slave_conf.peripheral_size = sizeof(periph_conf);
		}
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}
EXPORT_SYMBOL(qcom_prep_adm_dma_desc);

/**
 * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
 * @nandc: qpic nand controller
 * @first: offset of the first register in the contiguous block
 * @num_regs: number of registers to read
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares a descriptor to read a given number of
 * contiguous registers to the reg_read_buf pointer.
 */
int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
		      int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->supports_bam)
		return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
						  num_regs, flags);

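	/* only READ_ID and FLASH_STATUS reads need flow control on the ADM path */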
	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
				      num_regs * sizeof(u32), flow_control);
}
EXPORT_SYMBOL(qcom_read_reg_dma);

/**
 * qcom_write_reg_dma() - write a given number of registers
 * @nandc: qpic nand controller
 * @vaddr: contiguous memory from which the register values will
 *	   be written
 * @first: offset of the first register in the contiguous block
 * @num_regs: number of registers to write
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares a descriptor to write a given number of
 * contiguous registers.
 */
int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
		       int first, int num_regs, unsigned int flags)
{
	bool flow_control = false;

	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->supports_bam)
		return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
						  num_regs, flags);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
				      num_regs * sizeof(u32), flow_control);
}
EXPORT_SYMBOL(qcom_write_reg_dma);

/**
 * qcom_read_data_dma() - transfer data
 * @nandc: qpic nand controller
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to write to
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares a DMA descriptor to transfer data from the
 * controller's internal buffer to the buffer 'vaddr'
 */
int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
		       const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->supports_bam)
		return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

	return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}
EXPORT_SYMBOL(qcom_read_data_dma);

/**
 * qcom_write_data_dma() - transfer data
 * @nandc: qpic nand controller
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to read from
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares a DMA descriptor to transfer data from
 * 'vaddr' to the controller's internal buffer
 */
int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->supports_bam)
		return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

	return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
EXPORT_SYMBOL(qcom_write_data_dma);

/**
 * qcom_submit_descs() - submit DMA descriptors
 * @nandc: qpic nand controller
 *
 * This function submits all the prepared DMA descriptors,
 * command or data.
 */
int qcom_submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int ret = 0;

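	/*
	 * Form descriptors for any scatterlist entries queued since the last
	 * descriptor was prepared, then submit everything on the desc list.
	 */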
	if (nandc->props->supports_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (ret)
				goto err_unmap_free_desc;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
							  DMA_PREP_INTERRUPT);
			if (ret)
				goto err_unmap_free_desc;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
							  DMA_PREP_CMD);
			if (ret)
				goto err_unmap_free_desc;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

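	/* for BAM, completion is signalled via the callback on the last command descriptor */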
	if (nandc->props->supports_bam) {
		bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
		bam_txn->last_cmd_desc->callback_param = bam_txn;

		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);
		dma_async_issue_pending(nandc->cmd_chan);

		if (!wait_for_completion_timeout(&bam_txn->txn_done,
						 QPIC_NAND_COMPLETION_TIMEOUT))
			ret = -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			ret = -ETIMEDOUT;
	}

err_unmap_free_desc:
	/*
	 * Unmap the dma sg_list and free the desc allocated by both
	 * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
	 */
	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->supports_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}

	return ret;
}
EXPORT_SYMBOL(qcom_submit_descs);

/**
 * qcom_clear_read_regs() - reset the read register buffer
 * @nandc: qpic nand controller
 *
 * This function resets the register read buffer for the next NAND operation
 */
void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	qcom_nandc_dev_to_mem(nandc, false);
}
EXPORT_SYMBOL(qcom_clear_read_regs);

/**
 * qcom_nandc_unalloc() - free qpic nand controller resources
 * @nandc: qpic nand controller
 *
 * This function frees the resources allocated for the qpic nand controller
 */
void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	if (nandc->props->supports_bam) {
		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
					 MAX_REG_RD *
					 sizeof(*nandc->reg_read_buf),
					 DMA_FROM_DEVICE);

		if (nandc->tx_chan)
			dma_release_channel(nandc->tx_chan);

		if (nandc->rx_chan)
			dma_release_channel(nandc->rx_chan);

		if (nandc->cmd_chan)
			dma_release_channel(nandc->cmd_chan);
	} else {
		if (nandc->chan)
			dma_release_channel(nandc->chan);
	}
}
EXPORT_SYMBOL(qcom_nandc_unalloc);

/**
 * qcom_nandc_alloc() - Allocate qpic nand controller resources
 * @nandc: qpic nand controller
 *
 * This function allocates the resources needed by the qpic nand controller
 */
int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * we use the internal buffer for reading ONFI params, reading small
	 * data like ID and status, and performing read-copy-write operations
	 * when writing to a codeword partially. 532 is the maximum possible
	 * size of a codeword for our nand controller
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
					   sizeof(*nandc->reg_read_buf),
					   GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->supports_bam) {
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

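		/* BAM mode uses dedicated tx, rx and cmd DMA channels */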
		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
		if (IS_ERR(nandc->tx_chan)) {
			ret = PTR_ERR(nandc->tx_chan);
			nandc->tx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "tx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
		if (IS_ERR(nandc->rx_chan)) {
			ret = PTR_ERR(nandc->rx_chan);
			nandc->rx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
		if (IS_ERR(nandc->cmd_chan)) {
			ret = PTR_ERR(nandc->cmd_chan);
			nandc->cmd_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "cmd DMA channel request failed\n");
			goto unalloc;
		}

		/*
		 * Initially allocate BAM transaction to read ONFI param page.
		 * After detecting all the devices, this BAM transaction will
		 * be freed and the next BAM transaction will be allocated with
		 * maximum codeword size
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			ret = -ENOMEM;
			goto unalloc;
		}
	} else {
		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
		if (IS_ERR(nandc->chan)) {
			ret = PTR_ERR(nandc->chan);
			nandc->chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rxtx DMA channel request failed\n");
			return ret;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	return 0;
unalloc:
	qcom_nandc_unalloc(nandc);
	return ret;
}
EXPORT_SYMBOL(qcom_nandc_alloc);

MODULE_DESCRIPTION("QPIC controller common api");
MODULE_LICENSE("GPL");