1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * DMA driver for Xilinx Video DMA Engine
4 *
5 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
6 *
7 * Based on the Freescale DMA driver.
8 *
9 * Description:
10 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
11 * core that provides high-bandwidth direct memory access between memory
12 * and AXI4-Stream type video target peripherals. The core provides efficient
13 * two-dimensional DMA operations with independent asynchronous read (S2MM)
14 * and write (MM2S) channel operation. It can be configured to have either
15 * one channel or two channels. If configured as two channels, one is to
16 * transmit to the video device (MM2S) and another is to receive from the
17 * video device (S2MM). Initialization, status, interrupt and management
18 * registers are accessed through an AXI4-Lite slave interface.
19 *
20 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
21 * provides high-bandwidth one dimensional direct memory access between memory
22 * and AXI4-Stream target peripherals. It supports one receive and one
23 * transmit channel, both of them optional at synthesis time.
24 *
25 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
26 * Access (DMA) between a memory-mapped source address and a memory-mapped
27 * destination address.
28 *
29 * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
30 * Xilinx IP that provides high-bandwidth direct memory access between
31 * memory and AXI4-Stream target peripherals. It provides a scatter-gather
32 * (SG) interface with independent configuration support for multiple channels.
33 *
34 */
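/*
 * Minimal dmaengine client sketch (illustrative only; the channel name
 * "axidma0", the buffer setup and the callback are assumptions, error
 * handling omitted):
 *
 *	chan = dma_request_chan(dev, "axidma0");
 *	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					  DMA_PREP_INTERRUPT);
 *	txd->callback = done_cb;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */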
35
36 #include <linux/bitops.h>
37 #include <linux/dmapool.h>
38 #include <linux/dma/xilinx_dma.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/io.h>
42 #include <linux/iopoll.h>
43 #include <linux/module.h>
44 #include <linux/of.h>
45 #include <linux/of_dma.h>
46 #include <linux/of_irq.h>
47 #include <linux/platform_device.h>
48 #include <linux/slab.h>
49 #include <linux/clk.h>
50 #include <linux/io-64-nonatomic-lo-hi.h>
51
52 #include "../dmaengine.h"
53
54 /* Register/Descriptor Offsets */
55 #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
56 #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
57 #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
58 #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
59
60 /* Control Registers */
61 #define XILINX_DMA_REG_DMACR 0x0000
62 #define XILINX_DMA_DMACR_DELAY_MAX 0xff
63 #define XILINX_DMA_DMACR_DELAY_SHIFT 24
64 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
65 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
66 #define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
67 #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
68 #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
69 #define XILINX_DMA_DMACR_MASTER_SHIFT 8
70 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
71 #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
72 #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
73 #define XILINX_DMA_DMACR_RESET BIT(2)
74 #define XILINX_DMA_DMACR_CIRC_EN BIT(1)
75 #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
76 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
77 #define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
78 #define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
79 #define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
80
81 #define XILINX_DMA_REG_DMASR 0x0004
82 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
83 #define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
84 #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
85 #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
86 #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
87 #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
88 #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
89 #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
90 #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
91 #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
92 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
93 #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
94 #define XILINX_DMA_DMASR_SG_MASK BIT(3)
95 #define XILINX_DMA_DMASR_IDLE BIT(1)
96 #define XILINX_DMA_DMASR_HALTED BIT(0)
97 #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
98 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
99
100 #define XILINX_DMA_REG_CURDESC 0x0008
101 #define XILINX_DMA_REG_TAILDESC 0x0010
102 #define XILINX_DMA_REG_REG_INDEX 0x0014
103 #define XILINX_DMA_REG_FRMSTORE 0x0018
104 #define XILINX_DMA_REG_THRESHOLD 0x001c
105 #define XILINX_DMA_REG_FRMPTR_STS 0x0024
106 #define XILINX_DMA_REG_PARK_PTR 0x0028
107 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
108 #define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
109 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
110 #define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
111 #define XILINX_DMA_REG_VDMA_VERSION 0x002c
112
113 /* Register Direct Mode Registers */
114 #define XILINX_DMA_REG_VSIZE 0x0000
115 #define XILINX_DMA_VSIZE_MASK GENMASK(12, 0)
116 #define XILINX_DMA_REG_HSIZE 0x0004
117 #define XILINX_DMA_HSIZE_MASK GENMASK(15, 0)
118
119 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
120 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
121 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
122
123 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
124 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
125
126 #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
127 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
128
129 /* HW specific definitions */
130 #define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
131 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
132 #define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
133
134 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
135 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
136 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
137 XILINX_DMA_DMASR_ERR_IRQ)
138
139 #define XILINX_DMA_DMASR_ALL_ERR_MASK \
140 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
141 XILINX_DMA_DMASR_SOF_LATE_ERR | \
142 XILINX_DMA_DMASR_SG_DEC_ERR | \
143 XILINX_DMA_DMASR_SG_SLV_ERR | \
144 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
145 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
146 XILINX_DMA_DMASR_DMA_DEC_ERR | \
147 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
148 XILINX_DMA_DMASR_DMA_INT_ERR)
149
150 /*
151 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
152 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
153 * is enabled in the h/w system.
154 */
155 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
156 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
157 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
158 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
159 XILINX_DMA_DMASR_DMA_INT_ERR)
160
161 /* Axi VDMA Flush on Fsync bits */
162 #define XILINX_DMA_FLUSH_S2MM 3
163 #define XILINX_DMA_FLUSH_MM2S 2
164 #define XILINX_DMA_FLUSH_BOTH 1
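/*
 * These values are expected to match the "xlnx,flush-fsync" devicetree
 * property consumed at probe time (assumption based on the VDMA binding).
 */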
165
166 /* Delay loop counter to prevent hardware failure */
167 #define XILINX_DMA_LOOP_COUNT 1000000
168
169 /* AXI DMA Specific Registers/Offsets */
170 #define XILINX_DMA_REG_SRCDSTADDR 0x18
171 #define XILINX_DMA_REG_BTT 0x28
172
173 /* AXI DMA Specific Masks/Bit fields */
174 #define XILINX_DMA_MAX_TRANS_LEN_MIN 8
175 #define XILINX_DMA_MAX_TRANS_LEN_MAX 23
176 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
177 #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
178 #define XILINX_DMA_CR_DELAY_MAX GENMASK(31, 24)
179 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
180 #define XILINX_DMA_CR_COALESCE_SHIFT 16
181 #define XILINX_DMA_CR_DELAY_SHIFT 24
182 #define XILINX_DMA_BD_SOP BIT(27)
183 #define XILINX_DMA_BD_EOP BIT(26)
184 #define XILINX_DMA_BD_COMP_MASK BIT(31)
185 #define XILINX_DMA_COALESCE_MAX 255
186 #define XILINX_DMA_NUM_DESCS 512
187 #define XILINX_DMA_NUM_APP_WORDS 5
188
189 /* AXI CDMA Specific Registers/Offsets */
190 #define XILINX_CDMA_REG_SRCADDR 0x18
191 #define XILINX_CDMA_REG_DSTADDR 0x20
192
193 /* AXI CDMA Specific Masks */
194 #define XILINX_CDMA_CR_SGMODE BIT(3)
195
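/*
 * Token-paste "_msb" onto the given field name so that, for example,
 * xilinx_prep_dma_addr_t(hw->src_addr) combines hw->src_addr_msb and
 * hw->src_addr into a single 64-bit dma_addr_t.
 */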
196 #define xilinx_prep_dma_addr_t(addr) \
197 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
198
199 /* AXI MCDMA Specific Registers/Offsets */
200 #define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000
201 #define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500
202 #define XILINX_MCDMA_CHEN_OFFSET 0x0008
203 #define XILINX_MCDMA_CH_ERR_OFFSET 0x0010
204 #define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020
205 #define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028
206 #define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40)
207 #define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40)
208 #define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40)
209 #define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)
210
211 /* AXI MCDMA Specific Masks/Shifts */
212 #define XILINX_MCDMA_COALESCE_SHIFT 16
213 #define XILINX_MCDMA_COALESCE_MAX 24
214 #define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5)
215 #define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16)
216 #define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0)
217 #define XILINX_MCDMA_IRQ_IOC_MASK BIT(5)
218 #define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6)
219 #define XILINX_MCDMA_IRQ_ERR_MASK BIT(7)
220 #define XILINX_MCDMA_BD_EOP BIT(30)
221 #define XILINX_MCDMA_BD_SOP BIT(31)
222
223 /**
224 * struct xilinx_vdma_desc_hw - Hardware Descriptor
225 * @next_desc: Next Descriptor Pointer @0x00
226 * @pad1: Reserved @0x04
227 * @buf_addr: Buffer address @0x08
228 * @buf_addr_msb: MSB of Buffer address @0x0C
229 * @vsize: Vertical Size @0x10
230 * @hsize: Horizontal Size @0x14
231 * @stride: Number of bytes between the first
232 * pixels of each horizontal line @0x18
233 */
234 struct xilinx_vdma_desc_hw {
235 u32 next_desc;
236 u32 pad1;
237 u32 buf_addr;
238 u32 buf_addr_msb;
239 u32 vsize;
240 u32 hsize;
241 u32 stride;
242 } __aligned(64);
243
244 /**
245 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
246 * @next_desc: Next Descriptor Pointer @0x00
247 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
248 * @buf_addr: Buffer address @0x08
249 * @buf_addr_msb: MSB of Buffer address @0x0C
250 * @reserved1: Reserved @0x10
251 * @reserved2: Reserved @0x14
252 * @control: Control field @0x18
253 * @status: Status field @0x1C
254 * @app: APP Fields @0x20 - 0x30
255 */
256 struct xilinx_axidma_desc_hw {
257 u32 next_desc;
258 u32 next_desc_msb;
259 u32 buf_addr;
260 u32 buf_addr_msb;
261 u32 reserved1;
262 u32 reserved2;
263 u32 control;
264 u32 status;
265 u32 app[XILINX_DMA_NUM_APP_WORDS];
266 } __aligned(64);
267
268 /**
269 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
270 * @next_desc: Next Descriptor Pointer @0x00
271 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
272 * @buf_addr: Buffer address @0x08
273 * @buf_addr_msb: MSB of Buffer address @0x0C
274 * @rsvd: Reserved field @0x10
275 * @control: Control Information field @0x14
276 * @status: Status field @0x18
277 * @sideband_status: Status of sideband signals @0x1C
278 * @app: APP Fields @0x20 - 0x30
279 */
280 struct xilinx_aximcdma_desc_hw {
281 u32 next_desc;
282 u32 next_desc_msb;
283 u32 buf_addr;
284 u32 buf_addr_msb;
285 u32 rsvd;
286 u32 control;
287 u32 status;
288 u32 sideband_status;
289 u32 app[XILINX_DMA_NUM_APP_WORDS];
290 } __aligned(64);
291
292 /**
293 * struct xilinx_cdma_desc_hw - Hardware Descriptor
294 * @next_desc: Next Descriptor Pointer @0x00
295 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
296 * @src_addr: Source address @0x08
297 * @src_addr_msb: Source address MSB @0x0C
298 * @dest_addr: Destination address @0x10
299 * @dest_addr_msb: Destination address MSB @0x14
300 * @control: Control field @0x18
301 * @status: Status field @0x1C
302 */
303 struct xilinx_cdma_desc_hw {
304 u32 next_desc;
305 u32 next_desc_msb;
306 u32 src_addr;
307 u32 src_addr_msb;
308 u32 dest_addr;
309 u32 dest_addr_msb;
310 u32 control;
311 u32 status;
312 } __aligned(64);
313
314 /**
315 * struct xilinx_vdma_tx_segment - Descriptor segment
316 * @hw: Hardware descriptor
317 * @node: Node in the descriptor segments list
318 * @phys: Physical address of segment
319 */
320 struct xilinx_vdma_tx_segment {
321 struct xilinx_vdma_desc_hw hw;
322 struct list_head node;
323 dma_addr_t phys;
324 } __aligned(64);
325
326 /**
327 * struct xilinx_axidma_tx_segment - Descriptor segment
328 * @hw: Hardware descriptor
329 * @node: Node in the descriptor segments list
330 * @phys: Physical address of segment
331 */
332 struct xilinx_axidma_tx_segment {
333 struct xilinx_axidma_desc_hw hw;
334 struct list_head node;
335 dma_addr_t phys;
336 } __aligned(64);
337
338 /**
339 * struct xilinx_aximcdma_tx_segment - Descriptor segment
340 * @hw: Hardware descriptor
341 * @node: Node in the descriptor segments list
342 * @phys: Physical address of segment
343 */
344 struct xilinx_aximcdma_tx_segment {
345 struct xilinx_aximcdma_desc_hw hw;
346 struct list_head node;
347 dma_addr_t phys;
348 } __aligned(64);
349
350 /**
351 * struct xilinx_cdma_tx_segment - Descriptor segment
352 * @hw: Hardware descriptor
353 * @node: Node in the descriptor segments list
354 * @phys: Physical address of segment
355 */
356 struct xilinx_cdma_tx_segment {
357 struct xilinx_cdma_desc_hw hw;
358 struct list_head node;
359 dma_addr_t phys;
360 } __aligned(64);
361
362 /**
363 * struct xilinx_dma_tx_descriptor - Per Transaction structure
364 * @async_tx: Async transaction descriptor
365 * @segments: TX segments list
366 * @node: Node in the channel descriptors list
367 * @cyclic: Whether this is a cyclic transfer
368 * @err: Whether the descriptor has an error.
369 * @residue: Residue of the completed descriptor
370 */
371 struct xilinx_dma_tx_descriptor {
372 struct dma_async_tx_descriptor async_tx;
373 struct list_head segments;
374 struct list_head node;
375 bool cyclic;
376 bool err;
377 u32 residue;
378 };
379
380 /**
381 * struct xilinx_dma_chan - Driver specific DMA channel structure
382 * @xdev: Driver specific device structure
383 * @ctrl_offset: Control registers offset
384 * @desc_offset: TX descriptor registers offset
385 * @lock: Descriptor operation lock
386 * @pending_list: Descriptors waiting to be submitted to hardware
387 * @active_list: Descriptors submitted to hardware
388 * @done_list: Completed descriptors
389 * @free_seg_list: Free descriptors
390 * @common: DMA common channel
391 * @desc_pool: Descriptors pool
392 * @dev: The dma device
393 * @irq: Channel IRQ
394 * @id: Channel ID
395 * @direction: Transfer direction
396 * @num_frms: Number of frames
397 * @has_sg: Support scatter transfers
398 * @cyclic: Whether the channel is running a cyclic transfer
399 * @genlock: Support genlock mode
400 * @err: Channel has errors
401 * @idle: Whether the channel is idle
402 * @terminating: Whether the channel is being terminated/synchronized by the user
403 * @tasklet: Cleanup work after irq
404 * @config: Device configuration info
405 * @flush_on_fsync: Flush on Frame sync
406 * @desc_pendingcount: Descriptor pending count
407 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
408 * @desc_submitcount: Descriptor h/w submitted count
409 * @seg_v: Statically allocated segments base
410 * @seg_mv: Statically allocated segments base for MCDMA
411 * @seg_p: Physical allocated segments base
412 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
413 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
414 * @start_transfer: IP-specific handler that starts a transfer
415 * @stop_transfer: IP-specific handler that quiesces a transfer
416 * @tdest: TDEST value for mcdma
417 * @has_vflip: S2MM vertical flip
418 * @irq_delay: Interrupt delay timeout
419 */
420 struct xilinx_dma_chan {
421 struct xilinx_dma_device *xdev;
422 u32 ctrl_offset;
423 u32 desc_offset;
424 spinlock_t lock;
425 struct list_head pending_list;
426 struct list_head active_list;
427 struct list_head done_list;
428 struct list_head free_seg_list;
429 struct dma_chan common;
430 struct dma_pool *desc_pool;
431 struct device *dev;
432 int irq;
433 int id;
434 enum dma_transfer_direction direction;
435 int num_frms;
436 bool has_sg;
437 bool cyclic;
438 bool genlock;
439 bool err;
440 bool idle;
441 bool terminating;
442 struct tasklet_struct tasklet;
443 struct xilinx_vdma_config config;
444 bool flush_on_fsync;
445 u32 desc_pendingcount;
446 bool ext_addr;
447 u32 desc_submitcount;
448 struct xilinx_axidma_tx_segment *seg_v;
449 struct xilinx_aximcdma_tx_segment *seg_mv;
450 dma_addr_t seg_p;
451 struct xilinx_axidma_tx_segment *cyclic_seg_v;
452 dma_addr_t cyclic_seg_p;
453 void (*start_transfer)(struct xilinx_dma_chan *chan);
454 int (*stop_transfer)(struct xilinx_dma_chan *chan);
455 u16 tdest;
456 bool has_vflip;
457 u8 irq_delay;
458 };
459
460 /**
461 * enum xdma_ip_type - DMA IP type.
462 *
463 * @XDMA_TYPE_AXIDMA: AXI DMA IP.
464 * @XDMA_TYPE_CDMA: AXI CDMA IP.
465 * @XDMA_TYPE_VDMA: AXI VDMA IP.
466 * @XDMA_TYPE_AXIMCDMA: AXI MCDMA IP.
467 *
468 */
469 enum xdma_ip_type {
470 XDMA_TYPE_AXIDMA = 0,
471 XDMA_TYPE_CDMA,
472 XDMA_TYPE_VDMA,
473 XDMA_TYPE_AXIMCDMA
474 };
475
476 struct xilinx_dma_config {
477 enum xdma_ip_type dmatype;
478 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
479 struct clk **tx_clk, struct clk **txs_clk,
480 struct clk **rx_clk, struct clk **rxs_clk);
481 irqreturn_t (*irq_handler)(int irq, void *data);
482 const int max_channels;
483 };
484
485 /**
486 * struct xilinx_dma_device - DMA device structure
487 * @regs: I/O mapped base address
488 * @dev: Device Structure
489 * @common: DMA device structure
490 * @chan: Driver specific DMA channel
491 * @flush_on_fsync: Flush on frame sync
492 * @ext_addr: Indicates 64 bit addressing is supported by dma device
493 * @pdev: Platform device structure pointer
494 * @dma_config: DMA config structure
495 * @axi_clk: DMA AXI4-Lite interface clock
496 * @tx_clk: DMA mm2s clock
497 * @txs_clk: DMA mm2s stream clock
498 * @rx_clk: DMA s2mm clock
499 * @rxs_clk: DMA s2mm stream clock
500 * @s2mm_chan_id: DMA s2mm channel identifier
501 * @mm2s_chan_id: DMA mm2s channel identifier
502 * @max_buffer_len: Max buffer length
503 * @has_axistream_connected: AXI DMA connected to AXI Stream IP
504 */
505 struct xilinx_dma_device {
506 void __iomem *regs;
507 struct device *dev;
508 struct dma_device common;
509 struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
510 u32 flush_on_fsync;
511 bool ext_addr;
512 struct platform_device *pdev;
513 const struct xilinx_dma_config *dma_config;
514 struct clk *axi_clk;
515 struct clk *tx_clk;
516 struct clk *txs_clk;
517 struct clk *rx_clk;
518 struct clk *rxs_clk;
519 u32 s2mm_chan_id;
520 u32 mm2s_chan_id;
521 u32 max_buffer_len;
522 bool has_axistream_connected;
523 };
524
525 /* Macros */
526 #define to_xilinx_chan(chan) \
527 container_of(chan, struct xilinx_dma_chan, common)
528 #define to_dma_tx_descriptor(tx) \
529 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
530 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
531 readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
532 val, cond, delay_us, timeout_us)
533
534 /* IO accessors */
535 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
536 {
537 return ioread32(chan->xdev->regs + reg);
538 }
539
540 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
541 {
542 iowrite32(value, chan->xdev->regs + reg);
543 }
544
545 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
546 u32 value)
547 {
548 dma_write(chan, chan->desc_offset + reg, value);
549 }
550
551 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
552 {
553 return dma_read(chan, chan->ctrl_offset + reg);
554 }
555
556 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
557 u32 value)
558 {
559 dma_write(chan, chan->ctrl_offset + reg, value);
560 }
561
562 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
563 u32 clr)
564 {
565 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
566 }
567
568 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
569 u32 set)
570 {
571 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
572 }
573
574 /**
575 * vdma_desc_write_64 - 64-bit descriptor write
576 * @chan: Driver specific VDMA channel
577 * @reg: Register to write
578 * @value_lsb: lower address of the descriptor.
579 * @value_msb: upper address of the descriptor.
580 *
581 * Since the VDMA driver is writing to a register offset which is not a
582 * multiple of 64 bits (e.g. 0x5c), the value is written as two separate
583 * 32-bit writes instead of a single 64-bit register write.
584 */
585 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
586 u32 value_lsb, u32 value_msb)
587 {
588 /* Write the LSB 32 bits */
589 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
590
591 /* Write the msb 32 bits */
592 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
593 }
594
595 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
596 {
597 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
598 }
599
600 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
601 dma_addr_t addr)
602 {
603 if (chan->ext_addr)
604 dma_writeq(chan, reg, addr);
605 else
606 dma_ctrl_write(chan, reg, addr);
607 }
608
609 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
610 struct xilinx_axidma_desc_hw *hw,
611 dma_addr_t buf_addr, size_t sg_used,
612 size_t period_len)
613 {
614 if (chan->ext_addr) {
615 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
616 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
617 period_len);
618 } else {
619 hw->buf_addr = buf_addr + sg_used + period_len;
620 }
621 }
622
623 static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
624 struct xilinx_aximcdma_desc_hw *hw,
625 dma_addr_t buf_addr, size_t sg_used)
626 {
627 if (chan->ext_addr) {
628 hw->buf_addr = lower_32_bits(buf_addr + sg_used);
629 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
630 } else {
631 hw->buf_addr = buf_addr + sg_used;
632 }
633 }
634
635 /**
636 * xilinx_dma_get_metadata_ptr - Populate metadata pointer and payload length
637 * @tx: async transaction descriptor
638 * @payload_len: metadata payload length
639 * @max_len: metadata max length
640 * Return: The app field pointer.
641 */
642 static void *xilinx_dma_get_metadata_ptr(struct dma_async_tx_descriptor *tx,
643 size_t *payload_len, size_t *max_len)
644 {
645 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
646 struct xilinx_axidma_tx_segment *seg;
647
648 *max_len = *payload_len = sizeof(u32) * XILINX_DMA_NUM_APP_WORDS;
649 seg = list_first_entry(&desc->segments,
650 struct xilinx_axidma_tx_segment, node);
651 return seg->hw.app;
652 }
653
654 static struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = {
655 .get_ptr = xilinx_dma_get_metadata_ptr,
656 };
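/*
 * Clients fetch the APP words of the first segment through the dmaengine
 * metadata API, e.g. (sketch, error handling omitted):
 *
 *	app = dmaengine_desc_get_metadata_ptr(txd, &payload_len, &max_len);
 */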
657
658 /* -----------------------------------------------------------------------------
659 * Descriptors and segments alloc and free
660 */
661
662 /**
663 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
664 * @chan: Driver specific DMA channel
665 *
666 * Return: The allocated segment on success and NULL on failure.
667 */
668 static struct xilinx_vdma_tx_segment *
669 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
670 {
671 struct xilinx_vdma_tx_segment *segment;
672 dma_addr_t phys;
673
674 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
675 if (!segment)
676 return NULL;
677
678 segment->phys = phys;
679
680 return segment;
681 }
682
683 /**
684 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
685 * @chan: Driver specific DMA channel
686 *
687 * Return: The allocated segment on success and NULL on failure.
688 */
689 static struct xilinx_cdma_tx_segment *
690 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
691 {
692 struct xilinx_cdma_tx_segment *segment;
693 dma_addr_t phys;
694
695 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
696 if (!segment)
697 return NULL;
698
699 segment->phys = phys;
700
701 return segment;
702 }
703
704 /**
705 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
706 * @chan: Driver specific DMA channel
707 *
708 * Return: The allocated segment on success and NULL on failure.
709 */
710 static struct xilinx_axidma_tx_segment *
711 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
712 {
713 struct xilinx_axidma_tx_segment *segment = NULL;
714 unsigned long flags;
715
716 spin_lock_irqsave(&chan->lock, flags);
717 if (!list_empty(&chan->free_seg_list)) {
718 segment = list_first_entry(&chan->free_seg_list,
719 struct xilinx_axidma_tx_segment,
720 node);
721 list_del(&segment->node);
722 }
723 spin_unlock_irqrestore(&chan->lock, flags);
724
725 if (!segment)
726 dev_dbg(chan->dev, "Could not find free tx segment\n");
727
728 return segment;
729 }
730
731 /**
732 * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
733 * @chan: Driver specific DMA channel
734 *
735 * Return: The allocated segment on success and NULL on failure.
736 */
737 static struct xilinx_aximcdma_tx_segment *
738 xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
739 {
740 struct xilinx_aximcdma_tx_segment *segment = NULL;
741 unsigned long flags;
742
743 spin_lock_irqsave(&chan->lock, flags);
744 if (!list_empty(&chan->free_seg_list)) {
745 segment = list_first_entry(&chan->free_seg_list,
746 struct xilinx_aximcdma_tx_segment,
747 node);
748 list_del(&segment->node);
749 }
750 spin_unlock_irqrestore(&chan->lock, flags);
751
752 return segment;
753 }
754
755 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
756 {
757 u32 next_desc = hw->next_desc;
758 u32 next_desc_msb = hw->next_desc_msb;
759
760 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
761
762 hw->next_desc = next_desc;
763 hw->next_desc_msb = next_desc_msb;
764 }
765
766 static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
767 {
768 u32 next_desc = hw->next_desc;
769 u32 next_desc_msb = hw->next_desc_msb;
770
771 memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
772
773 hw->next_desc = next_desc;
774 hw->next_desc_msb = next_desc_msb;
775 }
776
777 /**
778 * xilinx_dma_free_tx_segment - Free transaction segment
779 * @chan: Driver specific DMA channel
780 * @segment: DMA transaction segment
781 */
782 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
783 struct xilinx_axidma_tx_segment *segment)
784 {
785 xilinx_dma_clean_hw_desc(&segment->hw);
786
787 list_add_tail(&segment->node, &chan->free_seg_list);
788 }
789
790 /**
791 * xilinx_mcdma_free_tx_segment - Free transaction segment
792 * @chan: Driver specific DMA channel
793 * @segment: DMA transaction segment
794 */
795 static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
796 struct xilinx_aximcdma_tx_segment *
797 segment)
798 {
799 xilinx_mcdma_clean_hw_desc(&segment->hw);
800
801 list_add_tail(&segment->node, &chan->free_seg_list);
802 }
803
804 /**
805 * xilinx_cdma_free_tx_segment - Free transaction segment
806 * @chan: Driver specific DMA channel
807 * @segment: DMA transaction segment
808 */
809 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
810 struct xilinx_cdma_tx_segment *segment)
811 {
812 dma_pool_free(chan->desc_pool, segment, segment->phys);
813 }
814
815 /**
816 * xilinx_vdma_free_tx_segment - Free transaction segment
817 * @chan: Driver specific DMA channel
818 * @segment: DMA transaction segment
819 */
820 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
821 struct xilinx_vdma_tx_segment *segment)
822 {
823 dma_pool_free(chan->desc_pool, segment, segment->phys);
824 }
825
826 /**
827 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
828 * @chan: Driver specific DMA channel
829 *
830 * Return: The allocated descriptor on success and NULL on failure.
831 */
832 static struct xilinx_dma_tx_descriptor *
833 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
834 {
835 struct xilinx_dma_tx_descriptor *desc;
836
837 desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
838 if (!desc)
839 return NULL;
840
841 INIT_LIST_HEAD(&desc->segments);
842
843 return desc;
844 }
845
846 /**
847 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
848 * @chan: Driver specific DMA channel
849 * @desc: DMA transaction descriptor
850 */
851 static void
852 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
853 struct xilinx_dma_tx_descriptor *desc)
854 {
855 struct xilinx_vdma_tx_segment *segment, *next;
856 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
857 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
858 struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
859
860 if (!desc)
861 return;
862
863 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
864 list_for_each_entry_safe(segment, next, &desc->segments, node) {
865 list_del(&segment->node);
866 xilinx_vdma_free_tx_segment(chan, segment);
867 }
868 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
869 list_for_each_entry_safe(cdma_segment, cdma_next,
870 &desc->segments, node) {
871 list_del(&cdma_segment->node);
872 xilinx_cdma_free_tx_segment(chan, cdma_segment);
873 }
874 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
875 list_for_each_entry_safe(axidma_segment, axidma_next,
876 &desc->segments, node) {
877 list_del(&axidma_segment->node);
878 xilinx_dma_free_tx_segment(chan, axidma_segment);
879 }
880 } else {
881 list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
882 &desc->segments, node) {
883 list_del(&aximcdma_segment->node);
884 xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
885 }
886 }
887
888 kfree(desc);
889 }
890
891 /* Required functions */
892
893 /**
894 * xilinx_dma_free_desc_list - Free descriptors list
895 * @chan: Driver specific DMA channel
896 * @list: List to parse and delete the descriptor
897 */
898 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
899 struct list_head *list)
900 {
901 struct xilinx_dma_tx_descriptor *desc, *next;
902
903 list_for_each_entry_safe(desc, next, list, node) {
904 list_del(&desc->node);
905 xilinx_dma_free_tx_descriptor(chan, desc);
906 }
907 }
908
909 /**
910 * xilinx_dma_free_descriptors - Free channel descriptors
911 * @chan: Driver specific DMA channel
912 */
913 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
914 {
915 unsigned long flags;
916
917 spin_lock_irqsave(&chan->lock, flags);
918
919 xilinx_dma_free_desc_list(chan, &chan->pending_list);
920 xilinx_dma_free_desc_list(chan, &chan->done_list);
921 xilinx_dma_free_desc_list(chan, &chan->active_list);
922
923 spin_unlock_irqrestore(&chan->lock, flags);
924 }
925
926 /**
927 * xilinx_dma_free_chan_resources - Free channel resources
928 * @dchan: DMA channel
929 */
930 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
931 {
932 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
933 unsigned long flags;
934
935 dev_dbg(chan->dev, "Free all channel resources.\n");
936
937 xilinx_dma_free_descriptors(chan);
938
939 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
940 spin_lock_irqsave(&chan->lock, flags);
941 INIT_LIST_HEAD(&chan->free_seg_list);
942 spin_unlock_irqrestore(&chan->lock, flags);
943
944 /* Free memory that is allocated for BD */
945 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
946 XILINX_DMA_NUM_DESCS, chan->seg_v,
947 chan->seg_p);
948
949 /* Free Memory that is allocated for cyclic DMA Mode */
950 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
951 chan->cyclic_seg_v, chan->cyclic_seg_p);
952 }
953
954 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
955 spin_lock_irqsave(&chan->lock, flags);
956 INIT_LIST_HEAD(&chan->free_seg_list);
957 spin_unlock_irqrestore(&chan->lock, flags);
958
959 /* Free memory that is allocated for BD */
960 dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
961 XILINX_DMA_NUM_DESCS, chan->seg_mv,
962 chan->seg_p);
963 }
964
965 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
966 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
967 dma_pool_destroy(chan->desc_pool);
968 chan->desc_pool = NULL;
969 }
970
971 }
972
973 /**
974 * xilinx_dma_get_residue - Compute residue for a given descriptor
975 * @chan: Driver specific dma channel
976 * @desc: dma transaction descriptor
977 *
978 * Return: The number of residue bytes for the descriptor.
979 */
980 static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
981 struct xilinx_dma_tx_descriptor *desc)
982 {
983 struct xilinx_cdma_tx_segment *cdma_seg;
984 struct xilinx_axidma_tx_segment *axidma_seg;
985 struct xilinx_aximcdma_tx_segment *aximcdma_seg;
986 struct xilinx_cdma_desc_hw *cdma_hw;
987 struct xilinx_axidma_desc_hw *axidma_hw;
988 struct xilinx_aximcdma_desc_hw *aximcdma_hw;
989 struct list_head *entry;
990 u32 residue = 0;
991
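/*
 * Per segment, the control word holds the programmed transfer length and
 * the status word the completed byte count in its low bits; the masked
 * difference is the untransferred remainder.
 */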
992 list_for_each(entry, &desc->segments) {
993 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
994 cdma_seg = list_entry(entry,
995 struct xilinx_cdma_tx_segment,
996 node);
997 cdma_hw = &cdma_seg->hw;
998 residue += (cdma_hw->control - cdma_hw->status) &
999 chan->xdev->max_buffer_len;
1000 } else if (chan->xdev->dma_config->dmatype ==
1001 XDMA_TYPE_AXIDMA) {
1002 axidma_seg = list_entry(entry,
1003 struct xilinx_axidma_tx_segment,
1004 node);
1005 axidma_hw = &axidma_seg->hw;
1006 residue += (axidma_hw->control - axidma_hw->status) &
1007 chan->xdev->max_buffer_len;
1008 } else {
1009 aximcdma_seg =
1010 list_entry(entry,
1011 struct xilinx_aximcdma_tx_segment,
1012 node);
1013 aximcdma_hw = &aximcdma_seg->hw;
1014 residue +=
1015 (aximcdma_hw->control - aximcdma_hw->status) &
1016 chan->xdev->max_buffer_len;
1017 }
1018 }
1019
1020 return residue;
1021 }
1022
1023 /**
1024 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
1025 * @chan: Driver specific dma channel
1026 * @desc: dma transaction descriptor
1027 * @flags: flags for spin lock
1028 */
1029 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
1030 struct xilinx_dma_tx_descriptor *desc,
1031 unsigned long *flags)
1032 {
1033 struct dmaengine_desc_callback cb;
1034
1035 dmaengine_desc_get_callback(&desc->async_tx, &cb);
1036 if (dmaengine_desc_callback_valid(&cb)) {
1037 spin_unlock_irqrestore(&chan->lock, *flags);
1038 dmaengine_desc_callback_invoke(&cb, NULL);
1039 spin_lock_irqsave(&chan->lock, *flags);
1040 }
1041 }
1042
1043 /**
1044 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
1045 * @chan: Driver specific DMA channel
1046 */
1047 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
1048 {
1049 struct xilinx_dma_tx_descriptor *desc, *next;
1050 unsigned long flags;
1051
1052 spin_lock_irqsave(&chan->lock, flags);
1053
1054 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
1055 struct dmaengine_result result;
1056
1057 if (desc->cyclic) {
1058 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
1059 break;
1060 }
1061
1062 /* Remove from the list of running transactions */
1063 list_del(&desc->node);
1064
1065 if (unlikely(desc->err)) {
1066 if (chan->direction == DMA_DEV_TO_MEM)
1067 result.result = DMA_TRANS_READ_FAILED;
1068 else
1069 result.result = DMA_TRANS_WRITE_FAILED;
1070 } else {
1071 result.result = DMA_TRANS_NOERROR;
1072 }
1073
1074 result.residue = desc->residue;
1075
1076 /* Run the link descriptor callback function */
1077 spin_unlock_irqrestore(&chan->lock, flags);
1078 dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
1079 spin_lock_irqsave(&chan->lock, flags);
1080
1081 /* Run any dependencies, then free the descriptor */
1082 dma_run_dependencies(&desc->async_tx);
1083 xilinx_dma_free_tx_descriptor(chan, desc);
1084
1085 /*
1086 * While we ran a callback the user called a terminate function,
1087 * which takes care of cleaning up any remaining descriptors
1088 */
1089 if (chan->terminating)
1090 break;
1091 }
1092
1093 spin_unlock_irqrestore(&chan->lock, flags);
1094 }
1095
1096 /**
1097 * xilinx_dma_do_tasklet - Completion tasklet handler
1098 * @t: Pointer to the tasklet embedded in the Xilinx DMA channel structure
1099 */
1100 static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
1101 {
1102 struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);
1103
1104 xilinx_dma_chan_desc_cleanup(chan);
1105 }
1106
1107 /**
1108 * xilinx_dma_alloc_chan_resources - Allocate channel resources
1109 * @dchan: DMA channel
1110 *
1111 * Return: '0' on success and failure value on error
1112 */
1113 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
1114 {
1115 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1116 int i;
1117
1118 /* Has this channel already been allocated? */
1119 if (chan->desc_pool)
1120 return 0;
1121
1122 /*
1123 * We need the descriptors to be aligned to 64 bytes
1124 * to meet the Xilinx VDMA specification requirement.
1125 */
1126 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1127 /* Allocate the buffer descriptors. */
1128 chan->seg_v = dma_alloc_coherent(chan->dev,
1129 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
1130 &chan->seg_p, GFP_KERNEL);
1131 if (!chan->seg_v) {
1132 dev_err(chan->dev,
1133 "unable to allocate channel %d descriptors\n",
1134 chan->id);
1135 return -ENOMEM;
1136 }
1137 /*
1138 * For cyclic DMA mode we need to program the tail descriptor
1139 * register with a value which is not a part of the BD chain,
1140 * so allocate a spare descriptor segment during channel
1141 * allocation for programming the tail descriptor.
1142 */
1143 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
1144 sizeof(*chan->cyclic_seg_v),
1145 &chan->cyclic_seg_p,
1146 GFP_KERNEL);
1147 if (!chan->cyclic_seg_v) {
1148 dev_err(chan->dev,
1149 "unable to allocate desc segment for cyclic DMA\n");
1150 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
1151 XILINX_DMA_NUM_DESCS, chan->seg_v,
1152 chan->seg_p);
1153 return -ENOMEM;
1154 }
1155 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
1156
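/*
 * Pre-link the BDs into a ring: each hw.next_desc points at the
 * ((i + 1) % XILINX_DMA_NUM_DESCS)'th descriptor.
 */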
1157 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1158 chan->seg_v[i].hw.next_desc =
1159 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1160 ((i + 1) % XILINX_DMA_NUM_DESCS));
1161 chan->seg_v[i].hw.next_desc_msb =
1162 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1163 ((i + 1) % XILINX_DMA_NUM_DESCS));
1164 chan->seg_v[i].phys = chan->seg_p +
1165 sizeof(*chan->seg_v) * i;
1166 list_add_tail(&chan->seg_v[i].node,
1167 &chan->free_seg_list);
1168 }
1169 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
1170 /* Allocate the buffer descriptors. */
1171 chan->seg_mv = dma_alloc_coherent(chan->dev,
1172 sizeof(*chan->seg_mv) *
1173 XILINX_DMA_NUM_DESCS,
1174 &chan->seg_p, GFP_KERNEL);
1175 if (!chan->seg_mv) {
1176 dev_err(chan->dev,
1177 "unable to allocate channel %d descriptors\n",
1178 chan->id);
1179 return -ENOMEM;
1180 }
1181 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1182 chan->seg_mv[i].hw.next_desc =
1183 lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1184 ((i + 1) % XILINX_DMA_NUM_DESCS));
1185 chan->seg_mv[i].hw.next_desc_msb =
1186 upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1187 ((i + 1) % XILINX_DMA_NUM_DESCS));
1188 chan->seg_mv[i].phys = chan->seg_p +
1189 sizeof(*chan->seg_mv) * i;
1190 list_add_tail(&chan->seg_mv[i].node,
1191 &chan->free_seg_list);
1192 }
1193 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1194 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
1195 chan->dev,
1196 sizeof(struct xilinx_cdma_tx_segment),
1197 __alignof__(struct xilinx_cdma_tx_segment),
1198 0);
1199 } else {
1200 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
1201 chan->dev,
1202 sizeof(struct xilinx_vdma_tx_segment),
1203 __alignof__(struct xilinx_vdma_tx_segment),
1204 0);
1205 }
1206
1207 if (!chan->desc_pool &&
1208 ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
1209 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
1210 dev_err(chan->dev,
1211 "unable to allocate channel %d descriptor pool\n",
1212 chan->id);
1213 return -ENOMEM;
1214 }
1215
1216 dma_cookie_init(dchan);
1217
1218 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1219 /* For AXI DMA, resetting one channel also resets the
1220 * other channel, so enable the interrupts here.
1221 */
1222 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1223 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1224 }
1225
1226 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
1227 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1228 XILINX_CDMA_CR_SGMODE);
1229
1230 return 0;
1231 }
1232
1233 /**
1234 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
1235 * @chan: Driver specific DMA channel
1236 * @size: Total data that needs to be copied
1237 * @done: Amount of data that has been already copied
1238 *
1239 * Return: Amount of data that has to be copied
1240 */
1241 static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
1242 int size, int done)
1243 {
1244 size_t copy;
1245
1246 copy = min_t(size_t, size - done,
1247 chan->xdev->max_buffer_len);
1248
1249 if ((copy + done < size) &&
1250 chan->xdev->common.copy_align) {
1251 /*
1252 * If this is not the last descriptor, make sure
1253 * the next one will be properly aligned
1254 */
1255 copy = rounddown(copy,
1256 (1 << chan->xdev->common.copy_align));
1257 }
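/*
 * Illustrative example (assumed values): with a 26-bit length field
 * (max_buffer_len = 0x3ffffff) and copy_align = 3, an intermediate chunk
 * of 0x3ffffff bytes is trimmed to 0x3fffff8 so the next descriptor
 * starts 8-byte aligned.
 */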
1258 return copy;
1259 }
1260
1261 /**
1262 * xilinx_dma_tx_status - Get DMA transaction status
1263 * @dchan: DMA channel
1264 * @cookie: Transaction identifier
1265 * @txstate: Transaction state
1266 *
1267 * Return: DMA transaction status
1268 */
1269 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1270 dma_cookie_t cookie,
1271 struct dma_tx_state *txstate)
1272 {
1273 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1274 struct xilinx_dma_tx_descriptor *desc;
1275 enum dma_status ret;
1276 unsigned long flags;
1277 u32 residue = 0;
1278
1279 ret = dma_cookie_status(dchan, cookie, txstate);
1280 if (ret == DMA_COMPLETE || !txstate)
1281 return ret;
1282
1283 spin_lock_irqsave(&chan->lock, flags);
1284 if (!list_empty(&chan->active_list)) {
1285 desc = list_last_entry(&chan->active_list,
1286 struct xilinx_dma_tx_descriptor, node);
1287 /*
1288 * VDMA and simple mode do not support residue reporting, so the
1289 * residue field will always be 0.
1290 */
1291 if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
1292 residue = xilinx_dma_get_residue(chan, desc);
1293 }
1294 spin_unlock_irqrestore(&chan->lock, flags);
1295
1296 dma_set_residue(txstate, residue);
1297
1298 return ret;
1299 }
1300
1301 /**
1302 * xilinx_dma_stop_transfer - Halt DMA channel
1303 * @chan: Driver specific DMA channel
1304 *
1305 * Return: '0' on success and failure value on error
1306 */
1307 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1308 {
1309 u32 val;
1310
1311 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1312
1313 /* Wait for the hardware to halt */
1314 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1315 val & XILINX_DMA_DMASR_HALTED, 0,
1316 XILINX_DMA_LOOP_COUNT);
1317 }
1318
1319 /**
1320 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1321 * @chan: Driver specific DMA channel
1322 *
1323 * Return: '0' on success and failure value on error
1324 */
1325 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1326 {
1327 u32 val;
1328
1329 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1330 val & XILINX_DMA_DMASR_IDLE, 0,
1331 XILINX_DMA_LOOP_COUNT);
1332 }
1333
1334 /**
1335 * xilinx_dma_start - Start DMA channel
1336 * @chan: Driver specific DMA channel
1337 */
1338 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1339 {
1340 int err;
1341 u32 val;
1342
1343 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1344
1345 /* Wait for the hardware to start */
1346 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1347 !(val & XILINX_DMA_DMASR_HALTED), 0,
1348 XILINX_DMA_LOOP_COUNT);
1349
1350 if (err) {
1351 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1352 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1353
1354 chan->err = true;
1355 }
1356 }
1357
1358 /**
1359 * xilinx_vdma_start_transfer - Starts VDMA transfer
1360 * @chan: Driver specific channel struct pointer
1361 */
1362 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1363 {
1364 struct xilinx_vdma_config *config = &chan->config;
1365 struct xilinx_dma_tx_descriptor *desc;
1366 u32 reg, j;
1367 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1368 int i = 0;
1369
1370 /* This function was invoked with lock held */
1371 if (chan->err)
1372 return;
1373
1374 if (!chan->idle)
1375 return;
1376
1377 if (list_empty(&chan->pending_list))
1378 return;
1379
1380 desc = list_first_entry(&chan->pending_list,
1381 struct xilinx_dma_tx_descriptor, node);
1382
1383 /* Configure the hardware using info in the config structure */
1384 if (chan->has_vflip) {
1385 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1386 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1387 reg |= config->vflip_en;
1388 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1389 reg);
1390 }
1391
1392 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1393
1394 if (config->frm_cnt_en)
1395 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1396 else
1397 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1398
1399 /* If not parking, enable circular mode */
1400 if (config->park)
1401 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1402 else
1403 reg |= XILINX_DMA_DMACR_CIRC_EN;
1404
1405 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1406
1407 if (config->park) {
1408 j = chan->desc_submitcount;
1409 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1410 if (chan->direction == DMA_MEM_TO_DEV) {
1411 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1412 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1413 } else {
1414 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1415 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1416 }
1417 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1418 }
1419
1420 /* Start the hardware */
1421 xilinx_dma_start(chan);
1422
1423 if (chan->err)
1424 return;
1425
1426 /* Start the transfer */
1427 if (chan->desc_submitcount < chan->num_frms)
1428 i = chan->desc_submitcount;
1429
1430 list_for_each_entry(segment, &desc->segments, node) {
1431 if (chan->ext_addr)
1432 vdma_desc_write_64(chan,
1433 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1434 segment->hw.buf_addr,
1435 segment->hw.buf_addr_msb);
1436 else
1437 vdma_desc_write(chan,
1438 XILINX_VDMA_REG_START_ADDRESS(i++),
1439 segment->hw.buf_addr);
1440
1441 last = segment;
1442 }
1443
1444 if (!last)
1445 return;
1446
1447 /* HW expects these parameters to be the same for one transaction */
1448 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1449 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1450 last->hw.stride);
1451 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1452
1453 chan->desc_submitcount++;
1454 chan->desc_pendingcount--;
1455 list_move_tail(&desc->node, &chan->active_list);
1456 if (chan->desc_submitcount == chan->num_frms)
1457 chan->desc_submitcount = 0;
1458
1459 chan->idle = false;
1460 }
1461
1462 /**
1463 * xilinx_cdma_start_transfer - Starts cdma transfer
1464 * @chan: Driver specific channel struct pointer
1465 */
1466 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1467 {
1468 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1469 struct xilinx_cdma_tx_segment *tail_segment;
1470 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1471
1472 if (chan->err)
1473 return;
1474
1475 if (!chan->idle)
1476 return;
1477
1478 if (list_empty(&chan->pending_list))
1479 return;
1480
1481 head_desc = list_first_entry(&chan->pending_list,
1482 struct xilinx_dma_tx_descriptor, node);
1483 tail_desc = list_last_entry(&chan->pending_list,
1484 struct xilinx_dma_tx_descriptor, node);
1485 tail_segment = list_last_entry(&tail_desc->segments,
1486 struct xilinx_cdma_tx_segment, node);
1487
1488 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1489 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1490 ctrl_reg |= chan->desc_pendingcount <<
1491 XILINX_DMA_CR_COALESCE_SHIFT;
1492 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1493 }
1494
1495 if (chan->has_sg) {
1496 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1497 XILINX_CDMA_CR_SGMODE);
1498
1499 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1500 XILINX_CDMA_CR_SGMODE);
1501
1502 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1503 head_desc->async_tx.phys);
1504
1505 /* Update tail ptr register which will start the transfer */
1506 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1507 tail_segment->phys);
1508 } else {
1509 /* In simple mode */
1510 struct xilinx_cdma_tx_segment *segment;
1511 struct xilinx_cdma_desc_hw *hw;
1512
1513 segment = list_first_entry(&head_desc->segments,
1514 struct xilinx_cdma_tx_segment,
1515 node);
1516
1517 hw = &segment->hw;
1518
1519 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1520 xilinx_prep_dma_addr_t(hw->src_addr));
1521 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1522 xilinx_prep_dma_addr_t(hw->dest_addr));
1523
1524 /* Start the transfer */
1525 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1526 hw->control & chan->xdev->max_buffer_len);
1527 }
1528
1529 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1530 chan->desc_pendingcount = 0;
1531 chan->idle = false;
1532 }
1533
1534 /**
1535 * xilinx_dma_start_transfer - Starts DMA transfer
1536 * @chan: Driver specific channel struct pointer
1537 */
1538 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1539 {
1540 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1541 struct xilinx_axidma_tx_segment *tail_segment;
1542 u32 reg;
1543
1544 if (chan->err)
1545 return;
1546
1547 if (list_empty(&chan->pending_list))
1548 return;
1549
1550 if (!chan->idle)
1551 return;
1552
1553 head_desc = list_first_entry(&chan->pending_list,
1554 struct xilinx_dma_tx_descriptor, node);
1555 tail_desc = list_last_entry(&chan->pending_list,
1556 struct xilinx_dma_tx_descriptor, node);
1557 tail_segment = list_last_entry(&tail_desc->segments,
1558 struct xilinx_axidma_tx_segment, node);
1559
1560 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1561
1562 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1563 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1564 reg |= chan->desc_pendingcount <<
1565 XILINX_DMA_CR_COALESCE_SHIFT;
1566 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1567 }
1568
1569 if (chan->has_sg)
1570 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1571 head_desc->async_tx.phys);
1572 reg &= ~XILINX_DMA_CR_DELAY_MAX;
1573 reg |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
1574 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1575
1576 xilinx_dma_start(chan);
1577
1578 if (chan->err)
1579 return;
1580
1581 /* Start the transfer */
1582 if (chan->has_sg) {
1583 if (chan->cyclic)
1584 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1585 chan->cyclic_seg_v->phys);
1586 else
1587 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1588 tail_segment->phys);
1589 } else {
1590 struct xilinx_axidma_tx_segment *segment;
1591 struct xilinx_axidma_desc_hw *hw;
1592
1593 segment = list_first_entry(&head_desc->segments,
1594 struct xilinx_axidma_tx_segment,
1595 node);
1596 hw = &segment->hw;
1597
1598 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1599 xilinx_prep_dma_addr_t(hw->buf_addr));
1600
1601 /* Start the transfer */
1602 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1603 hw->control & chan->xdev->max_buffer_len);
1604 }
1605
1606 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1607 chan->desc_pendingcount = 0;
1608 chan->idle = false;
1609 }
1610
1611 /**
1612 * xilinx_mcdma_start_transfer - Starts MCDMA transfer
1613 * @chan: Driver specific channel struct pointer
1614 */
1615 static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
1616 {
1617 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1618 struct xilinx_aximcdma_tx_segment *tail_segment;
1619 u32 reg;
1620
1621 /*
1622 * The lock is already held by the calling function, so we don't
1623 * need to take it here again.
1624 */
1625
1626 if (chan->err)
1627 return;
1628
1629 if (!chan->idle)
1630 return;
1631
1632 if (list_empty(&chan->pending_list))
1633 return;
1634
1635 head_desc = list_first_entry(&chan->pending_list,
1636 struct xilinx_dma_tx_descriptor, node);
1637 tail_desc = list_last_entry(&chan->pending_list,
1638 struct xilinx_dma_tx_descriptor, node);
1639 tail_segment = list_last_entry(&tail_desc->segments,
1640 struct xilinx_aximcdma_tx_segment, node);
1641
1642 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1643
1644 if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
1645 reg &= ~XILINX_MCDMA_COALESCE_MASK;
1646 reg |= chan->desc_pendingcount <<
1647 XILINX_MCDMA_COALESCE_SHIFT;
1648 }
1649
1650 reg |= XILINX_MCDMA_IRQ_ALL_MASK;
1651 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1652
1653 /* Program current descriptor */
1654 xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
1655 head_desc->async_tx.phys);
1656
1657 /* Program channel enable register */
1658 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
1659 reg |= BIT(chan->tdest);
1660 dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
1661
1662 /* Start the fetch of BDs for the channel */
1663 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1664 reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
1665 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1666
1667 xilinx_dma_start(chan);
1668
1669 if (chan->err)
1670 return;
1671
1672 /* Start the transfer */
1673 xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
1674 tail_segment->phys);
1675
1676 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1677 chan->desc_pendingcount = 0;
1678 chan->idle = false;
1679 }
1680
1681 /**
1682 * xilinx_dma_issue_pending - Issue pending transactions
1683 * @dchan: DMA channel
1684 */
1685 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1686 {
1687 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1688 unsigned long flags;
1689
1690 spin_lock_irqsave(&chan->lock, flags);
1691 chan->start_transfer(chan);
1692 spin_unlock_irqrestore(&chan->lock, flags);
1693 }
1694
1695 /**
1696 * xilinx_dma_device_config - Configure the DMA channel
1697 * @dchan: DMA channel
1698 * @config: channel configuration
1699 *
1700 * Return: 0 always.
1701 */
1702 static int xilinx_dma_device_config(struct dma_chan *dchan,
1703 struct dma_slave_config *config)
1704 {
1705 return 0;
1706 }
1707
1708 /**
1709 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1710 * @chan: Xilinx DMA channel
1711 *
1712 * CONTEXT: hardirq
1713 */
1714 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1715 {
1716 struct xilinx_dma_tx_descriptor *desc, *next;
1717
1718 /* This function was invoked with lock held */
1719 if (list_empty(&chan->active_list))
1720 return;
1721
1722 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1723 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1724 struct xilinx_axidma_tx_segment *seg;
1725
1726 seg = list_last_entry(&desc->segments,
1727 struct xilinx_axidma_tx_segment, node);
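/* In SG mode, stop at the first descriptor whose last BD the hardware has not yet marked complete. */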
1728 if (!(seg->hw.status & XILINX_DMA_BD_COMP_MASK) && chan->has_sg)
1729 break;
1730 }
1731 if (chan->has_sg && chan->xdev->dma_config->dmatype !=
1732 XDMA_TYPE_VDMA)
1733 desc->residue = xilinx_dma_get_residue(chan, desc);
1734 else
1735 desc->residue = 0;
1736 desc->err = chan->err;
1737
1738 list_del(&desc->node);
1739 if (!desc->cyclic)
1740 dma_cookie_complete(&desc->async_tx);
1741 list_add_tail(&desc->node, &chan->done_list);
1742 }
1743 }
1744
1745 /**
1746 * xilinx_dma_reset - Reset DMA channel
1747 * @chan: Driver specific DMA channel
1748 *
1749 * Return: '0' on success and failure value on error
1750 */
1751 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1752 {
1753 int err;
1754 u32 tmp;
1755
1756 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1757
1758 /* Wait for the hardware to finish reset */
1759 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1760 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1761 XILINX_DMA_LOOP_COUNT);
1762
1763 if (err) {
1764 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1765 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1766 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1767 return -ETIMEDOUT;
1768 }
1769
1770 chan->err = false;
1771 chan->idle = true;
1772 chan->desc_pendingcount = 0;
1773 chan->desc_submitcount = 0;
1774
1775 return err;
1776 }
1777
1778 /**
1779 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1780 * @chan: Driver specific DMA channel
1781 *
1782 * Return: '0' on success and failure value on error
1783 */
1784 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1785 {
1786 int err;
1787
1788 /* Reset VDMA */
1789 err = xilinx_dma_reset(chan);
1790 if (err)
1791 return err;
1792
1793 /* Enable interrupts */
1794 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1795 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1796
1797 return 0;
1798 }
1799
1800 /**
1801 * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
1802 * @irq: IRQ number
1803 * @data: Pointer to the Xilinx MCDMA channel structure
1804 *
1805 * Return: IRQ_HANDLED/IRQ_NONE
1806 */
1807 static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
1808 {
1809 struct xilinx_dma_chan *chan = data;
1810 u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
1811
1812 if (chan->direction == DMA_DEV_TO_MEM)
1813 ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
1814 else
1815 ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
1816
1817 /* Read the channel ID raising the interrupt */
1818 chan_sermask = dma_ctrl_read(chan, ser_offset);
1819 chan_id = ffs(chan_sermask);
1820
1821 if (!chan_id)
1822 return IRQ_NONE;
1823
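/*
 * S2MM channels are stored in the upper half of the xdev->chan[] array
 * (see xilinx_dma_chan_probe()), so offset the index for DEV_TO_MEM
 * interrupts before looking up the signalled channel.
 */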
1824 if (chan->direction == DMA_DEV_TO_MEM)
1825 chan_offset = chan->xdev->dma_config->max_channels / 2;
1826
1827 chan_offset = chan_offset + (chan_id - 1);
1828 chan = chan->xdev->chan[chan_offset];
1829 /* Read the status and ack the interrupts. */
1830 status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
1831 if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
1832 return IRQ_NONE;
1833
1834 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
1835 status & XILINX_MCDMA_IRQ_ALL_MASK);
1836
1837 if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
1838 dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
1839 chan,
1840 dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
1841 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
1842 (chan->tdest)),
1843 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
1844 (chan->tdest)));
1845 chan->err = true;
1846 }
1847
1848 if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
1849 /*
1850 * The device is taking too long to complete the transfer while the
1851 * user requires responsiveness.
1852 */
1853 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1854 }
1855
1856 if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
1857 spin_lock(&chan->lock);
1858 xilinx_dma_complete_descriptor(chan);
1859 chan->idle = true;
1860 chan->start_transfer(chan);
1861 spin_unlock(&chan->lock);
1862 }
1863
1864 tasklet_hi_schedule(&chan->tasklet);
1865 return IRQ_HANDLED;
1866 }
1867
1868 /**
1869 * xilinx_dma_irq_handler - DMA Interrupt handler
1870 * @irq: IRQ number
1871 * @data: Pointer to the Xilinx DMA channel structure
1872 *
1873 * Return: IRQ_HANDLED/IRQ_NONE
1874 */
1875 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1876 {
1877 struct xilinx_dma_chan *chan = data;
1878 u32 status;
1879
1880 /* Read the status and ack the interrupts. */
1881 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1882 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1883 return IRQ_NONE;
1884
1885 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1886 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1887
1888 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1889 /*
1890 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1891 * error is recoverable, ignore it. Otherwise flag the error.
1892 *
1893 * Only recoverable errors can be cleared in the DMASR register,
1894 * so make sure not to write other error bits to 1.
1895 */
1896 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1897
1898 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1899 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1900
1901 if (!chan->flush_on_fsync ||
1902 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1903 dev_err(chan->dev,
1904 "Channel %p has errors %x, cdr %x tdr %x\n",
1905 chan, errors,
1906 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1907 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1908 chan->err = true;
1909 }
1910 }
1911
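/*
 * A frame-count or delay-count interrupt means one or more descriptors
 * have completed; move them to the done list and, with the channel now
 * idle, kick off the next pending transfer.
 */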
1912 if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
1913 XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
1914 spin_lock(&chan->lock);
1915 xilinx_dma_complete_descriptor(chan);
1916 chan->idle = true;
1917 chan->start_transfer(chan);
1918 spin_unlock(&chan->lock);
1919 }
1920
1921 tasklet_schedule(&chan->tasklet);
1922 return IRQ_HANDLED;
1923 }
1924
1925 /**
1926 * append_desc_queue - Queue a descriptor on the pending list
1927 * @chan: Driver specific dma channel
1928 * @desc: dma transaction descriptor
1929 */
1930 static void append_desc_queue(struct xilinx_dma_chan *chan,
1931 struct xilinx_dma_tx_descriptor *desc)
1932 {
1933 struct xilinx_vdma_tx_segment *tail_segment;
1934 struct xilinx_dma_tx_descriptor *tail_desc;
1935 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1936 struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
1937 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1938
1939 if (list_empty(&chan->pending_list))
1940 goto append;
1941
1942 /*
1943 * Add the hardware descriptor to the chain of hardware descriptors
1944 * that already exists in memory.
1945 */
1946 tail_desc = list_last_entry(&chan->pending_list,
1947 struct xilinx_dma_tx_descriptor, node);
1948 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1949 tail_segment = list_last_entry(&tail_desc->segments,
1950 struct xilinx_vdma_tx_segment,
1951 node);
1952 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1953 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1954 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1955 struct xilinx_cdma_tx_segment,
1956 node);
1957 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1958 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1959 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1960 struct xilinx_axidma_tx_segment,
1961 node);
1962 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1963 } else {
1964 aximcdma_tail_segment =
1965 list_last_entry(&tail_desc->segments,
1966 struct xilinx_aximcdma_tx_segment,
1967 node);
1968 aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1969 }
1970
1971 /*
1972 * Add the software descriptor and all children to the list
1973 * of pending transactions
1974 */
1975 append:
1976 list_add_tail(&desc->node, &chan->pending_list);
1977 chan->desc_pendingcount++;
1978
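/*
 * VDMA only has a fixed number of frame buffers, so the pending count
 * is capped at the number of frame stores (num_frms).
 */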
1979 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1980 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1981 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1982 chan->desc_pendingcount = chan->num_frms;
1983 }
1984 }
1985
1986 /**
1987 * xilinx_dma_tx_submit - Submit DMA transaction
1988 * @tx: Async transaction descriptor
1989 *
1990 * Return: cookie value on success and failure value on error
1991 */
1992 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1993 {
1994 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1995 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1996 dma_cookie_t cookie;
1997 unsigned long flags;
1998 int err;
1999
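/* Only one cyclic transfer may be active per channel; reject further submissions. */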
2000 if (chan->cyclic) {
2001 xilinx_dma_free_tx_descriptor(chan, desc);
2002 return -EBUSY;
2003 }
2004
2005 if (chan->err) {
2006 /*
2007 * If reset fails, the system needs a hard reset; the
2008 * channel is no longer functional.
2009 */
2010 err = xilinx_dma_chan_reset(chan);
2011 if (err < 0)
2012 return err;
2013 }
2014
2015 spin_lock_irqsave(&chan->lock, flags);
2016
2017 cookie = dma_cookie_assign(tx);
2018
2019 /* Put this transaction onto the tail of the pending queue */
2020 append_desc_queue(chan, desc);
2021
2022 if (desc->cyclic)
2023 chan->cyclic = true;
2024
2025 chan->terminating = false;
2026
2027 spin_unlock_irqrestore(&chan->lock, flags);
2028
2029 return cookie;
2030 }
2031
2032 /**
2033 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
2034 * DMA_SLAVE transaction
2035 * @dchan: DMA channel
2036 * @xt: Interleaved template pointer
2037 * @flags: transfer ack flags
2038 *
2039 * Return: Async transaction descriptor on success and NULL on failure
2040 */
2041 static struct dma_async_tx_descriptor *
2042 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
2043 struct dma_interleaved_template *xt,
2044 unsigned long flags)
2045 {
2046 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2047 struct xilinx_dma_tx_descriptor *desc;
2048 struct xilinx_vdma_tx_segment *segment;
2049 struct xilinx_vdma_desc_hw *hw;
2050
2051 if (!is_slave_direction(xt->dir))
2052 return NULL;
2053
2054 if (!xt->numf || !xt->sgl[0].size)
2055 return NULL;
2056
2057 if (xt->numf & ~XILINX_DMA_VSIZE_MASK ||
2058 xt->sgl[0].size & ~XILINX_DMA_HSIZE_MASK)
2059 return NULL;
2060
2061 if (xt->frame_size != 1)
2062 return NULL;
2063
2064 /* Allocate a transaction descriptor. */
2065 desc = xilinx_dma_alloc_tx_descriptor(chan);
2066 if (!desc)
2067 return NULL;
2068
2069 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2070 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2071 async_tx_ack(&desc->async_tx);
2072
2073 /* Allocate the link descriptor from DMA pool */
2074 segment = xilinx_vdma_alloc_tx_segment(chan);
2075 if (!segment)
2076 goto error;
2077
2078 /* Fill in the hardware descriptor */
2079 hw = &segment->hw;
2080 hw->vsize = xt->numf;
2081 hw->hsize = xt->sgl[0].size;
2082 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
2083 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
2084 hw->stride |= chan->config.frm_dly <<
2085 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
2086
2087 if (xt->dir != DMA_MEM_TO_DEV) {
2088 if (chan->ext_addr) {
2089 hw->buf_addr = lower_32_bits(xt->dst_start);
2090 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
2091 } else {
2092 hw->buf_addr = xt->dst_start;
2093 }
2094 } else {
2095 if (chan->ext_addr) {
2096 hw->buf_addr = lower_32_bits(xt->src_start);
2097 hw->buf_addr_msb = upper_32_bits(xt->src_start);
2098 } else {
2099 hw->buf_addr = xt->src_start;
2100 }
2101 }
2102
2103 /* Insert the segment into the descriptor segments list. */
2104 list_add_tail(&segment->node, &desc->segments);
2105
2106 /* Link the last hardware descriptor with the first. */
2107 segment = list_first_entry(&desc->segments,
2108 struct xilinx_vdma_tx_segment, node);
2109 desc->async_tx.phys = segment->phys;
2110
2111 return &desc->async_tx;
2112
2113 error:
2114 xilinx_dma_free_tx_descriptor(chan, desc);
2115 return NULL;
2116 }
2117
2118 /**
2119 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
2120 * @dchan: DMA channel
2121 * @dma_dst: destination address
2122 * @dma_src: source address
2123 * @len: transfer length
2124 * @flags: transfer ack flags
2125 *
2126 * Return: Async transaction descriptor on success and NULL on failure
2127 */
2128 static struct dma_async_tx_descriptor *
2129 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
2130 dma_addr_t dma_src, size_t len, unsigned long flags)
2131 {
2132 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2133 struct xilinx_dma_tx_descriptor *desc;
2134 struct xilinx_cdma_tx_segment *segment;
2135 struct xilinx_cdma_desc_hw *hw;
2136
2137 if (!len || len > chan->xdev->max_buffer_len)
2138 return NULL;
2139
2140 desc = xilinx_dma_alloc_tx_descriptor(chan);
2141 if (!desc)
2142 return NULL;
2143
2144 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2145 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2146
2147 /* Allocate the link descriptor from DMA pool */
2148 segment = xilinx_cdma_alloc_tx_segment(chan);
2149 if (!segment)
2150 goto error;
2151
2152 hw = &segment->hw;
2153 hw->control = len;
2154 hw->src_addr = dma_src;
2155 hw->dest_addr = dma_dst;
2156 if (chan->ext_addr) {
2157 hw->src_addr_msb = upper_32_bits(dma_src);
2158 hw->dest_addr_msb = upper_32_bits(dma_dst);
2159 }
2160
2161 /* Insert the segment into the descriptor segments list. */
2162 list_add_tail(&segment->node, &desc->segments);
2163
2164 desc->async_tx.phys = segment->phys;
2165 hw->next_desc = segment->phys;
2166
2167 return &desc->async_tx;
2168
2169 error:
2170 xilinx_dma_free_tx_descriptor(chan, desc);
2171 return NULL;
2172 }
2173
2174 /**
2175 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
2176 * @dchan: DMA channel
2177 * @sgl: scatterlist to transfer to/from
2178 * @sg_len: number of entries in @sgl
2179 * @direction: DMA direction
2180 * @flags: transfer ack flags
2181 * @context: APP words of the descriptor
2182 *
2183 * Return: Async transaction descriptor on success and NULL on failure
2184 */
2185 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
2186 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
2187 enum dma_transfer_direction direction, unsigned long flags,
2188 void *context)
2189 {
2190 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2191 struct xilinx_dma_tx_descriptor *desc;
2192 struct xilinx_axidma_tx_segment *segment = NULL;
2193 u32 *app_w = (u32 *)context;
2194 struct scatterlist *sg;
2195 size_t copy;
2196 size_t sg_used;
2197 unsigned int i;
2198
2199 if (!is_slave_direction(direction))
2200 return NULL;
2201
2202 /* Allocate a transaction descriptor. */
2203 desc = xilinx_dma_alloc_tx_descriptor(chan);
2204 if (!desc)
2205 return NULL;
2206
2207 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2208 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2209
2210 /* Build transactions using information in the scatter gather list */
2211 for_each_sg(sgl, sg, sg_len, i) {
2212 sg_used = 0;
2213
2214 /* Loop until the entire scatterlist entry is used */
2215 while (sg_used < sg_dma_len(sg)) {
2216 struct xilinx_axidma_desc_hw *hw;
2217
2218 /* Get a free segment */
2219 segment = xilinx_axidma_alloc_tx_segment(chan);
2220 if (!segment)
2221 goto error;
2222
2223 /*
2224 * Calculate the maximum number of bytes to transfer,
2225 * making sure it is less than the hw limit
2226 */
2227 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
2228 sg_used);
2229 hw = &segment->hw;
2230
2231 /* Fill in the descriptor */
2232 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
2233 sg_used, 0);
2234
2235 hw->control = copy;
2236
2237 if (chan->direction == DMA_MEM_TO_DEV) {
2238 if (app_w)
2239 memcpy(hw->app, app_w, sizeof(u32) *
2240 XILINX_DMA_NUM_APP_WORDS);
2241 }
2242
2243 sg_used += copy;
2244
2245 /*
2246 * Insert the segment into the descriptor segments
2247 * list.
2248 */
2249 list_add_tail(&segment->node, &desc->segments);
2250 }
2251 }
2252
2253 segment = list_first_entry(&desc->segments,
2254 struct xilinx_axidma_tx_segment, node);
2255 desc->async_tx.phys = segment->phys;
2256
2257 /* For a DMA_MEM_TO_DEV transfer, set SOP on the first segment and EOP on the last */
2258 if (chan->direction == DMA_MEM_TO_DEV) {
2259 segment->hw.control |= XILINX_DMA_BD_SOP;
2260 segment = list_last_entry(&desc->segments,
2261 struct xilinx_axidma_tx_segment,
2262 node);
2263 segment->hw.control |= XILINX_DMA_BD_EOP;
2264 }
2265
2266 if (chan->xdev->has_axistream_connected)
2267 desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
2268
2269 return &desc->async_tx;
2270
2271 error:
2272 xilinx_dma_free_tx_descriptor(chan, desc);
2273 return NULL;
2274 }
2275
2276 /**
2277 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
2278 * @dchan: DMA channel
2279 * @buf_addr: Physical address of the buffer
2280 * @buf_len: Total length of the cyclic buffers
2281 * @period_len: length of individual cyclic buffer
2282 * @direction: DMA direction
2283 * @flags: transfer ack flags
2284 *
2285 * Return: Async transaction descriptor on success and NULL on failure
2286 */
2287 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
2288 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
2289 size_t period_len, enum dma_transfer_direction direction,
2290 unsigned long flags)
2291 {
2292 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2293 struct xilinx_dma_tx_descriptor *desc;
2294 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
2295 size_t copy, sg_used;
2296 unsigned int num_periods;
2297 int i;
2298 u32 reg;
2299
2300 if (!period_len)
2301 return NULL;
2302
2303 num_periods = buf_len / period_len;
2304
2305 if (!num_periods)
2306 return NULL;
2307
2308 if (!is_slave_direction(direction))
2309 return NULL;
2310
2311 /* Allocate a transaction descriptor. */
2312 desc = xilinx_dma_alloc_tx_descriptor(chan);
2313 if (!desc)
2314 return NULL;
2315
2316 chan->direction = direction;
2317 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2318 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2319
2320 for (i = 0; i < num_periods; ++i) {
2321 sg_used = 0;
2322
2323 while (sg_used < period_len) {
2324 struct xilinx_axidma_desc_hw *hw;
2325
2326 /* Get a free segment */
2327 segment = xilinx_axidma_alloc_tx_segment(chan);
2328 if (!segment)
2329 goto error;
2330
2331 /*
2332 * Calculate the maximum number of bytes to transfer,
2333 * making sure it is less than the hw limit
2334 */
2335 copy = xilinx_dma_calc_copysize(chan, period_len,
2336 sg_used);
2337 hw = &segment->hw;
2338 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
2339 period_len * i);
2340 hw->control = copy;
2341
2342 if (prev)
2343 prev->hw.next_desc = segment->phys;
2344
2345 prev = segment;
2346 sg_used += copy;
2347
2348 /*
2349 * Insert the segment into the descriptor segments
2350 * list.
2351 */
2352 list_add_tail(&segment->node, &desc->segments);
2353 }
2354 }
2355
2356 head_segment = list_first_entry(&desc->segments,
2357 struct xilinx_axidma_tx_segment, node);
2358 desc->async_tx.phys = head_segment->phys;
2359
2360 desc->cyclic = true;
2361 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2362 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2363 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2364
2365 segment = list_last_entry(&desc->segments,
2366 struct xilinx_axidma_tx_segment,
2367 node);
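/* Close the BD ring: link the last segment back to the first one. */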
2368 segment->hw.next_desc = (u32) head_segment->phys;
2369
2370 /* For a DMA_MEM_TO_DEV transfer, set SOP on the first BD and EOP on the last */
2371 if (direction == DMA_MEM_TO_DEV) {
2372 head_segment->hw.control |= XILINX_DMA_BD_SOP;
2373 segment->hw.control |= XILINX_DMA_BD_EOP;
2374 }
2375
2376 return &desc->async_tx;
2377
2378 error:
2379 xilinx_dma_free_tx_descriptor(chan, desc);
2380 return NULL;
2381 }
2382
2383 /**
2384 * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
2385 * @dchan: DMA channel
2386 * @sgl: scatterlist to transfer to/from
2387 * @sg_len: number of entries in @sgl
2388 * @direction: DMA direction
2389 * @flags: transfer ack flags
2390 * @context: APP words of the descriptor
2391 *
2392 * Return: Async transaction descriptor on success and NULL on failure
2393 */
2394 static struct dma_async_tx_descriptor *
2395 xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
2396 unsigned int sg_len,
2397 enum dma_transfer_direction direction,
2398 unsigned long flags, void *context)
2399 {
2400 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2401 struct xilinx_dma_tx_descriptor *desc;
2402 struct xilinx_aximcdma_tx_segment *segment = NULL;
2403 u32 *app_w = (u32 *)context;
2404 struct scatterlist *sg;
2405 size_t copy;
2406 size_t sg_used;
2407 unsigned int i;
2408
2409 if (!is_slave_direction(direction))
2410 return NULL;
2411
2412 /* Allocate a transaction descriptor. */
2413 desc = xilinx_dma_alloc_tx_descriptor(chan);
2414 if (!desc)
2415 return NULL;
2416
2417 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2418 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2419
2420 /* Build transactions using information in the scatter gather list */
2421 for_each_sg(sgl, sg, sg_len, i) {
2422 sg_used = 0;
2423
2424 /* Loop until the entire scatterlist entry is used */
2425 while (sg_used < sg_dma_len(sg)) {
2426 struct xilinx_aximcdma_desc_hw *hw;
2427
2428 /* Get a free segment */
2429 segment = xilinx_aximcdma_alloc_tx_segment(chan);
2430 if (!segment)
2431 goto error;
2432
2433 /*
2434 * Calculate the maximum number of bytes to transfer,
2435 * making sure it is less than the hw limit
2436 */
2437 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
2438 chan->xdev->max_buffer_len);
2439 hw = &segment->hw;
2440
2441 /* Fill in the descriptor */
2442 xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
2443 sg_used);
2444 hw->control = copy;
2445
2446 if (chan->direction == DMA_MEM_TO_DEV && app_w) {
2447 memcpy(hw->app, app_w, sizeof(u32) *
2448 XILINX_DMA_NUM_APP_WORDS);
2449 }
2450
2451 sg_used += copy;
2452 /*
2453 * Insert the segment into the descriptor segments
2454 * list.
2455 */
2456 list_add_tail(&segment->node, &desc->segments);
2457 }
2458 }
2459
2460 segment = list_first_entry(&desc->segments,
2461 struct xilinx_aximcdma_tx_segment, node);
2462 desc->async_tx.phys = segment->phys;
2463
2464 /* For a DMA_MEM_TO_DEV transfer, set SOP on the first segment and EOP on the last */
2465 if (chan->direction == DMA_MEM_TO_DEV) {
2466 segment->hw.control |= XILINX_MCDMA_BD_SOP;
2467 segment = list_last_entry(&desc->segments,
2468 struct xilinx_aximcdma_tx_segment,
2469 node);
2470 segment->hw.control |= XILINX_MCDMA_BD_EOP;
2471 }
2472
2473 return &desc->async_tx;
2474
2475 error:
2476 xilinx_dma_free_tx_descriptor(chan, desc);
2477
2478 return NULL;
2479 }
2480
2481 /**
2482 * xilinx_dma_terminate_all - Halt the channel and free descriptors
2483 * @dchan: Driver specific DMA Channel pointer
2484 *
2485 * Return: '0' always.
2486 */
2487 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2488 {
2489 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2490 u32 reg;
2491 int err;
2492
2493 if (!chan->cyclic) {
2494 err = chan->stop_transfer(chan);
2495 if (err) {
2496 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2497 chan, dma_ctrl_read(chan,
2498 XILINX_DMA_REG_DMASR));
2499 chan->err = true;
2500 }
2501 }
2502
2503 xilinx_dma_chan_reset(chan);
2504 /* Remove and free all of the descriptors in the lists */
2505 chan->terminating = true;
2506 xilinx_dma_free_descriptors(chan);
2507 chan->idle = true;
2508
2509 if (chan->cyclic) {
2510 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2511 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2512 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2513 chan->cyclic = false;
2514 }
2515
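/* If CDMA was using SG mode, clear the SGMODE bit to disable it now that the channel is torn down. */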
2516 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2517 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2518 XILINX_CDMA_CR_SGMODE);
2519
2520 return 0;
2521 }
2522
2523 static void xilinx_dma_synchronize(struct dma_chan *dchan)
2524 {
2525 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2526
2527 tasklet_kill(&chan->tasklet);
2528 }
2529
2530 /**
2531 * xilinx_vdma_channel_set_config - Configure VDMA channel
2532 * Run-time configuration for AXI VDMA, supports:
2533 * . halt the channel
2534 * . configure interrupt coalescing and inter-packet delay threshold
2535 * . start/stop parking
2536 * . enable genlock
2537 *
2538 * @dchan: DMA channel
2539 * @cfg: VDMA device configuration pointer
2540 *
2541 * Return: '0' on success and failure value on error
2542 */
2543 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2544 struct xilinx_vdma_config *cfg)
2545 {
2546 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2547 u32 dmacr;
2548
2549 if (cfg->reset)
2550 return xilinx_dma_chan_reset(chan);
2551
2552 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2553
2554 chan->config.frm_dly = cfg->frm_dly;
2555 chan->config.park = cfg->park;
2556
2557 /* genlock settings */
2558 chan->config.gen_lock = cfg->gen_lock;
2559 chan->config.master = cfg->master;
2560
2561 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2562 if (cfg->gen_lock && chan->genlock) {
2563 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2564 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2565 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2566 }
2567
2568 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2569 chan->config.vflip_en = cfg->vflip_en;
2570
2571 if (cfg->park)
2572 chan->config.park_frm = cfg->park_frm;
2573 else
2574 chan->config.park_frm = -1;
2575
2576 chan->config.coalesc = cfg->coalesc;
2577 chan->config.delay = cfg->delay;
2578
2579 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2580 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2581 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2582 chan->config.coalesc = cfg->coalesc;
2583 }
2584
2585 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2586 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2587 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2588 chan->config.delay = cfg->delay;
2589 }
2590
2591 /* FSync Source selection */
2592 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2593 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2594
2595 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2596
2597 return 0;
2598 }
2599 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
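/*
 * Minimal usage sketch (illustrative only, not part of this driver): a
 * DMA client that has obtained a VDMA channel, e.g. with
 * chan = dma_request_chan(dev, "vdma0") where "vdma0" is a hypothetical
 * dma-names entry, could enable genlock and frame-count coalescing:
 *
 *	struct xilinx_vdma_config cfg = {
 *		.gen_lock = 1,
 *		.master = 0,
 *		.coalesc = 4,
 *	};
 *
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 */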
2600
2601 /* -----------------------------------------------------------------------------
2602 * Probe and remove
2603 */
2604
2605 /**
2606 * xilinx_dma_chan_remove - Per Channel remove function
2607 * @chan: Driver specific DMA channel
2608 */
2609 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2610 {
2611 /* Disable all interrupts */
2612 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2613 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2614
2615 if (chan->irq > 0)
2616 free_irq(chan->irq, chan);
2617
2618 tasklet_kill(&chan->tasklet);
2619
2620 list_del(&chan->common.device_node);
2621 }
2622
2623 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2624 struct clk **tx_clk, struct clk **rx_clk,
2625 struct clk **sg_clk, struct clk **tmp_clk)
2626 {
2627 int err;
2628
2629 *tmp_clk = NULL;
2630
2631 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2632 if (IS_ERR(*axi_clk))
2633 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2634
2635 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2636 if (IS_ERR(*tx_clk))
2637 *tx_clk = NULL;
2638
2639 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2640 if (IS_ERR(*rx_clk))
2641 *rx_clk = NULL;
2642
2643 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2644 if (IS_ERR(*sg_clk))
2645 *sg_clk = NULL;
2646
2647 err = clk_prepare_enable(*axi_clk);
2648 if (err) {
2649 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2650 return err;
2651 }
2652
2653 err = clk_prepare_enable(*tx_clk);
2654 if (err) {
2655 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2656 goto err_disable_axiclk;
2657 }
2658
2659 err = clk_prepare_enable(*rx_clk);
2660 if (err) {
2661 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2662 goto err_disable_txclk;
2663 }
2664
2665 err = clk_prepare_enable(*sg_clk);
2666 if (err) {
2667 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2668 goto err_disable_rxclk;
2669 }
2670
2671 return 0;
2672
2673 err_disable_rxclk:
2674 clk_disable_unprepare(*rx_clk);
2675 err_disable_txclk:
2676 clk_disable_unprepare(*tx_clk);
2677 err_disable_axiclk:
2678 clk_disable_unprepare(*axi_clk);
2679
2680 return err;
2681 }
2682
2683 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2684 struct clk **dev_clk, struct clk **tmp_clk,
2685 struct clk **tmp1_clk, struct clk **tmp2_clk)
2686 {
2687 int err;
2688
2689 *tmp_clk = NULL;
2690 *tmp1_clk = NULL;
2691 *tmp2_clk = NULL;
2692
2693 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2694 if (IS_ERR(*axi_clk))
2695 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2696
2697 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2698 if (IS_ERR(*dev_clk))
2699 return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n");
2700
2701 err = clk_prepare_enable(*axi_clk);
2702 if (err) {
2703 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2704 return err;
2705 }
2706
2707 err = clk_prepare_enable(*dev_clk);
2708 if (err) {
2709 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2710 goto err_disable_axiclk;
2711 }
2712
2713 return 0;
2714
2715 err_disable_axiclk:
2716 clk_disable_unprepare(*axi_clk);
2717
2718 return err;
2719 }
2720
2721 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2722 struct clk **tx_clk, struct clk **txs_clk,
2723 struct clk **rx_clk, struct clk **rxs_clk)
2724 {
2725 int err;
2726
2727 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2728 if (IS_ERR(*axi_clk))
2729 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2730
2731 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2732 if (IS_ERR(*tx_clk))
2733 *tx_clk = NULL;
2734
2735 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2736 if (IS_ERR(*txs_clk))
2737 *txs_clk = NULL;
2738
2739 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2740 if (IS_ERR(*rx_clk))
2741 *rx_clk = NULL;
2742
2743 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2744 if (IS_ERR(*rxs_clk))
2745 *rxs_clk = NULL;
2746
2747 err = clk_prepare_enable(*axi_clk);
2748 if (err) {
2749 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
2750 err);
2751 return err;
2752 }
2753
2754 err = clk_prepare_enable(*tx_clk);
2755 if (err) {
2756 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2757 goto err_disable_axiclk;
2758 }
2759
2760 err = clk_prepare_enable(*txs_clk);
2761 if (err) {
2762 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2763 goto err_disable_txclk;
2764 }
2765
2766 err = clk_prepare_enable(*rx_clk);
2767 if (err) {
2768 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2769 goto err_disable_txsclk;
2770 }
2771
2772 err = clk_prepare_enable(*rxs_clk);
2773 if (err) {
2774 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2775 goto err_disable_rxclk;
2776 }
2777
2778 return 0;
2779
2780 err_disable_rxclk:
2781 clk_disable_unprepare(*rx_clk);
2782 err_disable_txsclk:
2783 clk_disable_unprepare(*txs_clk);
2784 err_disable_txclk:
2785 clk_disable_unprepare(*tx_clk);
2786 err_disable_axiclk:
2787 clk_disable_unprepare(*axi_clk);
2788
2789 return err;
2790 }
2791
2792 static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2793 {
2794 clk_disable_unprepare(xdev->rxs_clk);
2795 clk_disable_unprepare(xdev->rx_clk);
2796 clk_disable_unprepare(xdev->txs_clk);
2797 clk_disable_unprepare(xdev->tx_clk);
2798 clk_disable_unprepare(xdev->axi_clk);
2799 }
2800
2801 /**
2802 * xilinx_dma_chan_probe - Per Channel Probing
2803 * It gets channel features from the device tree entry and
2804 * initializes special channel handling routines.
2805 *
2806 * @xdev: Driver specific device structure
2807 * @node: Device node
2808 *
2809 * Return: '0' on success and failure value on error
2810 */
2811 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2812 struct device_node *node)
2813 {
2814 struct xilinx_dma_chan *chan;
2815 bool has_dre = false;
2816 u32 value, width;
2817 int err;
2818
2819 /* Allocate and initialize the channel structure */
2820 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2821 if (!chan)
2822 return -ENOMEM;
2823
2824 chan->dev = xdev->dev;
2825 chan->xdev = xdev;
2826 chan->desc_pendingcount = 0x0;
2827 chan->ext_addr = xdev->ext_addr;
2828 /* This variable ensures that descriptors are not
2829 * submitted when the DMA engine is in progress. It is added to
2830 * avoid polling for a bit in the status register to know the
2831 * DMA state in the driver hot path.
2832 */
2833 chan->idle = true;
2834
2835 spin_lock_init(&chan->lock);
2836 INIT_LIST_HEAD(&chan->pending_list);
2837 INIT_LIST_HEAD(&chan->done_list);
2838 INIT_LIST_HEAD(&chan->active_list);
2839 INIT_LIST_HEAD(&chan->free_seg_list);
2840
2841 /* Retrieve the channel properties from the device tree */
2842 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2843
2844 of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);
2845
2846 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2847
2848 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2849 if (err) {
2850 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2851 return err;
2852 }
2853 width = value >> 3; /* Convert bits to bytes */
2854
2855 /* If data width is greater than 8 bytes, DRE is not in hw */
2856 if (width > 8)
2857 has_dre = false;
2858
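/*
 * Without DRE in hardware, buffers must be aligned to the stream data
 * width, so advertise that requirement to the dmaengine core via
 * copy_align.
 */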
2859 if (!has_dre)
2860 xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
2861
2862 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2863 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2864 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2865 chan->direction = DMA_MEM_TO_DEV;
2866 chan->id = xdev->mm2s_chan_id++;
2867 chan->tdest = chan->id;
2868
2869 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2870 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2871 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2872 chan->config.park = 1;
2873
2874 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2875 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2876 chan->flush_on_fsync = true;
2877 }
2878 } else if (of_device_is_compatible(node,
2879 "xlnx,axi-vdma-s2mm-channel") ||
2880 of_device_is_compatible(node,
2881 "xlnx,axi-dma-s2mm-channel")) {
2882 chan->direction = DMA_DEV_TO_MEM;
2883 chan->id = xdev->s2mm_chan_id++;
2884 chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
2885 chan->has_vflip = of_property_read_bool(node,
2886 "xlnx,enable-vert-flip");
2887 if (chan->has_vflip) {
2888 chan->config.vflip_en = dma_read(chan,
2889 XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2890 XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2891 }
2892
2893 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
2894 chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
2895 else
2896 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2897
2898 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2899 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2900 chan->config.park = 1;
2901
2902 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2903 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2904 chan->flush_on_fsync = true;
2905 }
2906 } else {
2907 dev_err(xdev->dev, "Invalid channel compatible node\n");
2908 return -EINVAL;
2909 }
2910
2911 /* Request the interrupt */
2912 chan->irq = of_irq_get(node, chan->tdest);
2913 if (chan->irq < 0)
2914 return dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n");
2915 err = request_irq(chan->irq, xdev->dma_config->irq_handler,
2916 IRQF_SHARED, "xilinx-dma-controller", chan);
2917 if (err) {
2918 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2919 return err;
2920 }
2921
2922 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2923 chan->start_transfer = xilinx_dma_start_transfer;
2924 chan->stop_transfer = xilinx_dma_stop_transfer;
2925 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
2926 chan->start_transfer = xilinx_mcdma_start_transfer;
2927 chan->stop_transfer = xilinx_dma_stop_transfer;
2928 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2929 chan->start_transfer = xilinx_cdma_start_transfer;
2930 chan->stop_transfer = xilinx_cdma_stop_transfer;
2931 } else {
2932 chan->start_transfer = xilinx_vdma_start_transfer;
2933 chan->stop_transfer = xilinx_dma_stop_transfer;
2934 }
2935
2936 /* check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */
2937 if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
2938 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
2939 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
2940 XILINX_DMA_DMASR_SG_MASK)
2941 chan->has_sg = true;
2942 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
2943 chan->has_sg ? "enabled" : "disabled");
2944 }
2945
2946 /* Initialize the tasklet */
2947 tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet);
2948
2949 /*
2950 * Initialize the DMA channel and add it to the DMA engine channels
2951 * list.
2952 */
2953 chan->common.device = &xdev->common;
2954
2955 list_add_tail(&chan->common.device_node, &xdev->common.channels);
2956 xdev->chan[chan->id] = chan;
2957
2958 /* Reset the channel */
2959 err = xilinx_dma_chan_reset(chan);
2960 if (err < 0) {
2961 dev_err(xdev->dev, "Reset channel failed\n");
2962 return err;
2963 }
2964
2965 return 0;
2966 }
2967
2968 /**
2969 * xilinx_dma_child_probe - Per child node probe
2970 * It gets the number of dma-channels per child node from the
2971 * device tree and initializes all the channels.
2972 *
2973 * @xdev: Driver specific device structure
2974 * @node: Device node
2975 *
2976 * Return: '0' on success and failure value on error.
2977 */
2978 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2979 struct device_node *node)
2980 {
2981 int ret, i;
2982 u32 nr_channels = 1;
2983
2984 ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2985 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
2986 dev_warn(xdev->dev, "missing dma-channels property\n");
2987
2988 for (i = 0; i < nr_channels; i++) {
2989 ret = xilinx_dma_chan_probe(xdev, node);
2990 if (ret)
2991 return ret;
2992 }
2993
2994 return 0;
2995 }
2996
2997 /**
2998 * of_dma_xilinx_xlate - Translation function
2999 * @dma_spec: Pointer to DMA specifier as found in the device tree
3000 * @ofdma: Pointer to DMA controller data
3001 *
3002 * Return: DMA channel pointer on success and NULL on error
3003 */
3004 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
3005 struct of_dma *ofdma)
3006 {
3007 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
3008 int chan_id = dma_spec->args[0];
3009
3010 if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
3011 return NULL;
3012
3013 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
3014 }
3015
3016 static const struct xilinx_dma_config axidma_config = {
3017 .dmatype = XDMA_TYPE_AXIDMA,
3018 .clk_init = axidma_clk_init,
3019 .irq_handler = xilinx_dma_irq_handler,
3020 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
3021 };
3022
3023 static const struct xilinx_dma_config aximcdma_config = {
3024 .dmatype = XDMA_TYPE_AXIMCDMA,
3025 .clk_init = axidma_clk_init,
3026 .irq_handler = xilinx_mcdma_irq_handler,
3027 .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
3028 };
3029 static const struct xilinx_dma_config axicdma_config = {
3030 .dmatype = XDMA_TYPE_CDMA,
3031 .clk_init = axicdma_clk_init,
3032 .irq_handler = xilinx_dma_irq_handler,
3033 .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
3034 };
3035
3036 static const struct xilinx_dma_config axivdma_config = {
3037 .dmatype = XDMA_TYPE_VDMA,
3038 .clk_init = axivdma_clk_init,
3039 .irq_handler = xilinx_dma_irq_handler,
3040 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
3041 };
3042
3043 static const struct of_device_id xilinx_dma_of_ids[] = {
3044 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
3045 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
3046 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
3047 { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
3048 {}
3049 };
3050 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
3051
3052 /**
3053 * xilinx_dma_probe - Driver probe function
3054 * @pdev: Pointer to the platform_device structure
3055 *
3056 * Return: '0' on success and failure value on error
3057 */
3058 static int xilinx_dma_probe(struct platform_device *pdev)
3059 {
3060 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
3061 struct clk **, struct clk **, struct clk **)
3062 = axivdma_clk_init;
3063 struct device_node *node = pdev->dev.of_node;
3064 struct xilinx_dma_device *xdev;
3065 struct device_node *child, *np = pdev->dev.of_node;
3066 u32 num_frames, addr_width, len_width;
3067 int i, err;
3068
3069 /* Allocate and initialize the DMA engine structure */
3070 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
3071 if (!xdev)
3072 return -ENOMEM;
3073
3074 xdev->dev = &pdev->dev;
3075 if (np) {
3076 const struct of_device_id *match;
3077
3078 match = of_match_node(xilinx_dma_of_ids, np);
3079 if (match && match->data) {
3080 xdev->dma_config = match->data;
3081 clk_init = xdev->dma_config->clk_init;
3082 }
3083 }
3084
3085 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
3086 &xdev->rx_clk, &xdev->rxs_clk);
3087 if (err)
3088 return err;
3089
3090 /* Request and map I/O memory */
3091 xdev->regs = devm_platform_ioremap_resource(pdev, 0);
3092 if (IS_ERR(xdev->regs)) {
3093 err = PTR_ERR(xdev->regs);
3094 goto disable_clks;
3095 }
3096 /* Retrieve the DMA engine properties from the device tree */
3097 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
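/* Channel IDs: MM2S channels occupy the lower half, S2MM channels the upper half. */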
3098 xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
3099
3100 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
3101 xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3102 if (!of_property_read_u32(node, "xlnx,sg-length-width",
3103 &len_width)) {
3104 if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
3105 len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
3106 dev_warn(xdev->dev,
3107 "invalid xlnx,sg-length-width property value. Using default width\n");
3108 } else {
3109 if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
3110 dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
3111 xdev->max_buffer_len =
3112 GENMASK(len_width - 1, 0);
3113 }
3114 }
3115 }
3116
3117 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
3118 xdev->has_axistream_connected =
3119 of_property_read_bool(node, "xlnx,axistream-connected");
3120 }
3121
3122 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3123 err = of_property_read_u32(node, "xlnx,num-fstores",
3124 &num_frames);
3125 if (err < 0) {
3126 dev_err(xdev->dev,
3127 "missing xlnx,num-fstores property\n");
3128 goto disable_clks;
3129 }
3130
3131 err = of_property_read_u32(node, "xlnx,flush-fsync",
3132 &xdev->flush_on_fsync);
3133 if (err < 0)
3134 dev_warn(xdev->dev,
3135 "missing xlnx,flush-fsync property\n");
3136 }
3137
3138 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
3139 if (err < 0)
3140 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
3141
3142 if (addr_width > 32)
3143 xdev->ext_addr = true;
3144 else
3145 xdev->ext_addr = false;
3146
3147 /* Set metadata mode */
3148 if (xdev->has_axistream_connected)
3149 xdev->common.desc_metadata_modes = DESC_METADATA_ENGINE;
3150
3151 /* Set the dma mask bits */
3152 err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
3153 if (err < 0) {
3154 dev_err(xdev->dev, "DMA mask error %d\n", err);
3155 goto disable_clks;
3156 }
3157
3158 /* Initialize the DMA engine */
3159 xdev->common.dev = &pdev->dev;
3160
3161 INIT_LIST_HEAD(&xdev->common.channels);
3162 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
3163 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
3164 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
3165 }
3166
3167 xdev->common.device_alloc_chan_resources =
3168 xilinx_dma_alloc_chan_resources;
3169 xdev->common.device_free_chan_resources =
3170 xilinx_dma_free_chan_resources;
3171 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
3172 xdev->common.device_synchronize = xilinx_dma_synchronize;
3173 xdev->common.device_tx_status = xilinx_dma_tx_status;
3174 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
3175 xdev->common.device_config = xilinx_dma_device_config;
3176 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
3177 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
3178 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
3179 xdev->common.device_prep_dma_cyclic =
3180 xilinx_dma_prep_dma_cyclic;
3181 /* Residue calculation is supported by only AXI DMA and CDMA */
3182 xdev->common.residue_granularity =
3183 DMA_RESIDUE_GRANULARITY_SEGMENT;
3184 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
3185 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
3186 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
3187 /* Residue calculation is supported by only AXI DMA and CDMA */
3188 xdev->common.residue_granularity =
3189 DMA_RESIDUE_GRANULARITY_SEGMENT;
3190 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3191 xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
3192 } else {
3193 xdev->common.device_prep_interleaved_dma =
3194 xilinx_vdma_dma_prep_interleaved;
3195 }
3196
3197 platform_set_drvdata(pdev, xdev);
3198
3199 /* Initialize the channels */
3200 for_each_child_of_node(node, child) {
3201 err = xilinx_dma_child_probe(xdev, child);
3202 if (err < 0) {
3203 of_node_put(child);
3204 goto error;
3205 }
3206 }
3207
3208 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3209 for (i = 0; i < xdev->dma_config->max_channels; i++)
3210 if (xdev->chan[i])
3211 xdev->chan[i]->num_frms = num_frames;
3212 }
3213
3214 /* Register the DMA engine with the core */
3215 err = dma_async_device_register(&xdev->common);
3216 if (err) {
3217 dev_err(xdev->dev, "failed to register the dma device\n");
3218 goto error;
3219 }
3220
3221 err = of_dma_controller_register(node, of_dma_xilinx_xlate,
3222 xdev);
3223 if (err < 0) {
3224 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
3225 dma_async_device_unregister(&xdev->common);
3226 goto error;
3227 }
3228
3229 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
3230 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
3231 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
3232 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
3233 else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
3234 dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
3235 else
3236 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
3237
3238 return 0;
3239
3240 error:
3241 for (i = 0; i < xdev->dma_config->max_channels; i++)
3242 if (xdev->chan[i])
3243 xilinx_dma_chan_remove(xdev->chan[i]);
3244 disable_clks:
3245 xdma_disable_allclks(xdev);
3246
3247 return err;
3248 }
3249
3250 /**
3251 * xilinx_dma_remove - Driver remove function
3252 * @pdev: Pointer to the platform_device structure
3253 */
3254 static void xilinx_dma_remove(struct platform_device *pdev)
3255 {
3256 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
3257 int i;
3258
3259 of_dma_controller_free(pdev->dev.of_node);
3260
3261 dma_async_device_unregister(&xdev->common);
3262
3263 for (i = 0; i < xdev->dma_config->max_channels; i++)
3264 if (xdev->chan[i])
3265 xilinx_dma_chan_remove(xdev->chan[i]);
3266
3267 xdma_disable_allclks(xdev);
3268 }
3269
3270 static struct platform_driver xilinx_vdma_driver = {
3271 .driver = {
3272 .name = "xilinx-vdma",
3273 .of_match_table = xilinx_dma_of_ids,
3274 },
3275 .probe = xilinx_dma_probe,
3276 .remove = xilinx_dma_remove,
3277 };
3278
3279 module_platform_driver(xilinx_vdma_driver);
3280
3281 MODULE_AUTHOR("Xilinx, Inc.");
3282 MODULE_DESCRIPTION("Xilinx VDMA driver");
3283 MODULE_LICENSE("GPL v2");
3284