2 * DMA driver for Xilinx Video DMA Engine
4 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
6 * Based on the Freescale DMA driver.
9 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
10 * core that provides high-bandwidth direct memory access between memory
11 * and AXI4-Stream type video target peripherals. The core provides efficient
12 * two dimensional DMA operations with independent asynchronous read (S2MM)
13 * and write (MM2S) channel operation. It can be configured to have either
14 * one channel or two channels. If configured as two channels, one is to
15 * transmit to the video device (MM2S) and another is to receive from the
16 * video device (S2MM). Initialization, status, interrupt and management
17 * registers are accessed through an AXI4-Lite slave interface.
19 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
20 * provides high-bandwidth one dimensional direct memory access between memory
21 * and AXI4-Stream target peripherals. It supports one receive and one
22 * transmit channel, both of them optional at synthesis time.
24 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
25 * Access (DMA) between a memory-mapped source address and a memory-mapped
26 * destination address.
28 * This program is free software: you can redistribute it and/or modify
29 * it under the terms of the GNU General Public License as published by
30 * the Free Software Foundation, either version 2 of the License, or
31 * (at your option) any later version.
34 #include <linux/bitops.h>
35 #include <linux/dmapool.h>
36 #include <linux/dma/xilinx_dma.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
40 #include <linux/iopoll.h>
41 #include <linux/module.h>
42 #include <linux/of_address.h>
43 #include <linux/of_dma.h>
44 #include <linux/of_platform.h>
45 #include <linux/of_irq.h>
46 #include <linux/slab.h>
47 #include <linux/clk.h>
48 #include <linux/io-64-nonatomic-lo-hi.h>
50 #include "../dmaengine.h"
52 /* Register/Descriptor Offsets */
53 #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
54 #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
55 #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
56 #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
58 /* Control Registers */
59 #define XILINX_DMA_REG_DMACR 0x0000
60 #define XILINX_DMA_DMACR_DELAY_MAX 0xff
61 #define XILINX_DMA_DMACR_DELAY_SHIFT 24
62 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
63 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
64 #define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
65 #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
66 #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
67 #define XILINX_DMA_DMACR_MASTER_SHIFT 8
68 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
69 #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
70 #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
71 #define XILINX_DMA_DMACR_RESET BIT(2)
72 #define XILINX_DMA_DMACR_CIRC_EN BIT(1)
73 #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
74 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
76 #define XILINX_DMA_REG_DMASR 0x0004
77 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
78 #define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
79 #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
80 #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
81 #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
82 #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
83 #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
84 #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
85 #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
86 #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
87 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
88 #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
89 #define XILINX_DMA_DMASR_IDLE BIT(1)
90 #define XILINX_DMA_DMASR_HALTED BIT(0)
91 #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
92 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
94 #define XILINX_DMA_REG_CURDESC 0x0008
95 #define XILINX_DMA_REG_TAILDESC 0x0010
96 #define XILINX_DMA_REG_REG_INDEX 0x0014
97 #define XILINX_DMA_REG_FRMSTORE 0x0018
98 #define XILINX_DMA_REG_THRESHOLD 0x001c
99 #define XILINX_DMA_REG_FRMPTR_STS 0x0024
100 #define XILINX_DMA_REG_PARK_PTR 0x0028
101 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
102 #define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
103 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
104 #define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
105 #define XILINX_DMA_REG_VDMA_VERSION 0x002c
107 /* Register Direct Mode Registers */
108 #define XILINX_DMA_REG_VSIZE 0x0000
109 #define XILINX_DMA_REG_HSIZE 0x0004
111 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
112 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
113 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
115 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
116 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
118 /* HW specific definitions */
119 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
121 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
122 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
123 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
124 XILINX_DMA_DMASR_ERR_IRQ)
126 #define XILINX_DMA_DMASR_ALL_ERR_MASK \
127 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
128 XILINX_DMA_DMASR_SOF_LATE_ERR | \
129 XILINX_DMA_DMASR_SG_DEC_ERR | \
130 XILINX_DMA_DMASR_SG_SLV_ERR | \
131 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
132 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
133 XILINX_DMA_DMASR_DMA_DEC_ERR | \
134 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
135 XILINX_DMA_DMASR_DMA_INT_ERR)
138 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
139 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
140 * is enabled in the h/w system.
142 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
143 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
144 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
145 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
146 XILINX_DMA_DMASR_DMA_INT_ERR)
148 /* Axi VDMA Flush on Fsync bits */
149 #define XILINX_DMA_FLUSH_S2MM 3
150 #define XILINX_DMA_FLUSH_MM2S 2
151 #define XILINX_DMA_FLUSH_BOTH 1
153 /* Delay loop counter to prevent hardware failure */
154 #define XILINX_DMA_LOOP_COUNT 1000000
156 /* AXI DMA Specific Registers/Offsets */
157 #define XILINX_DMA_REG_SRCDSTADDR 0x18
158 #define XILINX_DMA_REG_BTT 0x28
160 /* AXI DMA Specific Masks/Bit fields */
161 #define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0)
162 #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
163 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
164 #define XILINX_DMA_CR_COALESCE_SHIFT 16
165 #define XILINX_DMA_BD_SOP BIT(27)
166 #define XILINX_DMA_BD_EOP BIT(26)
167 #define XILINX_DMA_COALESCE_MAX 255
168 #define XILINX_DMA_NUM_DESCS 255
169 #define XILINX_DMA_NUM_APP_WORDS 5
171 /* Multi-Channel DMA Descriptor offsets */
172 #define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20)
173 #define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20)
175 /* Multi-Channel DMA Masks/Shifts */
176 #define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
177 #define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
178 #define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
179 #define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
180 #define XILINX_DMA_BD_STRIDE_SHIFT 0
181 #define XILINX_DMA_BD_VSIZE_SHIFT 19
183 /* AXI CDMA Specific Registers/Offsets */
184 #define XILINX_CDMA_REG_SRCADDR 0x18
185 #define XILINX_CDMA_REG_DSTADDR 0x20
187 /* AXI CDMA Specific Masks */
188 #define XILINX_CDMA_CR_SGMODE BIT(3)
191 * struct xilinx_vdma_desc_hw - Hardware Descriptor
192 * @next_desc: Next Descriptor Pointer @0x00
193 * @pad1: Reserved @0x04
194 * @buf_addr: Buffer address @0x08
195 * @buf_addr_msb: MSB of Buffer address @0x0C
196 * @vsize: Vertical Size @0x10
197 * @hsize: Horizontal Size @0x14
198 * @stride: Number of bytes between the first
199 * pixels of each horizontal line @0x18
201 struct xilinx_vdma_desc_hw {
212 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
213 * @next_desc: Next Descriptor Pointer @0x00
214 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
215 * @buf_addr: Buffer address @0x08
216 * @buf_addr_msb: MSB of Buffer address @0x0C
217 * @mcdma_control: Control field for mcdma @0x10
218 * @vsize_stride: Vsize and Stride field for mcdma @0x14
219 * @control: Control field @0x18
220 * @status: Status field @0x1C
221 * @app: APP Fields @0x20 - 0x30
223 struct xilinx_axidma_desc_hw {
232 u32 app[XILINX_DMA_NUM_APP_WORDS];
236 * struct xilinx_cdma_desc_hw - Hardware Descriptor
237 * @next_desc: Next Descriptor Pointer @0x00
238 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
239 * @src_addr: Source address @0x08
240 * @src_addr_msb: Source address MSB @0x0C
241 * @dest_addr: Destination address @0x10
242 * @dest_addr_msb: Destination address MSB @0x14
243 * @control: Control field @0x18
244 * @status: Status field @0x1C
246 struct xilinx_cdma_desc_hw {
258 * struct xilinx_vdma_tx_segment - Descriptor segment
259 * @hw: Hardware descriptor
260 * @node: Node in the descriptor segments list
261 * @phys: Physical address of segment
263 struct xilinx_vdma_tx_segment {
264 struct xilinx_vdma_desc_hw hw;
265 struct list_head node;
270 * struct xilinx_axidma_tx_segment - Descriptor segment
271 * @hw: Hardware descriptor
272 * @node: Node in the descriptor segments list
273 * @phys: Physical address of segment
275 struct xilinx_axidma_tx_segment {
276 struct xilinx_axidma_desc_hw hw;
277 struct list_head node;
282 * struct xilinx_cdma_tx_segment - Descriptor segment
283 * @hw: Hardware descriptor
284 * @node: Node in the descriptor segments list
285 * @phys: Physical address of segment
287 struct xilinx_cdma_tx_segment {
288 struct xilinx_cdma_desc_hw hw;
289 struct list_head node;
294 * struct xilinx_dma_tx_descriptor - Per Transaction structure
295 * @async_tx: Async transaction descriptor
296 * @segments: TX segments list
297 * @node: Node in the channel descriptors list
298 * @cyclic: Check for cyclic transfers.
300 struct xilinx_dma_tx_descriptor {
301 struct dma_async_tx_descriptor async_tx;
302 struct list_head segments;
303 struct list_head node;
308 * struct xilinx_dma_chan - Driver specific DMA channel structure
309 * @xdev: Driver specific device structure
310 * @ctrl_offset: Control registers offset
311 * @desc_offset: TX descriptor registers offset
312 * @lock: Descriptor operation lock
313 * @pending_list: Descriptors waiting
314 * @active_list: Descriptors ready to submit
315 * @done_list: Complete descriptors
316 * @free_seg_list: Free descriptors
317 * @common: DMA common channel
318 * @desc_pool: Descriptors pool
319 * @dev: The dma device
322 * @direction: Transfer direction
323 * @num_frms: Number of frames
324 * @has_sg: Support scatter transfers
325 * @cyclic: Check for cyclic transfers.
326 * @genlock: Support genlock mode
327 * @err: Channel has errors
328 * @idle: Check for channel idle
329 * @tasklet: Cleanup work after irq
330 * @config: Device configuration info
331 * @flush_on_fsync: Flush on Frame sync
332 * @desc_pendingcount: Descriptor pending count
333 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
334 * @desc_submitcount: Descriptor h/w submitted count
335 * @residue: Residue for AXI DMA
336 * @seg_v: Statically allocated segments base
337 * @seg_p: Physical allocated segments base
338 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
339 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
340 * @start_transfer: Transfer start routine, differs between the DMA IPs
341 * @tdest: TDEST value for mcdma
343 struct xilinx_dma_chan {
344 struct xilinx_dma_device *xdev;
348 struct list_head pending_list;
349 struct list_head active_list;
350 struct list_head done_list;
351 struct list_head free_seg_list;
352 struct dma_chan common;
353 struct dma_pool *desc_pool;
357 enum dma_transfer_direction direction;
364 struct tasklet_struct tasklet;
365 struct xilinx_vdma_config config;
367 u32 desc_pendingcount;
369 u32 desc_submitcount;
371 struct xilinx_axidma_tx_segment *seg_v;
373 struct xilinx_axidma_tx_segment *cyclic_seg_v;
374 dma_addr_t cyclic_seg_p;
375 void (*start_transfer)(struct xilinx_dma_chan *chan);
379 struct xilinx_dma_config {
380 enum xdma_ip_type dmatype;
381 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
382 struct clk **tx_clk, struct clk **txs_clk,
383 struct clk **rx_clk, struct clk **rxs_clk);
387 * struct xilinx_dma_device - DMA device structure
388 * @regs: I/O mapped base address
389 * @dev: Device Structure
390 * @common: DMA device structure
391 * @chan: Driver specific DMA channel
392 * @has_sg: Specifies whether Scatter-Gather is present or not
393 * @mcdma: Specifies whether Multi-Channel is present or not
394 * @flush_on_fsync: Flush on frame sync
395 * @ext_addr: Indicates 64 bit addressing is supported by dma device
396 * @pdev: Platform device structure pointer
397 * @dma_config: DMA config structure
398 * @axi_clk: DMA Axi4-lite interface clock
399 * @tx_clk: DMA mm2s clock
400 * @txs_clk: DMA mm2s stream clock
401 * @rx_clk: DMA s2mm clock
402 * @rxs_clk: DMA s2mm stream clock
403 * @nr_channels: Number of channels DMA device supports
404 * @chan_id: DMA channel identifier
406 struct xilinx_dma_device {
409 struct dma_device common;
410 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
415 struct platform_device *pdev;
416 const struct xilinx_dma_config *dma_config;
427 #define to_xilinx_chan(chan) \
428 container_of(chan, struct xilinx_dma_chan, common)
429 #define to_dma_tx_descriptor(tx) \
430 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
431 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
432 readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
433 cond, delay_us, timeout_us)
436 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
438 return ioread32(chan->xdev->regs + reg);
441 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
443 iowrite32(value, chan->xdev->regs + reg);
446 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
449 dma_write(chan, chan->desc_offset + reg, value);
452 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
454 return dma_read(chan, chan->ctrl_offset + reg);
457 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
460 dma_write(chan, chan->ctrl_offset + reg, value);
463 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
466 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
469 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
472 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
476 * vdma_desc_write_64 - 64-bit descriptor write
477 * @chan: Driver specific VDMA channel
478 * @reg: Register to write
479 * @value_lsb: lower address of the descriptor.
480 * @value_msb: upper address of the descriptor.
482 * Since the VDMA driver writes to a register offset that is not a
483 * multiple of 64 bits (e.g. 0x5c), the value is written as two separate
484 * 32-bit writes instead of a single 64-bit register write.
486 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
487 u32 value_lsb, u32 value_msb)
489 /* Write the lsb 32 bits */
490 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
492 /* Write the msb 32 bits */
493 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
496 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
498 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
501 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
505 dma_writeq(chan, reg, addr);
507 dma_ctrl_write(chan, reg, addr);
510 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
511 struct xilinx_axidma_desc_hw *hw,
512 dma_addr_t buf_addr, size_t sg_used,
515 if (chan->ext_addr) {
516 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
517 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
520 hw->buf_addr = buf_addr + sg_used + period_len;
524 /* -----------------------------------------------------------------------------
525 * Descriptors and segments alloc and free
529 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
530 * @chan: Driver specific DMA channel
532 * Return: The allocated segment on success and NULL on failure.
534 static struct xilinx_vdma_tx_segment *
535 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
537 struct xilinx_vdma_tx_segment *segment;
540 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
544 segment->phys = phys;
550 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
551 * @chan: Driver specific DMA channel
553 * Return: The allocated segment on success and NULL on failure.
555 static struct xilinx_cdma_tx_segment *
556 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
558 struct xilinx_cdma_tx_segment *segment;
561 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
565 segment->phys = phys;
571 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
572 * @chan: Driver specific DMA channel
574 * Return: The allocated segment on success and NULL on failure.
576 static struct xilinx_axidma_tx_segment *
577 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
579 struct xilinx_axidma_tx_segment *segment = NULL;
582 spin_lock_irqsave(&chan->lock, flags);
583 if (!list_empty(&chan->free_seg_list)) {
584 segment = list_first_entry(&chan->free_seg_list,
585 struct xilinx_axidma_tx_segment,
587 list_del(&segment->node);
589 spin_unlock_irqrestore(&chan->lock, flags);
594 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
596 u32 next_desc = hw->next_desc;
597 u32 next_desc_msb = hw->next_desc_msb;
599 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
601 hw->next_desc = next_desc;
602 hw->next_desc_msb = next_desc_msb;
606 * xilinx_dma_free_tx_segment - Free transaction segment
607 * @chan: Driver specific DMA channel
608 * @segment: DMA transaction segment
610 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
611 struct xilinx_axidma_tx_segment *segment)
613 xilinx_dma_clean_hw_desc(&segment->hw);
615 list_add_tail(&segment->node, &chan->free_seg_list);
619 * xilinx_cdma_free_tx_segment - Free transaction segment
620 * @chan: Driver specific DMA channel
621 * @segment: DMA transaction segment
623 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
624 struct xilinx_cdma_tx_segment *segment)
626 dma_pool_free(chan->desc_pool, segment, segment->phys);
630 * xilinx_vdma_free_tx_segment - Free transaction segment
631 * @chan: Driver specific DMA channel
632 * @segment: DMA transaction segment
634 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
635 struct xilinx_vdma_tx_segment *segment)
637 dma_pool_free(chan->desc_pool, segment, segment->phys);
641 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
642 * @chan: Driver specific DMA channel
644 * Return: The allocated descriptor on success and NULL on failure.
646 static struct xilinx_dma_tx_descriptor *
647 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
649 struct xilinx_dma_tx_descriptor *desc;
651 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
655 INIT_LIST_HEAD(&desc->segments);
661 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
662 * @chan: Driver specific DMA channel
663 * @desc: DMA transaction descriptor
666 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
667 struct xilinx_dma_tx_descriptor *desc)
669 struct xilinx_vdma_tx_segment *segment, *next;
670 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
671 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
676 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
677 list_for_each_entry_safe(segment, next, &desc->segments, node) {
678 list_del(&segment->node);
679 xilinx_vdma_free_tx_segment(chan, segment);
681 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
682 list_for_each_entry_safe(cdma_segment, cdma_next,
683 &desc->segments, node) {
684 list_del(&cdma_segment->node);
685 xilinx_cdma_free_tx_segment(chan, cdma_segment);
688 list_for_each_entry_safe(axidma_segment, axidma_next,
689 &desc->segments, node) {
690 list_del(&axidma_segment->node);
691 xilinx_dma_free_tx_segment(chan, axidma_segment);
698 /* Required functions */
701 * xilinx_dma_free_desc_list - Free descriptors list
702 * @chan: Driver specific DMA channel
703 * @list: List to parse and delete the descriptor
705 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
706 struct list_head *list)
708 struct xilinx_dma_tx_descriptor *desc, *next;
710 list_for_each_entry_safe(desc, next, list, node) {
711 list_del(&desc->node);
712 xilinx_dma_free_tx_descriptor(chan, desc);
717 * xilinx_dma_free_descriptors - Free channel descriptors
718 * @chan: Driver specific DMA channel
720 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
724 spin_lock_irqsave(&chan->lock, flags);
726 xilinx_dma_free_desc_list(chan, &chan->pending_list);
727 xilinx_dma_free_desc_list(chan, &chan->done_list);
728 xilinx_dma_free_desc_list(chan, &chan->active_list);
730 spin_unlock_irqrestore(&chan->lock, flags);
734 * xilinx_dma_free_chan_resources - Free channel resources
735 * @dchan: DMA channel
737 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
739 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
742 dev_dbg(chan->dev, "Free all channel resources.\n");
744 xilinx_dma_free_descriptors(chan);
746 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
747 spin_lock_irqsave(&chan->lock, flags);
748 INIT_LIST_HEAD(&chan->free_seg_list);
749 spin_unlock_irqrestore(&chan->lock, flags);
751 /* Free Memory that is allocated for cyclic DMA Mode */
752 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
753 chan->cyclic_seg_v, chan->cyclic_seg_p);
756 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
757 dma_pool_destroy(chan->desc_pool);
758 chan->desc_pool = NULL;
763 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
764 * @chan: Driver specific dma channel
765 * @desc: dma transaction descriptor
766 * @flags: flags for spin lock
768 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
769 struct xilinx_dma_tx_descriptor *desc,
770 unsigned long *flags)
772 dma_async_tx_callback callback;
773 void *callback_param;
775 callback = desc->async_tx.callback;
776 callback_param = desc->async_tx.callback_param;
778 spin_unlock_irqrestore(&chan->lock, *flags);
779 callback(callback_param);
780 spin_lock_irqsave(&chan->lock, *flags);
785 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
786 * @chan: Driver specific DMA channel
788 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
790 struct xilinx_dma_tx_descriptor *desc, *next;
793 spin_lock_irqsave(&chan->lock, flags);
795 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
796 struct dmaengine_desc_callback cb;
799 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
803 /* Remove from the list of running transactions */
804 list_del(&desc->node);
806 /* Run the link descriptor callback function */
807 dmaengine_desc_get_callback(&desc->async_tx, &cb);
808 if (dmaengine_desc_callback_valid(&cb)) {
809 spin_unlock_irqrestore(&chan->lock, flags);
810 dmaengine_desc_callback_invoke(&cb, NULL);
811 spin_lock_irqsave(&chan->lock, flags);
814 /* Run any dependencies, then free the descriptor */
815 dma_run_dependencies(&desc->async_tx);
816 xilinx_dma_free_tx_descriptor(chan, desc);
819 spin_unlock_irqrestore(&chan->lock, flags);
823 * xilinx_dma_do_tasklet - Schedule completion tasklet
824 * @data: Pointer to the Xilinx DMA channel structure
826 static void xilinx_dma_do_tasklet(unsigned long data)
828 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
830 xilinx_dma_chan_desc_cleanup(chan);
834 * xilinx_dma_alloc_chan_resources - Allocate channel resources
835 * @dchan: DMA channel
837 * Return: '0' on success and failure value on error
839 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
841 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
844 /* Has this channel already been allocated? */
849 * We need the descriptor to be aligned to 64 bytes
850 * to meet the Xilinx VDMA specification requirement.
852 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
853 /* Allocate the buffer descriptors. */
854 chan->seg_v = dma_zalloc_coherent(chan->dev,
855 sizeof(*chan->seg_v) *
856 XILINX_DMA_NUM_DESCS,
857 &chan->seg_p, GFP_KERNEL);
860 "unable to allocate channel %d descriptors\n",
865 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
866 chan->seg_v[i].hw.next_desc =
867 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
868 ((i + 1) % XILINX_DMA_NUM_DESCS));
869 chan->seg_v[i].hw.next_desc_msb =
870 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
871 ((i + 1) % XILINX_DMA_NUM_DESCS));
872 chan->seg_v[i].phys = chan->seg_p +
873 sizeof(*chan->seg_v) * i;
874 list_add_tail(&chan->seg_v[i].node,
875 &chan->free_seg_list);
877 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
878 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
880 sizeof(struct xilinx_cdma_tx_segment),
881 __alignof__(struct xilinx_cdma_tx_segment),
884 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
886 sizeof(struct xilinx_vdma_tx_segment),
887 __alignof__(struct xilinx_vdma_tx_segment),
891 if (!chan->desc_pool &&
892 (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
894 "unable to allocate channel %d descriptor pool\n",
899 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
901 * For cyclic DMA mode we need to program the tail descriptor
902 * register with a value that is not part of the BD chain,
903 * so allocate a descriptor segment during channel allocation
904 * for programming the tail descriptor register.
906 chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
907 sizeof(*chan->cyclic_seg_v),
908 &chan->cyclic_seg_p, GFP_KERNEL);
909 if (!chan->cyclic_seg_v) {
911 "unable to allocate desc segment for cyclic DMA\n");
914 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
917 dma_cookie_init(dchan);
919 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
920 /* For AXI DMA, resetting one channel will reset the
921 * other channel as well, so enable the interrupts here.
923 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
924 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
927 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
928 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
929 XILINX_CDMA_CR_SGMODE);
935 * xilinx_dma_tx_status - Get DMA transaction status
936 * @dchan: DMA channel
937 * @cookie: Transaction identifier
938 * @txstate: Transaction state
940 * Return: DMA transaction status
942 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
944 struct dma_tx_state *txstate)
946 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
947 struct xilinx_dma_tx_descriptor *desc;
948 struct xilinx_axidma_tx_segment *segment;
949 struct xilinx_axidma_desc_hw *hw;
954 ret = dma_cookie_status(dchan, cookie, txstate);
955 if (ret == DMA_COMPLETE || !txstate)
958 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
959 spin_lock_irqsave(&chan->lock, flags);
961 desc = list_last_entry(&chan->active_list,
962 struct xilinx_dma_tx_descriptor, node);
964 list_for_each_entry(segment, &desc->segments, node) {
966 residue += (hw->control - hw->status) &
967 XILINX_DMA_MAX_TRANS_LEN;
970 spin_unlock_irqrestore(&chan->lock, flags);
972 chan->residue = residue;
973 dma_set_residue(txstate, chan->residue);
980 * xilinx_dma_halt - Halt DMA channel
981 * @chan: Driver specific DMA channel
983 static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
988 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
990 /* Wait for the hardware to halt */
991 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
992 (val & XILINX_DMA_DMASR_HALTED), 0,
993 XILINX_DMA_LOOP_COUNT);
996 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
997 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1004 * xilinx_dma_start - Start DMA channel
1005 * @chan: Driver specific DMA channel
1007 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1012 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1014 /* Wait for the hardware to start */
1015 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1016 !(val & XILINX_DMA_DMASR_HALTED), 0,
1017 XILINX_DMA_LOOP_COUNT);
1020 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1021 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1028 * xilinx_vdma_start_transfer - Starts VDMA transfer
1029 * @chan: Driver specific channel struct pointer
1031 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1033 struct xilinx_vdma_config *config = &chan->config;
1034 struct xilinx_dma_tx_descriptor *desc, *tail_desc;
1036 struct xilinx_vdma_tx_segment *tail_segment;
1038 /* This function was invoked with lock held */
1045 if (list_empty(&chan->pending_list))
1048 desc = list_first_entry(&chan->pending_list,
1049 struct xilinx_dma_tx_descriptor, node);
1050 tail_desc = list_last_entry(&chan->pending_list,
1051 struct xilinx_dma_tx_descriptor, node);
1053 tail_segment = list_last_entry(&tail_desc->segments,
1054 struct xilinx_vdma_tx_segment, node);
1057 * If hardware is idle, then all descriptors on the running lists are
1058 * done; start new transfers
1061 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1062 desc->async_tx.phys);
1064 /* Configure the hardware using info in the config structure */
1065 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1067 if (config->frm_cnt_en)
1068 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1070 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1073 * With SG, start with circular mode, so that BDs can be fetched.
1074 * In direct register mode, if not parking, enable circular mode
1076 if (chan->has_sg || !config->park)
1077 reg |= XILINX_DMA_DMACR_CIRC_EN;
1080 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1082 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1084 j = chan->desc_submitcount;
1085 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1086 if (chan->direction == DMA_MEM_TO_DEV) {
1087 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1088 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1090 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1091 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1093 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1095 /* Start the hardware */
1096 xilinx_dma_start(chan);
1101 /* Start the transfer */
1103 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1104 tail_segment->phys);
1105 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1106 chan->desc_pendingcount = 0;
1108 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1111 if (chan->desc_submitcount < chan->num_frms)
1112 i = chan->desc_submitcount;
1114 list_for_each_entry(segment, &desc->segments, node) {
1116 vdma_desc_write_64(chan,
1117 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1118 segment->hw.buf_addr,
1119 segment->hw.buf_addr_msb);
1121 vdma_desc_write(chan,
1122 XILINX_VDMA_REG_START_ADDRESS(i++),
1123 segment->hw.buf_addr);
1131 /* HW expects these parameters to be the same for one transaction */
1132 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1133 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1135 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1137 chan->desc_submitcount++;
1138 chan->desc_pendingcount--;
1139 list_del(&desc->node);
1140 list_add_tail(&desc->node, &chan->active_list);
1141 if (chan->desc_submitcount == chan->num_frms)
1142 chan->desc_submitcount = 0;
1149 * xilinx_cdma_start_transfer - Starts cdma transfer
1150 * @chan: Driver specific channel struct pointer
1152 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1154 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1155 struct xilinx_cdma_tx_segment *tail_segment;
1156 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1164 if (list_empty(&chan->pending_list))
1167 head_desc = list_first_entry(&chan->pending_list,
1168 struct xilinx_dma_tx_descriptor, node);
1169 tail_desc = list_last_entry(&chan->pending_list,
1170 struct xilinx_dma_tx_descriptor, node);
1171 tail_segment = list_last_entry(&tail_desc->segments,
1172 struct xilinx_cdma_tx_segment, node);
1174 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1175 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1176 ctrl_reg |= chan->desc_pendingcount <<
1177 XILINX_DMA_CR_COALESCE_SHIFT;
1178 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1182 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1183 head_desc->async_tx.phys);
1185 /* Update tail ptr register which will start the transfer */
1186 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1187 tail_segment->phys);
1189 /* In simple mode */
1190 struct xilinx_cdma_tx_segment *segment;
1191 struct xilinx_cdma_desc_hw *hw;
1193 segment = list_first_entry(&head_desc->segments,
1194 struct xilinx_cdma_tx_segment,
1199 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
1200 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
1202 /* Start the transfer */
1203 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1204 hw->control & XILINX_DMA_MAX_TRANS_LEN);
1207 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1208 chan->desc_pendingcount = 0;
1213 * xilinx_dma_start_transfer - Starts DMA transfer
1214 * @chan: Driver specific channel struct pointer
1216 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1218 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1219 struct xilinx_axidma_tx_segment *tail_segment;
1228 if (list_empty(&chan->pending_list))
1231 head_desc = list_first_entry(&chan->pending_list,
1232 struct xilinx_dma_tx_descriptor, node);
1233 tail_desc = list_last_entry(&chan->pending_list,
1234 struct xilinx_dma_tx_descriptor, node);
1235 tail_segment = list_last_entry(&tail_desc->segments,
1236 struct xilinx_axidma_tx_segment, node);
1238 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1240 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1241 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1242 reg |= chan->desc_pendingcount <<
1243 XILINX_DMA_CR_COALESCE_SHIFT;
1244 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1247 if (chan->has_sg && !chan->xdev->mcdma)
1248 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1249 head_desc->async_tx.phys);
1251 if (chan->has_sg && chan->xdev->mcdma) {
1252 if (chan->direction == DMA_MEM_TO_DEV) {
1253 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1254 head_desc->async_tx.phys);
1257 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1258 head_desc->async_tx.phys);
1260 dma_ctrl_write(chan,
1261 XILINX_DMA_MCRX_CDESC(chan->tdest),
1262 head_desc->async_tx.phys);
1267 xilinx_dma_start(chan);
1272 /* Start the transfer */
1273 if (chan->has_sg && !chan->xdev->mcdma) {
1275 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1276 chan->cyclic_seg_v->phys);
1278 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1279 tail_segment->phys);
1280 } else if (chan->has_sg && chan->xdev->mcdma) {
1281 if (chan->direction == DMA_MEM_TO_DEV) {
1282 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1283 tail_segment->phys);
1286 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1287 tail_segment->phys);
1289 dma_ctrl_write(chan,
1290 XILINX_DMA_MCRX_TDESC(chan->tdest),
1291 tail_segment->phys);
1295 struct xilinx_axidma_tx_segment *segment;
1296 struct xilinx_axidma_desc_hw *hw;
1298 segment = list_first_entry(&head_desc->segments,
1299 struct xilinx_axidma_tx_segment,
1303 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
1305 /* Start the transfer */
1306 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1307 hw->control & XILINX_DMA_MAX_TRANS_LEN);
1310 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1311 chan->desc_pendingcount = 0;
1316 * xilinx_dma_issue_pending - Issue pending transactions
1317 * @dchan: DMA channel
1319 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1321 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1322 unsigned long flags;
1324 spin_lock_irqsave(&chan->lock, flags);
1325 chan->start_transfer(chan);
1326 spin_unlock_irqrestore(&chan->lock, flags);
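/*
 * Illustrative sketch, not part of the driver: the standard dmaengine client
 * sequence that ultimately reaches the prep and issue_pending callbacks
 * implemented in this file. The channel name "rx", the buffer, the length
 * and the callback names are assumptions made for the example only.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = dmaengine_prep_slave_single(chan, dma_buf, len,
 *					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = rx_done_callback;	// invoked from the cleanup tasklet
 *	desc->callback_param = ctx;
 *	cookie = dmaengine_submit(desc);	// -> xilinx_dma_tx_submit()
 *	dma_async_issue_pending(chan);		// -> xilinx_dma_issue_pending()
 */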
1330 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1331 * @chan : xilinx DMA channel
1335 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1337 struct xilinx_dma_tx_descriptor *desc, *next;
1339 /* This function was invoked with lock held */
1340 if (list_empty(&chan->active_list))
1343 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1344 list_del(&desc->node);
1346 dma_cookie_complete(&desc->async_tx);
1347 list_add_tail(&desc->node, &chan->done_list);
1352 * xilinx_dma_reset - Reset DMA channel
1353 * @chan: Driver specific DMA channel
1355 * Return: '0' on success and failure value on error
1357 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1362 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1364 /* Wait for the hardware to finish reset */
1365 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1366 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1367 XILINX_DMA_LOOP_COUNT);
1370 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1371 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1372 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1378 chan->desc_submitcount = 0;
1384 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1385 * @chan: Driver specific DMA channel
1387 * Return: '0' on success and failure value on error
1389 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1394 err = xilinx_dma_reset(chan);
1398 /* Enable interrupts */
1399 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1400 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1406 * xilinx_dma_irq_handler - DMA Interrupt handler
1408 * @data: Pointer to the Xilinx DMA channel structure
1410 * Return: IRQ_HANDLED/IRQ_NONE
1412 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1414 struct xilinx_dma_chan *chan = data;
1417 /* Read the status and ack the interrupts. */
1418 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1419 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1422 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1423 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1425 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1427 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1428 * error is recoverable, ignore it. Otherwise flag the error.
1430 * Only recoverable errors can be cleared in the DMASR register,
1431 * make sure not to write other error bits to 1.
1433 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1435 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1436 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1438 if (!chan->flush_on_fsync ||
1439 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1441 "Channel %p has errors %x, cdr %x tdr %x\n",
1443 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1444 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1449 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1451 * Device takes too long to do the transfer when the user requires responsiveness.
1454 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1457 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1458 spin_lock(&chan->lock);
1459 xilinx_dma_complete_descriptor(chan);
1461 chan->start_transfer(chan);
1462 spin_unlock(&chan->lock);
1465 tasklet_schedule(&chan->tasklet);
1470 * append_desc_queue - Queuing descriptor
1471 * @chan: Driver specific dma channel
1472 * @desc: dma transaction descriptor
1474 static void append_desc_queue(struct xilinx_dma_chan *chan,
1475 struct xilinx_dma_tx_descriptor *desc)
1477 struct xilinx_vdma_tx_segment *tail_segment;
1478 struct xilinx_dma_tx_descriptor *tail_desc;
1479 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1480 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1482 if (list_empty(&chan->pending_list))
1486 * Add the hardware descriptor to the chain of hardware descriptors
1487 * that already exists in memory.
1489 tail_desc = list_last_entry(&chan->pending_list,
1490 struct xilinx_dma_tx_descriptor, node);
1491 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1492 tail_segment = list_last_entry(&tail_desc->segments,
1493 struct xilinx_vdma_tx_segment,
1495 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1496 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1497 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1498 struct xilinx_cdma_tx_segment,
1500 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1502 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1503 struct xilinx_axidma_tx_segment,
1505 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1509 * Add the software descriptor and all children to the list
1510 * of pending transactions
1513 list_add_tail(&desc->node, &chan->pending_list);
1514 chan->desc_pendingcount++;
1516 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1517 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1518 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1519 chan->desc_pendingcount = chan->num_frms;
1524 * xilinx_dma_tx_submit - Submit DMA transaction
1525 * @tx: Async transaction descriptor
1527 * Return: cookie value on success and failure value on error
1529 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1531 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1532 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1533 dma_cookie_t cookie;
1534 unsigned long flags;
1538 xilinx_dma_free_tx_descriptor(chan, desc);
1544 * If reset fails, the system needs a hard reset.
1545 * The channel is no longer functional.
1547 err = xilinx_dma_chan_reset(chan);
1552 spin_lock_irqsave(&chan->lock, flags);
1554 cookie = dma_cookie_assign(tx);
1556 /* Put this transaction onto the tail of the pending queue */
1557 append_desc_queue(chan, desc);
1560 chan->cyclic = true;
1562 spin_unlock_irqrestore(&chan->lock, flags);
1568 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1569 * DMA_SLAVE transaction
1570 * @dchan: DMA channel
1571 * @xt: Interleaved template pointer
1572 * @flags: transfer ack flags
1574 * Return: Async transaction descriptor on success and NULL on failure
1576 static struct dma_async_tx_descriptor *
1577 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1578 struct dma_interleaved_template *xt,
1579 unsigned long flags)
1581 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1582 struct xilinx_dma_tx_descriptor *desc;
1583 struct xilinx_vdma_tx_segment *segment, *prev = NULL;
1584 struct xilinx_vdma_desc_hw *hw;
1586 if (!is_slave_direction(xt->dir))
1589 if (!xt->numf || !xt->sgl[0].size)
1592 if (xt->frame_size != 1)
1595 /* Allocate a transaction descriptor. */
1596 desc = xilinx_dma_alloc_tx_descriptor(chan);
1600 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1601 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1602 async_tx_ack(&desc->async_tx);
1604 /* Allocate the link descriptor from DMA pool */
1605 segment = xilinx_vdma_alloc_tx_segment(chan);
1609 /* Fill in the hardware descriptor */
1611 hw->vsize = xt->numf;
1612 hw->hsize = xt->sgl[0].size;
1613 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
1614 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
1615 hw->stride |= chan->config.frm_dly <<
1616 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
1618 if (xt->dir != DMA_MEM_TO_DEV) {
1619 if (chan->ext_addr) {
1620 hw->buf_addr = lower_32_bits(xt->dst_start);
1621 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1623 hw->buf_addr = xt->dst_start;
1626 if (chan->ext_addr) {
1627 hw->buf_addr = lower_32_bits(xt->src_start);
1628 hw->buf_addr_msb = upper_32_bits(xt->src_start);
1630 hw->buf_addr = xt->src_start;
1634 /* Insert the segment into the descriptor segments list. */
1635 list_add_tail(&segment->node, &desc->segments);
1639 /* Link the last hardware descriptor with the first. */
1640 segment = list_first_entry(&desc->segments,
1641 struct xilinx_vdma_tx_segment, node);
1642 desc->async_tx.phys = segment->phys;
1644 return &desc->async_tx;
1647 xilinx_dma_free_tx_descriptor(chan, desc);
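/*
 * Illustrative sketch, not part of the driver: describing one video frame
 * with a dma_interleaved_template for the VDMA prep callback above
 * (frame_size must be 1). The width, height, bpp and stride variables are
 * assumptions made for the example only.
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
 *	xt->dir = DMA_DEV_TO_MEM;		// S2MM: capture into memory
 *	xt->dst_start = frame_dma_addr;
 *	xt->numf = height;			// vertical size (lines)
 *	xt->frame_size = 1;			// one chunk per line
 *	xt->sgl[0].size = width * bpp;		// horizontal size in bytes
 *	xt->sgl[0].icg = stride - width * bpp;	// gap to the next line
 *	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */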
1652 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1653 * @dchan: DMA channel
1654 * @dma_dst: destination address
1655 * @dma_src: source address
1656 * @len: transfer length
1657 * @flags: transfer ack flags
1659 * Return: Async transaction descriptor on success and NULL on failure
1661 static struct dma_async_tx_descriptor *
1662 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1663 dma_addr_t dma_src, size_t len, unsigned long flags)
1665 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1666 struct xilinx_dma_tx_descriptor *desc;
1667 struct xilinx_cdma_tx_segment *segment, *prev;
1668 struct xilinx_cdma_desc_hw *hw;
1670 if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
1673 desc = xilinx_dma_alloc_tx_descriptor(chan);
1677 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1678 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1680 /* Allocate the link descriptor from DMA pool */
1681 segment = xilinx_cdma_alloc_tx_segment(chan);
1687 hw->src_addr = dma_src;
1688 hw->dest_addr = dma_dst;
1689 if (chan->ext_addr) {
1690 hw->src_addr_msb = upper_32_bits(dma_src);
1691 hw->dest_addr_msb = upper_32_bits(dma_dst);
1694 /* Fill the previous next descriptor with current */
1695 prev = list_last_entry(&desc->segments,
1696 struct xilinx_cdma_tx_segment, node);
1697 prev->hw.next_desc = segment->phys;
1699 /* Insert the segment into the descriptor segments list. */
1700 list_add_tail(&segment->node, &desc->segments);
1704 /* Link the last hardware descriptor with the first. */
1705 segment = list_first_entry(&desc->segments,
1706 struct xilinx_cdma_tx_segment, node);
1707 desc->async_tx.phys = segment->phys;
1708 prev->hw.next_desc = segment->phys;
1710 return &desc->async_tx;
1713 xilinx_dma_free_tx_descriptor(chan, desc);
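/*
 * Illustrative sketch, not part of the driver: a memory-to-memory copy
 * through the CDMA prep callback above. The dst_dma, src_dma and len values
 * are assumptions made for the example only.
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);		// poll until the copy completes
 */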
1718 * xilinx_cdma_prep_sg - prepare descriptors for a memory sg transaction
1719 * @dchan: DMA channel
1720 * @dst_sg: Destination scatter list
1721 * @dst_sg_len: Number of entries in destination scatter list
1722 * @src_sg: Source scatter list
1723 * @src_sg_len: Number of entries in source scatter list
1724 * @flags: transfer ack flags
1726 * Return: Async transaction descriptor on success and NULL on failure
1728 static struct dma_async_tx_descriptor *xilinx_cdma_prep_sg(
1729 struct dma_chan *dchan, struct scatterlist *dst_sg,
1730 unsigned int dst_sg_len, struct scatterlist *src_sg,
1731 unsigned int src_sg_len, unsigned long flags)
1733 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1734 struct xilinx_dma_tx_descriptor *desc;
1735 struct xilinx_cdma_tx_segment *segment, *prev = NULL;
1736 struct xilinx_cdma_desc_hw *hw;
1737 size_t len, dst_avail, src_avail;
1738 dma_addr_t dma_dst, dma_src;
1740 if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
1743 if (unlikely(dst_sg == NULL || src_sg == NULL))
1746 desc = xilinx_dma_alloc_tx_descriptor(chan);
1750 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1751 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1753 dst_avail = sg_dma_len(dst_sg);
1754 src_avail = sg_dma_len(src_sg);
1756 * loop until there is either no more source or no more destination scatterlist entry
1760 len = min_t(size_t, src_avail, dst_avail);
1761 len = min_t(size_t, len, XILINX_DMA_MAX_TRANS_LEN);
1765 /* Allocate the link descriptor from DMA pool */
1766 segment = xilinx_cdma_alloc_tx_segment(chan);
1770 dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
1772 dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
1776 hw->src_addr = dma_src;
1777 hw->dest_addr = dma_dst;
1778 if (chan->ext_addr) {
1779 hw->src_addr_msb = upper_32_bits(dma_src);
1780 hw->dest_addr_msb = upper_32_bits(dma_dst);
1784 prev->hw.next_desc = segment->phys;
1789 list_add_tail(&segment->node, &desc->segments);
1792 /* Fetch the next dst scatterlist entry */
1793 if (dst_avail == 0) {
1794 if (dst_sg_len == 0)
1796 dst_sg = sg_next(dst_sg);
1800 dst_avail = sg_dma_len(dst_sg);
1802 /* Fetch the next src scatterlist entry */
1803 if (src_avail == 0) {
1804 if (src_sg_len == 0)
1806 src_sg = sg_next(src_sg);
1810 src_avail = sg_dma_len(src_sg);
1814 /* Link the last hardware descriptor with the first. */
1815 segment = list_first_entry(&desc->segments,
1816 struct xilinx_cdma_tx_segment, node);
1817 desc->async_tx.phys = segment->phys;
1818 prev->hw.next_desc = segment->phys;
1820 return &desc->async_tx;
1823 xilinx_dma_free_tx_descriptor(chan, desc);
1828 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1829 * @dchan: DMA channel
1830 * @sgl: scatterlist to transfer to/from
1831 * @sg_len: number of entries in @sgl
1832 * @direction: DMA direction
1833 * @flags: transfer ack flags
1834 * @context: APP words of the descriptor
1836 * Return: Async transaction descriptor on success and NULL on failure
1838 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1839 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1840 enum dma_transfer_direction direction, unsigned long flags,
1843 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1844 struct xilinx_dma_tx_descriptor *desc;
1845 struct xilinx_axidma_tx_segment *segment = NULL;
1846 u32 *app_w = (u32 *)context;
1847 struct scatterlist *sg;
1852 if (!is_slave_direction(direction))
1855 /* Allocate a transaction descriptor. */
1856 desc = xilinx_dma_alloc_tx_descriptor(chan);
1860 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1861 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1863 /* Build transactions using information in the scatter gather list */
1864 for_each_sg(sgl, sg, sg_len, i) {
1867 /* Loop until the entire scatterlist entry is used */
1868 while (sg_used < sg_dma_len(sg)) {
1869 struct xilinx_axidma_desc_hw *hw;
1871 /* Get a free segment */
1872 segment = xilinx_axidma_alloc_tx_segment(chan);
1877 * Calculate the maximum number of bytes to transfer,
1878 * making sure it is less than the hw limit
1880 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
1881 XILINX_DMA_MAX_TRANS_LEN);
1884 /* Fill in the descriptor */
1885 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1890 if (chan->direction == DMA_MEM_TO_DEV) {
1892 memcpy(hw->app, app_w, sizeof(u32) *
1893 XILINX_DMA_NUM_APP_WORDS);
1899 * Insert the segment into the descriptor segments
1902 list_add_tail(&segment->node, &desc->segments);
1906 segment = list_first_entry(&desc->segments,
1907 struct xilinx_axidma_tx_segment, node);
1908 desc->async_tx.phys = segment->phys;
1910 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1911 if (chan->direction == DMA_MEM_TO_DEV) {
1912 segment->hw.control |= XILINX_DMA_BD_SOP;
1913 segment = list_last_entry(&desc->segments,
1914 struct xilinx_axidma_tx_segment,
1916 segment->hw.control |= XILINX_DMA_BD_EOP;
1919 return &desc->async_tx;
1922 xilinx_dma_free_tx_descriptor(chan, desc);
1927 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
1928 * @dchan: DMA channel
1929 * @buf_addr: Physical address of the buffer
1930 * @buf_len: Total length of the cyclic buffers
1931 * @period_len: length of individual cyclic buffer
1932 * @direction: DMA direction
1933 * @flags: transfer ack flags
1935 * Return: Async transaction descriptor on success and NULL on failure
1937 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1938 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1939 size_t period_len, enum dma_transfer_direction direction,
1940 unsigned long flags)
1942 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1943 struct xilinx_dma_tx_descriptor *desc;
1944 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1945 size_t copy, sg_used;
1946 unsigned int num_periods;
1953 num_periods = buf_len / period_len;
1958 if (!is_slave_direction(direction))
1961 /* Allocate a transaction descriptor. */
1962 desc = xilinx_dma_alloc_tx_descriptor(chan);
1966 chan->direction = direction;
1967 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1968 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1970 for (i = 0; i < num_periods; ++i) {
1973 while (sg_used < period_len) {
1974 struct xilinx_axidma_desc_hw *hw;
1976 /* Get a free segment */
1977 segment = xilinx_axidma_alloc_tx_segment(chan);
1982 * Calculate the maximum number of bytes to transfer,
1983 * making sure it is less than the hw limit
1985 copy = min_t(size_t, period_len - sg_used,
1986 XILINX_DMA_MAX_TRANS_LEN);
1988 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
1993 prev->hw.next_desc = segment->phys;
1999 * Insert the segment into the descriptor segments
2002 list_add_tail(&segment->node, &desc->segments);
2006 head_segment = list_first_entry(&desc->segments,
2007 struct xilinx_axidma_tx_segment, node);
2008 desc->async_tx.phys = head_segment->phys;
2010 desc->cyclic = true;
2011 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2012 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2013 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2015 segment = list_last_entry(&desc->segments,
2016 struct xilinx_axidma_tx_segment,
2018 segment->hw.next_desc = (u32) head_segment->phys;
2020 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2021 if (direction == DMA_MEM_TO_DEV) {
2022 head_segment->hw.control |= XILINX_DMA_BD_SOP;
2023 segment->hw.control |= XILINX_DMA_BD_EOP;
2026 return &desc->async_tx;
2029 xilinx_dma_free_tx_descriptor(chan, desc);
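/*
 * Illustrative sketch, not part of the driver: a cyclic (ring buffer)
 * transfer through the prep callback above. The buffer is divided into
 * num_periods periods and the callback fires once per completed period.
 * The buf_dma, period_len, num_periods and callback names are assumptions
 * made for the example only.
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma,
 *					 period_len * num_periods, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = period_done;
 *	desc->callback_param = ctx;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);		// runs until terminate_all()
 */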
2034 * xilinx_dma_prep_interleaved - prepare a descriptor for a
2035 * DMA_SLAVE transaction
2036 * @dchan: DMA channel
2037 * @xt: Interleaved template pointer
2038 * @flags: transfer ack flags
2040 * Return: Async transaction descriptor on success and NULL on failure
2042 static struct dma_async_tx_descriptor *
2043 xilinx_dma_prep_interleaved(struct dma_chan *dchan,
2044 struct dma_interleaved_template *xt,
2045 unsigned long flags)
2047 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2048 struct xilinx_dma_tx_descriptor *desc;
2049 struct xilinx_axidma_tx_segment *segment;
2050 struct xilinx_axidma_desc_hw *hw;
2052 if (!is_slave_direction(xt->dir))
2055 if (!xt->numf || !xt->sgl[0].size)
2058 if (xt->frame_size != 1)
2061 /* Allocate a transaction descriptor. */
2062 desc = xilinx_dma_alloc_tx_descriptor(chan);
2066 chan->direction = xt->dir;
2067 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2068 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2070 /* Get a free segment */
2071 segment = xilinx_axidma_alloc_tx_segment(chan);
2077 /* Fill in the descriptor */
2078 if (xt->dir != DMA_MEM_TO_DEV)
2079 hw->buf_addr = xt->dst_start;
2081 hw->buf_addr = xt->src_start;
2083 hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
2084 hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
2085 XILINX_DMA_BD_VSIZE_MASK;
2086 hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
2087 XILINX_DMA_BD_STRIDE_MASK;
2088 hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
2091 * Insert the segment into the descriptor segments
2094 list_add_tail(&segment->node, &desc->segments);
2097 segment = list_first_entry(&desc->segments,
2098 struct xilinx_axidma_tx_segment, node);
2099 desc->async_tx.phys = segment->phys;
2101 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2102 if (xt->dir == DMA_MEM_TO_DEV) {
2103 segment->hw.control |= XILINX_DMA_BD_SOP;
2104 segment = list_last_entry(&desc->segments,
2105 struct xilinx_axidma_tx_segment,
2107 segment->hw.control |= XILINX_DMA_BD_EOP;
2110 return &desc->async_tx;
2113 xilinx_dma_free_tx_descriptor(chan, desc);
2118 * xilinx_dma_terminate_all - Halt the channel and free descriptors
2119 * @dchan: Driver specific DMA Channel pointer
2121 * Return: '0' always.
2123 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2125 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2129 xilinx_dma_chan_reset(chan);
2131 /* Halt the DMA engine */
2132 xilinx_dma_halt(chan);
2134 /* Remove and free all of the descriptors in the lists */
2135 xilinx_dma_free_descriptors(chan);
2138 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2139 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2140 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2141 chan->cyclic = false;
2148 * xilinx_vdma_channel_set_config - Configure VDMA channel
2149 * Run-time configuration for Axi VDMA, supports:
2150 * . halt the channel
2151 * . configure interrupt coalescing and inter-packet delay threshold
2152 * . start/stop parking
2155 * @dchan: DMA channel
2156 * @cfg: VDMA device configuration pointer
2158 * Return: '0' on success and failure value on error
2160 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2161 struct xilinx_vdma_config *cfg)
2163 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2167 return xilinx_dma_chan_reset(chan);
2169 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2171 chan->config.frm_dly = cfg->frm_dly;
2172 chan->config.park = cfg->park;
2174 /* genlock settings */
2175 chan->config.gen_lock = cfg->gen_lock;
2176 chan->config.master = cfg->master;
2178 if (cfg->gen_lock && chan->genlock) {
2179 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2180 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2183 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2185 chan->config.park_frm = cfg->park_frm;
2187 chan->config.park_frm = -1;
2189 chan->config.coalesc = cfg->coalesc;
2190 chan->config.delay = cfg->delay;
2192 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2193 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2194 chan->config.coalesc = cfg->coalesc;
2197 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2198 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2199 chan->config.delay = cfg->delay;
2202 /* FSync Source selection */
2203 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2204 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2206 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2210 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
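/*
 * Illustrative only: a video client tuning a VDMA channel at run time via
 * the exported helper above.  "vdma_chan" and the chosen values are
 * assumptions, not requirements of this driver.
 *
 *	struct xilinx_vdma_config cfg = { 0 };
 *
 *	cfg.gen_lock = 1;	// run genlocked, if synthesized in hardware
 *	cfg.master = 0;		// genlock master select
 *	cfg.coalesc = 4;	// frame-count interrupt every 4 frames
 *	cfg.delay = 0;		// no inter-packet delay interrupt
 *	cfg.park = 0;		// circular mode rather than parking on a frame
 *	xilinx_vdma_channel_set_config(vdma_chan, &cfg);
 */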
2212 /* -----------------------------------------------------------------------------
2217 * xilinx_dma_chan_remove - Per Channel remove function
2218 * @chan: Driver specific DMA channel
2220 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2222 /* Disable all interrupts */
2223 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2224 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2227 free_irq(chan->irq, chan);
2229 tasklet_kill(&chan->tasklet);
2231 list_del(&chan->common.device_node);
2234 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2235 struct clk **tx_clk, struct clk **rx_clk,
2236 struct clk **sg_clk, struct clk **tmp_clk)
2242 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2243 if (IS_ERR(*axi_clk)) {
2244 err = PTR_ERR(*axi_clk);
2245 dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
2249 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2250 if (IS_ERR(*tx_clk))
2253 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2254 if (IS_ERR(*rx_clk))
2257 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2258 if (IS_ERR(*sg_clk))
2261 err = clk_prepare_enable(*axi_clk);
2263 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
2267 err = clk_prepare_enable(*tx_clk);
2269 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
2270 goto err_disable_axiclk;
2273 err = clk_prepare_enable(*rx_clk);
2275 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
2276 goto err_disable_txclk;
2279 err = clk_prepare_enable(*sg_clk);
2281 dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err);
2282 goto err_disable_rxclk;
2288 clk_disable_unprepare(*rx_clk);
2290 clk_disable_unprepare(*tx_clk);
2292 clk_disable_unprepare(*axi_clk);
2297 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2298 struct clk **dev_clk, struct clk **tmp_clk,
2299 struct clk **tmp1_clk, struct clk **tmp2_clk)
2307 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2308 if (IS_ERR(*axi_clk)) {
2309 err = PTR_ERR(*axi_clk);
2310 dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err);
2314 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2315 if (IS_ERR(*dev_clk)) {
2316 err = PTR_ERR(*dev_clk);
2317 dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err);
2321 err = clk_prepare_enable(*axi_clk);
2323 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
2327 err = clk_prepare_enable(*dev_clk);
2329 dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err);
2330 goto err_disable_axiclk;
2336 clk_disable_unprepare(*axi_clk);
2341 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2342 struct clk **tx_clk, struct clk **txs_clk,
2343 struct clk **rx_clk, struct clk **rxs_clk)
2347 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2348 if (IS_ERR(*axi_clk)) {
2349 err = PTR_ERR(*axi_clk);
2350 dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
2354 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2355 if (IS_ERR(*tx_clk))
2358 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2359 if (IS_ERR(*txs_clk))
2362 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2363 if (IS_ERR(*rx_clk))
2366 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2367 if (IS_ERR(*rxs_clk))
2370 err = clk_prepare_enable(*axi_clk);
2372 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
2376 err = clk_prepare_enable(*tx_clk);
2378 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
2379 goto err_disable_axiclk;
2382 err = clk_prepare_enable(*txs_clk);
2384 dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
2385 goto err_disable_txclk;
2388 err = clk_prepare_enable(*rx_clk);
2390 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
2391 goto err_disable_txsclk;
2394 err = clk_prepare_enable(*rxs_clk);
2396 dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
2397 goto err_disable_rxclk;
2403 clk_disable_unprepare(*rx_clk);
2405 clk_disable_unprepare(*txs_clk);
2407 clk_disable_unprepare(*tx_clk);
2409 clk_disable_unprepare(*axi_clk);
2414 static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2416 clk_disable_unprepare(xdev->rxs_clk);
2417 clk_disable_unprepare(xdev->rx_clk);
2418 clk_disable_unprepare(xdev->txs_clk);
2419 clk_disable_unprepare(xdev->tx_clk);
2420 clk_disable_unprepare(xdev->axi_clk);
2424 * xilinx_dma_chan_probe - Per Channel Probing
2425 * It gets the channel features from the device tree entry and
2426 * initializes the special channel handling routines.
2428 * @xdev: Driver specific device structure
2429 * @node: Device node
2430 * @chan_id: DMA Channel id
2432 * Return: '0' on success and failure value on error
2434 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2435 struct device_node *node, int chan_id)
2437 struct xilinx_dma_chan *chan;
2438 bool has_dre = false;
2442 /* Allocate and initialize the channel structure */
2443 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2447 chan->dev = xdev->dev;
2449 chan->has_sg = xdev->has_sg;
2450 chan->desc_pendingcount = 0x0;
2451 chan->ext_addr = xdev->ext_addr;
2452 /* This variable ensures that descriptors are not
2453 * submitted while the DMA engine is still busy. It is
2454 * added to avoid polling a bit in the status register to
2455 * learn the DMA state in the driver hot path.
2459 spin_lock_init(&chan->lock);
2460 INIT_LIST_HEAD(&chan->pending_list);
2461 INIT_LIST_HEAD(&chan->done_list);
2462 INIT_LIST_HEAD(&chan->active_list);
2463 INIT_LIST_HEAD(&chan->free_seg_list);
2465 /* Retrieve the channel properties from the device tree */
2466 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2468 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2470 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2472 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2475 width = value >> 3; /* Convert bits to bytes */
2477 /* If data width is greater than 8 bytes, DRE is not in hw */
2482 xdev->common.copy_align = fls(width - 1);
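/*
 * Worked example (assumed values): a 64-bit stream gives width = 8 bytes,
 * so copy_align = fls(7) = 3, i.e. buffers must be 2^3 = 8-byte aligned
 * when the hardware lacks DRE.
 */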
2484 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2485 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2486 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2487 chan->direction = DMA_MEM_TO_DEV;
2489 chan->tdest = chan_id;
2490 xdev->common.directions = BIT(DMA_MEM_TO_DEV);
2492 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2493 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2494 chan->config.park = 1;
2495 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2497 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2498 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2499 chan->flush_on_fsync = true;
2501 } else if (of_device_is_compatible(node,
2502 "xlnx,axi-vdma-s2mm-channel") ||
2503 of_device_is_compatible(node,
2504 "xlnx,axi-dma-s2mm-channel")) {
2505 chan->direction = DMA_DEV_TO_MEM;
2507 chan->tdest = chan_id - xdev->nr_channels;
2508 xdev->common.directions |= BIT(DMA_DEV_TO_MEM);
2510 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2511 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2512 chan->config.park = 1;
2513 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2515 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2516 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2517 chan->flush_on_fsync = true;
2520 dev_err(xdev->dev, "Invalid channel compatible node\n");
2524 /* Request the interrupt */
2525 chan->irq = irq_of_parse_and_map(node, 0);
2526 err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
2527 "xilinx-dma-controller", chan);
2529 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2533 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2534 chan->start_transfer = xilinx_dma_start_transfer;
2535 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
2536 chan->start_transfer = xilinx_cdma_start_transfer;
2538 chan->start_transfer = xilinx_vdma_start_transfer;
2540 /* Initialize the tasklet */
2541 tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
2542 (unsigned long)chan);
2545 * Initialize the DMA channel and add it to the DMA engine channels
2548 chan->common.device = &xdev->common;
2550 list_add_tail(&chan->common.device_node, &xdev->common.channels);
2551 xdev->chan[chan->id] = chan;
2553 /* Reset the channel */
2554 err = xilinx_dma_chan_reset(chan);
2556 dev_err(xdev->dev, "Reset channel failed\n");
2564 * xilinx_dma_child_probe - Per child node probe
2565 * It gets the number of dma-channels per child node from
2566 * the device tree and initializes all the channels.
2568 * @xdev: Driver specific device structure
2569 * @node: Device node
2573 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2574 struct device_node *node) {
2575 int ret, i, nr_channels = 1;
2577 ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2578 if ((ret < 0) && xdev->mcdma)
2579 dev_warn(xdev->dev, "missing dma-channels property\n");
2581 for (i = 0; i < nr_channels; i++)
2582 xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
2584 xdev->nr_channels += nr_channels;
2590 * of_dma_xilinx_xlate - Translation function
2591 * @dma_spec: Pointer to DMA specifier as found in the device tree
2592 * @ofdma: Pointer to DMA controller data
2594 * Return: DMA channel pointer on success and NULL on error
2596 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2597 struct of_dma *ofdma)
2599 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
2600 int chan_id = dma_spec->args[0];
2602 if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
2605 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
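/*
 * Illustrative only: the translation above consumes a single-cell
 * specifier, so a client device tree node would reference channel 0 as,
 * e.g., dmas = <&axi_dma_0 0>; dma-names = "rx"; (names assumed), and the
 * client driver would then request it with:
 *
 *	struct dma_chan *rx_chan;
 *
 *	rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
 */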
2608 static const struct xilinx_dma_config axidma_config = {
2609 .dmatype = XDMA_TYPE_AXIDMA,
2610 .clk_init = axidma_clk_init,
2613 static const struct xilinx_dma_config axicdma_config = {
2614 .dmatype = XDMA_TYPE_CDMA,
2615 .clk_init = axicdma_clk_init,
2618 static const struct xilinx_dma_config axivdma_config = {
2619 .dmatype = XDMA_TYPE_VDMA,
2620 .clk_init = axivdma_clk_init,
2623 static const struct of_device_id xilinx_dma_of_ids[] = {
2624 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2625 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2626 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
2629 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2632 * xilinx_dma_probe - Driver probe function
2633 * @pdev: Pointer to the platform_device structure
2635 * Return: '0' on success and failure value on error
2637 static int xilinx_dma_probe(struct platform_device *pdev)
2639 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2640 struct clk **, struct clk **, struct clk **)
2642 struct device_node *node = pdev->dev.of_node;
2643 struct xilinx_dma_device *xdev;
2644 struct device_node *child, *np = pdev->dev.of_node;
2645 struct resource *io;
2646 u32 num_frames, addr_width;
2649 /* Allocate and initialize the DMA engine structure */
2650 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2654 xdev->dev = &pdev->dev;
2656 const struct of_device_id *match;
2658 match = of_match_node(xilinx_dma_of_ids, np);
2659 if (match && match->data) {
2660 xdev->dma_config = match->data;
2661 clk_init = xdev->dma_config->clk_init;
2665 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
2666 &xdev->rx_clk, &xdev->rxs_clk);
2670 /* Request and map I/O memory */
2671 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2672 xdev->regs = devm_ioremap_resource(&pdev->dev, io);
2673 if (IS_ERR(xdev->regs))
2674 return PTR_ERR(xdev->regs);
2676 /* Retrieve the DMA engine properties from the device tree */
2677 xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
2678 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2679 xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
2681 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2682 err = of_property_read_u32(node, "xlnx,num-fstores",
2686 "missing xlnx,num-fstores property\n");
2690 err = of_property_read_u32(node, "xlnx,flush-fsync",
2691 &xdev->flush_on_fsync);
2694 "missing xlnx,flush-fsync property\n");
2697 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
2699 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
2701 if (addr_width > 32)
2702 xdev->ext_addr = true;
2704 xdev->ext_addr = false;
2706 /* Set the dma mask bits */
2707 dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
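/*
 * Worked example (assumed value): xlnx,addrwidth = <40> sets ext_addr, so
 * the channels program 64-bit buffer addresses in their descriptors, and a
 * 40-bit DMA mask is applied here.
 */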
2709 /* Initialize the DMA engine */
2710 xdev->common.dev = &pdev->dev;
2712 INIT_LIST_HEAD(&xdev->common.channels);
2713 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
2714 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2715 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2718 xdev->common.dst_addr_widths = BIT(addr_width / 8);
2719 xdev->common.src_addr_widths = BIT(addr_width / 8);
2720 xdev->common.device_alloc_chan_resources =
2721 xilinx_dma_alloc_chan_resources;
2722 xdev->common.device_free_chan_resources =
2723 xilinx_dma_free_chan_resources;
2724 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
2725 xdev->common.device_tx_status = xilinx_dma_tx_status;
2726 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
2727 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2728 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
2729 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
2730 xdev->common.device_prep_dma_cyclic =
2731 xilinx_dma_prep_dma_cyclic;
2732 xdev->common.device_prep_interleaved_dma =
2733 xilinx_dma_prep_interleaved;
2734 /* Residue calculation is supported only by AXI DMA */
2735 xdev->common.residue_granularity =
2736 DMA_RESIDUE_GRANULARITY_SEGMENT;
2737 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2738 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2739 dma_cap_set(DMA_SG, xdev->common.cap_mask);
2740 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
2741 xdev->common.device_prep_dma_sg = xilinx_cdma_prep_sg;
2743 xdev->common.device_prep_interleaved_dma =
2744 xilinx_vdma_dma_prep_interleaved;
2747 platform_set_drvdata(pdev, xdev);
2749 /* Initialize the channels */
2750 for_each_child_of_node(node, child) {
2751 err = xilinx_dma_child_probe(xdev, child);
2756 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2757 for (i = 0; i < xdev->nr_channels; i++)
2759 xdev->chan[i]->num_frms = num_frames;
2762 /* Register the DMA engine with the core */
2763 dma_async_device_register(&xdev->common);
2765 err = of_dma_controller_register(node, of_dma_xilinx_xlate,
2768 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
2769 dma_async_device_unregister(&xdev->common);
2773 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
2778 xdma_disable_allclks(xdev);
2780 for (i = 0; i < xdev->nr_channels; i++)
2782 xilinx_dma_chan_remove(xdev->chan[i]);
2788 * xilinx_dma_remove - Driver remove function
2789 * @pdev: Pointer to the platform_device structure
2791 * Return: Always '0'
2793 static int xilinx_dma_remove(struct platform_device *pdev)
2795 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
2798 of_dma_controller_free(pdev->dev.of_node);
2800 dma_async_device_unregister(&xdev->common);
2802 for (i = 0; i < xdev->nr_channels; i++)
2804 xilinx_dma_chan_remove(xdev->chan[i]);
2806 xdma_disable_allclks(xdev);
2811 static struct platform_driver xilinx_vdma_driver = {
2813 .name = "xilinx-vdma",
2814 .of_match_table = xilinx_dma_of_ids,
2816 .probe = xilinx_dma_probe,
2817 .remove = xilinx_dma_remove,
2820 module_platform_driver(xilinx_vdma_driver);
2822 MODULE_AUTHOR("Xilinx, Inc.");
2823 MODULE_DESCRIPTION("Xilinx VDMA driver");
2824 MODULE_LICENSE("GPL v2");