/*
 * Xilinx DMA Engine support
 *
 * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * This driver supports three Xilinx DMA engines:
 *  . Axi CDMA engine, it does transfers between memory and memory, it
 *    only has one channel.
 *  . Axi DMA engine, it does transfers between memory and device. It can be
 *    configured to have one channel or two channels. If configured as two
 *    channels, one is to transmit to a device and another is to receive from
 *    the device.
 *  . Axi VDMA engine, it does transfers between memory and video devices.
 *    It can be configured to have one channel or two channels. If configured
 *    as two channels, one is to transmit to the video device and another is
 *    to receive from the video device.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
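/*
 * Example (illustrative sketch, not part of the driver): a client would
 * typically drive the Axi CDMA memcpy path through the generic dmaengine
 * API roughly as below. The filter function, the match word layout and the
 * error handling are assumptions for illustration only; see the channel
 * private word set up in the chan_probe routines further down.
 */
#if 0	/* usage sketch */
static bool xilinx_cdma_filter(struct dma_chan *chan, void *param)
{
	/* Match on the driver's private word (direction | IP type). */
	return chan->private && *(u32 *)chan->private == *(u32 *)param;
}

static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	u32 match = DMA_MEM_TO_MEM | XILINX_DMA_IP_CDMA; /* assumed layout */
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, xilinx_cdma_filter, &match);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}
	tx->submit(tx);			/* xilinx_dma_tx_submit() */
	dma_async_issue_pending(chan);	/* xilinx_cdma_issue_pending() */
	dma_release_channel(chan);
	return 0;
}
#endif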
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/amba/xilinx_dma.h>
#include <linux/of_irq.h>
/* Hw specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x2
#define XILINX_DMA_MAX_TRANS_LEN	0x7FFFFF

/* General register bits definitions */
#define XILINX_DMA_CR_RESET_MASK	0x00000004 /* Reset DMA engine */
#define XILINX_DMA_CR_RUNSTOP_MASK	0x00000001 /* Start/stop DMA engine */

#define XILINX_DMA_SR_HALTED_MASK	0x00000001 /* DMA channel halted */
#define XILINX_DMA_SR_IDLE_MASK		0x00000002 /* DMA channel idle */

#define XILINX_DMA_SR_ERR_INTERNAL_MASK	0x00000010 /* Datamover internal err */
#define XILINX_DMA_SR_ERR_SLAVE_MASK	0x00000020 /* Datamover slave err */
#define XILINX_DMA_SR_ERR_DECODE_MASK	0x00000040 /* Datamover decode err */
#define XILINX_DMA_SR_ERR_SG_INT_MASK	0x00000100 /* SG internal err */
#define XILINX_DMA_SR_ERR_SG_SLV_MASK	0x00000200 /* SG slave err */
#define XILINX_DMA_SR_ERR_SG_DEC_MASK	0x00000400 /* SG decode err */
#define XILINX_DMA_SR_ERR_ALL_MASK	0x00000770 /* All errors */

#define XILINX_DMA_XR_IRQ_IOC_MASK	0x00001000 /* Completion interrupt */
#define XILINX_DMA_XR_IRQ_DELAY_MASK	0x00002000 /* Delay interrupt */
#define XILINX_DMA_XR_IRQ_ERROR_MASK	0x00004000 /* Error interrupt */
#define XILINX_DMA_XR_IRQ_ALL_MASK	0x00007000 /* All interrupts */

#define XILINX_DMA_XR_DELAY_MASK	0xFF000000 /* Delay timeout counter */
#define XILINX_DMA_XR_COALESCE_MASK	0x00FF0000 /* Coalesce counter */

#define XILINX_DMA_IRQ_SHIFT		12
#define XILINX_DMA_DELAY_SHIFT		24
#define XILINX_DMA_COALESCE_SHIFT	16

#define XILINX_DMA_DELAY_MAX		0xFF /* Maximum delay counter value */
#define XILINX_DMA_COALESCE_MAX		0xFF /* Maximum coalescing counter value */

#define XILINX_DMA_RX_CHANNEL_OFFSET	0x30
/* Axi CDMA special register bits */
#define XILINX_CDMA_CR_SGMODE_MASK	0x00000008 /* Scatter gather mode */

#define XILINX_CDMA_SR_SGINCLD_MASK	0x00000008 /* Hybrid build */
#define XILINX_CDMA_XR_IRQ_SIMPLE_ALL_MASK	0x00005000 /* All interrupts for
							      simple-only mode */
/* Axi VDMA special register bits */
#define XILINX_VDMA_CIRC_EN		0x00000002 /* Circular mode */
#define XILINX_VDMA_SYNC_EN		0x00000008 /* Sync enable mode */
#define XILINX_VDMA_FRMCNT_EN		0x00000010 /* Frm Cnt enable mode */
#define XILINX_VDMA_MSTR_MASK		0x00000F00 /* Master in control */

#define XILINX_VDMA_EXTFSYNC_SHIFT	6
#define XILINX_VDMA_MSTR_SHIFT		8
#define XILINX_VDMA_WR_REF_SHIFT	8

#define XILINX_VDMA_FRMDLY_SHIFT	24

#define XILINX_VDMA_DIRECT_REG_OFFSET	0x50
#define XILINX_VDMA_CHAN_DIRECT_REG_SIZE	0x50

#define XILINX_VDMA_PARK_REG_OFFSET	0x28
/* Axi VDMA specific error bits */
#define XILINX_VDMA_SR_ERR_FSIZE_LESS_MASK	0x00000080 /* FSize Less
							      Mismatch err */
#define XILINX_VDMA_SR_ERR_LSIZE_LESS_MASK	0x00000100 /* LSize Less
							      Mismatch err */
#define XILINX_VDMA_SR_ERR_FSIZE_MORE_MASK	0x00000800 /* FSize More
							      Mismatch err */

/* Recoverable errors are DMA Internal error, FSize Less, LSize Less
 * and FSize More mismatch errors. These are recoverable only
 * when C_FLUSH_ON_FSYNC is enabled in the hardware system.
 */
#define XILINX_VDMA_SR_ERR_RECOVER_MASK	0x00000990 /* Recoverable errs */
/* Axi VDMA flush on Fsync bits */
#define XILINX_VDMA_FLUSH_S2MM	3
#define XILINX_VDMA_FLUSH_MM2S	2
#define XILINX_VDMA_FLUSH_BOTH	1

/* BD definitions for Axi DMA and Axi CDMA */
#define XILINX_DMA_BD_STS_COMPL_MASK	0x80000000
#define XILINX_DMA_BD_STS_ERR_MASK	0x70000000
#define XILINX_DMA_BD_STS_ALL_MASK	0xF0000000

/* Axi DMA BD special bits definitions */
#define XILINX_DMA_BD_SOP	0x08000000 /* Start of packet bit */
#define XILINX_DMA_BD_EOP	0x04000000 /* End of packet bit */

#define XILINX_DMA_FTR_DATA_WIDTH_MASK	0x000000FF /* Data width mask, 1024 */
#define XILINX_DMA_FTR_HAS_SG		0x00000100 /* Has SG */
#define XILINX_DMA_FTR_HAS_SG_SHIFT	8 /* Has SG shift */
#define XILINX_DMA_FTR_STSCNTRL_STRM	0x00010000 /* Optional feature for dma */

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_RESET_LOOP	1000000
#define XILINX_DMA_HALT_LOOP	1000000

/* Device Id in the private structure */
#define XILINX_DMA_DEVICE_ID_SHIFT	28
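/*
 * All channel register access funnels through the two wrappers below;
 * ioread32() and iowrite32() give endian-safe 32-bit MMIO access
 * regardless of CPU byte order.
 */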
#define DMA_OUT(addr, val)	(iowrite32(val, addr))
#define DMA_IN(addr)		(ioread32(addr))
/* Hardware descriptor, shared by all Xilinx DMA engines */
struct xilinx_dma_desc_hw {
	u32 next_desc;	/* 0x00 */
	u32 pad1;	/* 0x04 */
	u32 buf_addr;	/* 0x08 */
	u32 pad2;	/* 0x0C */
	u32 addr_vsize;	/* 0x10 */
	u32 hsize;	/* 0x14 */
	u32 control;	/* 0x18 */
	u32 status;	/* 0x1C */
	u32 app_0;	/* 0x20 */
	u32 app_1;	/* 0x24 */
	u32 app_2;	/* 0x28 */
	u32 app_3;	/* 0x2C */
	u32 app_4;	/* 0x30 */
} __attribute__((aligned(64)));
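/*
 * Hardware walks these descriptors as a singly linked list: next_desc holds
 * the bus address of the next descriptor, and the prep routines below link
 * the last BD back to the first to form a ring. The 64-byte alignment is a
 * hardware requirement on descriptor addresses.
 */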
struct xilinx_dma_desc_sw {
	struct xilinx_dma_desc_hw hw;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
} __attribute__((aligned(64)));
struct xdma_regs {
	u32 cr;		/* 0x00 Control Register */
	u32 sr;		/* 0x04 Status Register */
	u32 cdr;	/* 0x08 Current Descriptor Register */
	u32 pad1;
	u32 tdr;	/* 0x10 Tail Descriptor Register */
	u32 pad2;
	u32 src;	/* 0x18 Source Address Register (cdma) */
	u32 pad3;
	u32 dst;	/* 0x20 Destination Address Register (cdma) */
	u32 pad4;
	u32 btt_ref;	/* 0x28 Bytes To Transfer (cdma) or park_ref (vdma) */
	u32 version;	/* 0x2c version (vdma) */
};
struct vdma_addr_regs {
	u32 vsize;		/* 0x0 Vertical size */
	u32 hsize;		/* 0x4 Horizontal size */
	u32 frmdly_stride;	/* 0x8 Frame delay and stride */
	u32 buf_addr[16];	/* 0xC - 0x48 Src addresses */
};
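/*
 * In direct register (non-SG) mode the VDMA prep/start paths below program
 * vsize, hsize, frmdly_stride and up to 16 frame buffer addresses straight
 * into this block instead of having the hardware fetch descriptors from
 * memory.
 */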
/* Per DMA specific operations should be embedded in the channel structure */
struct xilinx_dma_chan {
	struct xdma_regs __iomem *regs;	/* Control status registers */
	struct vdma_addr_regs *addr_regs;	/* Direct address registers */
	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
	dma_cookie_t cookie;		/* The current cookie */
	spinlock_t lock;		/* Descriptor operation lock */
	bool sg_waiting;		/* Scatter gather transfer waiting */
	struct list_head active_list;	/* Active descriptors */
	struct list_head pending_list;	/* Descriptors waiting */
	struct dma_chan common;		/* DMA common channel */
	struct dma_pool *desc_pool;	/* Descriptors pool */
	struct device *dev;		/* The dma device */
	int irq;			/* Channel IRQ */
	int id;				/* Channel ID */
	enum dma_transfer_direction direction;	/* Transfer direction */
	int max_len;			/* Maximum data len per transfer */
	int is_lite;			/* Whether is light build */
	int num_frms;			/* Number of frames */
	int has_SG;			/* Support scatter transfers */
	int has_DRE;			/* Support unaligned transfers */
	int genlock;			/* Support genlock mode */
	int err;			/* Channel has errors */
	struct tasklet_struct tasklet;	/* Cleanup work after irq */
	u32 feature;			/* IP feature */
	u32 private;			/* Match info for channel request */
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	struct xilinx_dma_config config;	/* Device configuration info */
	u32 flush_fsync;		/* Flush on Fsync */
};
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	u32 feature;
};

#define to_xilinx_chan(chan) container_of(chan, struct xilinx_dma_chan, common)
/* Required functions */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * for meeting Xilinx DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
				chan->dev,
				sizeof(struct xilinx_dma_desc_sw),
				__alignof__(struct xilinx_dma_desc_sw), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	chan->completed_cookie = 1;
	chan->cookie = 1;

	/* There is at least one descriptor free to be allocated */
	return 1;
}
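/*
 * dma_pool_create(name, dev, size, align, boundary) hands back a pool whose
 * allocations all meet the 64-byte alignment above, so every descriptor's
 * bus address (async_tx.phys) is directly usable in CDR/TDR and next_desc.
 */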
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void xilinx_dma_free_desc_list_reverse(struct xilinx_dma_chan *chan,
					      struct list_head *list)
{
	struct xilinx_dma_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dma_free_desc_list(chan, &chan->active_list);
	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	spin_unlock_irqrestore(&chan->lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}
static enum dma_status xilinx_dma_desc_status(struct xilinx_dma_chan *chan,
					      struct xilinx_dma_desc_sw *desc)
{
	return dma_async_is_complete(desc->async_tx.cookie,
				     chan->completed_cookie,
				     chan->cookie);
}
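/*
 * dma_async_is_complete() treats cookies as a monotonically increasing
 * sequence: a descriptor counts as complete once its cookie is inside the
 * (completed_cookie, current cookie) window, and in-progress while it is
 * still ahead of completed_cookie.
 */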
static void xilinx_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &chan->active_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (xilinx_dma_desc_status(chan, desc) == DMA_IN_PROGRESS)
			break;

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}
static enum dma_status xilinx_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	xilinx_chan_desc_cleanup(chan);

	last_used = dchan->cookie;
	last_complete = chan->completed_cookie;

	dma_set_tx_state(txstate, last_complete, last_used, 0);

	return dma_async_is_complete(cookie, last_complete, last_used);
}
static int dma_is_running(struct xilinx_dma_chan *chan)
{
	return !(DMA_IN(&chan->regs->sr) & XILINX_DMA_SR_HALTED_MASK) &&
		(DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RUNSTOP_MASK);
}

static int dma_is_idle(struct xilinx_dma_chan *chan)
{
	return DMA_IN(&chan->regs->sr) & XILINX_DMA_SR_IDLE_MASK;
}
/* Only needed for Axi CDMA v2_00_a or earlier core */
static void dma_sg_toggle(struct xilinx_dma_chan *chan)
{
	DMA_OUT(&chan->regs->cr,
		DMA_IN(&chan->regs->cr) & ~XILINX_CDMA_CR_SGMODE_MASK);

	DMA_OUT(&chan->regs->cr,
		DMA_IN(&chan->regs->cr) | XILINX_CDMA_CR_SGMODE_MASK);
}
#define XILINX_DMA_DRIVER_DEBUG	0

#if (XILINX_DMA_DRIVER_DEBUG == 1)
static void desc_dump(struct xilinx_dma_desc_hw *hw)
{
	printk(KERN_INFO "hw desc %p:\n", hw);
	printk(KERN_INFO "\tnext_desc %x\n", hw->next_desc);
	printk(KERN_INFO "\tbuf_addr %x\n", hw->buf_addr);
	printk(KERN_INFO "\taddr_vsize %x\n", hw->addr_vsize);
	printk(KERN_INFO "\thsize %x\n", hw->hsize);
	printk(KERN_INFO "\tcontrol %x\n", hw->control);
	printk(KERN_INFO "\tstatus %x\n", hw->status);
}
#endif
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
	unsigned long flags;
	struct xilinx_dma_desc_sw *desch, *desct;
	struct xilinx_dma_desc_hw *hw;

	if (chan->err)
		return;

	spin_lock_irqsave(&chan->lock, flags);

	if (list_empty(&chan->pending_list))
		goto out_unlock;

	/* If hardware is busy, cannot submit */
	if (!dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy %x\n",
			DMA_IN(&chan->regs->sr));
		goto out_unlock;
	}

	/* Enable interrupts */
	DMA_OUT(&chan->regs->cr,
		DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);

	desch = list_first_entry(&chan->pending_list,
				 struct xilinx_dma_desc_sw, node);

	if (chan->has_SG) {
		/* If hybrid mode, append pending list to active list */
		desct = container_of(chan->pending_list.prev,
				     struct xilinx_dma_desc_sw, node);

		list_splice_tail_init(&chan->pending_list, &chan->active_list);

		/* If hardware is idle, then all descriptors on the active list
		 * are done, start new transfers
		 */
		dma_sg_toggle(chan);

		DMA_OUT(&chan->regs->cdr, desch->async_tx.phys);

		/* Update tail ptr register and start the transfer */
		DMA_OUT(&chan->regs->tdr, desct->async_tx.phys);
	} else {
		/* In simple mode */
		list_del(&desch->node);
		list_add_tail(&desch->node, &chan->active_list);

		hw = &desch->hw;

		DMA_OUT(&chan->regs->src, hw->buf_addr);
		DMA_OUT(&chan->regs->dst, hw->addr_vsize);

		/* Start the transfer */
		DMA_OUT(&chan->regs->btt_ref,
			hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}
/* If sg mode, link the pending list to running list; if simple mode, get the
 * head of the pending list and submit it to hw
 */
static void xilinx_cdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	xilinx_cdma_start_transfer(chan);
}
/* Stop the hardware, the ongoing transfer will be finished */
static void dma_halt(struct xilinx_dma_chan *chan)
{
	int loop = XILINX_DMA_HALT_LOOP;

	DMA_OUT(&chan->regs->cr,
		DMA_IN(&chan->regs->cr) & ~XILINX_DMA_CR_RUNSTOP_MASK);

	/* Wait for the hardware to halt */
	while (loop) {
		if (!(DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RUNSTOP_MASK))
			break;
		loop -= 1;
	}

	if (!loop) {
		pr_debug("Cannot stop channel %p: %x\n",
			 chan, DMA_IN(&chan->regs->cr));
		chan->err = 1;
	}
}

/* Start the hardware. Transfers are not started yet */
static void dma_start(struct xilinx_dma_chan *chan)
{
	int loop = XILINX_DMA_HALT_LOOP;

	DMA_OUT(&chan->regs->cr,
		DMA_IN(&chan->regs->cr) | XILINX_DMA_CR_RUNSTOP_MASK);

	/* Wait for the hardware to start */
	while (loop) {
		if (DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RUNSTOP_MASK)
			break;
		loop -= 1;
	}

	if (!loop) {
		pr_debug("Cannot start channel %p: %x\n",
			 chan, DMA_IN(&chan->regs->cr));
		chan->err = 1;
	}
}
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	unsigned long flags;
	struct xilinx_dma_desc_sw *desch, *desct;
	struct xilinx_dma_desc_hw *hw;

	if (chan->err)
		return;

	spin_lock_irqsave(&chan->lock, flags);

	if (list_empty(&chan->pending_list))
		goto out_unlock;

	/* If hardware is busy, cannot submit */
	if (dma_is_running(chan) && !dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		goto out_unlock;
	}

	/* If hardware is idle, then all descriptors on active list are
	 * done, start new transfers
	 */
	dma_halt(chan);

	if (chan->err)
		goto out_unlock;

	if (chan->has_SG) {
		desch = list_first_entry(&chan->pending_list,
					 struct xilinx_dma_desc_sw, node);

		desct = container_of(chan->pending_list.prev,
				     struct xilinx_dma_desc_sw, node);

		DMA_OUT(&chan->regs->cdr, desch->async_tx.phys);

		dma_start(chan);
		if (chan->err)
			goto out_unlock;

		list_splice_tail_init(&chan->pending_list, &chan->active_list);

		/* Enable interrupts */
		DMA_OUT(&chan->regs->cr,
			DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);

		/* Update tail ptr register and start the transfer */
		DMA_OUT(&chan->regs->tdr, desct->async_tx.phys);
	} else {
		/* In simple mode */
		dev_dbg(chan->dev, "xilinx_dma_start_transfer: simple DMA mode\n");

		desch = list_first_entry(&chan->pending_list,
					 struct xilinx_dma_desc_sw, node);

		list_del(&desch->node);
		list_add_tail(&desch->node, &chan->active_list);

		dma_start(chan);
		if (chan->err)
			goto out_unlock;

		hw = &desch->hw;

		/* Enable interrupts */
		DMA_OUT(&chan->regs->cr,
			DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);

		DMA_OUT(&chan->regs->src, hw->buf_addr);

		/* Start the transfer */
		DMA_OUT(&chan->regs->btt_ref,
			hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	xilinx_dma_start_transfer(chan);
}
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	unsigned long flags;
	struct xilinx_dma_desc_sw *desch, *desct = NULL;
	struct xilinx_dma_config *config;
	u32 reg;
	char *chan_base;

	if (chan->err)
		return;

	spin_lock_irqsave(&chan->lock, flags);

	if (list_empty(&chan->pending_list))
		goto out_unlock;

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_SG && dma_is_running(chan) && !dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		goto out_unlock;
	}

	/* If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_SG) {
		desch = list_first_entry(&chan->pending_list,
					 struct xilinx_dma_desc_sw, node);

		desct = container_of(chan->pending_list.prev,
				     struct xilinx_dma_desc_sw, node);

		DMA_OUT(&chan->regs->cdr, desch->async_tx.phys);
	}

	/* Configure the hardware using info in the config structure */
	config = &chan->config;
	reg = DMA_IN(&chan->regs->cr);

	if (config->frm_cnt_en)
		reg |= XILINX_VDMA_FRMCNT_EN;
	else
		reg &= ~XILINX_VDMA_FRMCNT_EN;

	/* With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode
	 */
	if (chan->has_SG || !config->park)
		reg |= XILINX_VDMA_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_VDMA_CIRC_EN;

	DMA_OUT(&chan->regs->cr, reg);

	if ((config->park_frm >= 0) && (config->park_frm < chan->num_frms)) {
		if (config->direction == DMA_MEM_TO_DEV) {
			chan_base = (char *)chan->regs;
			DMA_OUT((chan_base + XILINX_VDMA_PARK_REG_OFFSET),
				config->park_frm);
		} else {
			chan_base = (char *)chan->regs -
				XILINX_DMA_RX_CHANNEL_OFFSET;
			DMA_OUT((chan_base + XILINX_VDMA_PARK_REG_OFFSET),
				config->park_frm << XILINX_VDMA_WR_REF_SHIFT);
		}
	}

	/* Start the hardware */
	dma_start(chan);
	if (chan->err)
		goto out_unlock;

	list_splice_tail_init(&chan->pending_list, &chan->active_list);

	/* Enable interrupts;
	 * park/genlock testing does not use interrupts
	 */
	if (!chan->config.disable_intr) {
		DMA_OUT(&chan->regs->cr,
			DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);
	} else {
		DMA_OUT(&chan->regs->cr,
			DMA_IN(&chan->regs->cr) |
			chan->config.disable_intr << XILINX_DMA_IRQ_SHIFT);
	}

	/* Start the transfer */
	if (chan->has_SG)
		DMA_OUT(&chan->regs->tdr, desct->async_tx.phys);
	else
		DMA_OUT(&chan->addr_regs->vsize, config->vsize);

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}
static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	xilinx_vdma_start_transfer(chan);
}
/**
 * xilinx_dma_update_completed_cookie - Update the completed cookie.
 * @chan: xilinx DMA channel
 */
static void xilinx_dma_update_completed_cookie(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_desc_sw *desc = NULL;
	struct xilinx_dma_desc_hw *hw = NULL;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;
	int done = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (list_empty(&chan->active_list)) {
		dev_dbg(chan->dev, "no running descriptors\n");
		goto out_unlock;
	}

	/* Get the last completed descriptor, update the cookie to that */
	list_for_each_entry(desc, &chan->active_list, node) {
		if ((!(chan->feature & XILINX_DMA_IP_VDMA)) && chan->has_SG) {
			hw = &desc->hw;

			/* If a BD has no status bits set, hw has it */
			if (!(hw->status & XILINX_DMA_BD_STS_ALL_MASK))
				break;
			done = 1;
			cookie = desc->async_tx.cookie;
		} else {
			/* In non-SG mode, all active entries are done */
			done = 1;
			cookie = desc->async_tx.cookie;
		}
	}

	if (done)
		chan->completed_cookie = cookie;

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}
/* Reset the channel */
static int dma_init(struct xilinx_dma_chan *chan)
{
	int loop = XILINX_DMA_RESET_LOOP;
	u32 tmp;

	DMA_OUT(&chan->regs->cr,
		DMA_IN(&chan->regs->cr) | XILINX_DMA_CR_RESET_MASK);

	tmp = DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RESET_MASK;

	/* Wait for the hardware to finish reset */
	while (loop && tmp) {
		tmp = DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RESET_MASK;
		loop -= 1;
	}

	if (!loop) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			DMA_IN(&chan->regs->cr), DMA_IN(&chan->regs->sr));
		return 1;
	}

	/* For Axi CDMA, always do sg transfers if sg mode is built in */
	if ((chan->feature & XILINX_DMA_IP_CDMA) && chan->has_SG)
		DMA_OUT(&chan->regs->cr, tmp | XILINX_CDMA_CR_SGMODE_MASK);

	return 0;
}
static irqreturn_t dma_intr_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	int update_cookie = 0;
	int to_transfer = 0;
	u32 stat, reg;

	reg = DMA_IN(&chan->regs->cr);

	/* Disable intr */
	DMA_OUT(&chan->regs->cr,
		reg & ~XILINX_DMA_XR_IRQ_ALL_MASK);

	stat = DMA_IN(&chan->regs->sr);
	if (!(stat & XILINX_DMA_XR_IRQ_ALL_MASK))
		return IRQ_NONE;

	/* Ack the interrupts */
	DMA_OUT(&chan->regs->sr, XILINX_DMA_XR_IRQ_ALL_MASK);

	/* Check for only the interrupts which are enabled */
	stat &= (reg & XILINX_DMA_XR_IRQ_ALL_MASK);

	if (stat & XILINX_DMA_XR_IRQ_ERROR_MASK) {
		if ((chan->feature & XILINX_DMA_IP_VDMA)
		    && chan->flush_fsync) {
			/* VDMA recoverable errors, only when
			 * C_FLUSH_ON_FSYNC is enabled
			 */
			u32 error = DMA_IN(&chan->regs->sr) &
				XILINX_VDMA_SR_ERR_RECOVER_MASK;

			DMA_OUT(&chan->regs->sr, error);
		} else {
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan,
				DMA_IN(&chan->regs->sr),
				DMA_IN(&chan->regs->cdr),
				DMA_IN(&chan->regs->tdr));
			chan->err = 1;
		}
	}

	/* Device takes too long to do the transfer when user requires
	 * responsiveness
	 */
	if (stat & XILINX_DMA_XR_IRQ_DELAY_MASK)
		dev_dbg(chan->dev, "Inter-packet latency too long\n");

	if (stat & XILINX_DMA_XR_IRQ_IOC_MASK) {
		update_cookie = 1;
		to_transfer = 1;
	}

	if (update_cookie)
		xilinx_dma_update_completed_cookie(chan);

	if (to_transfer)
		chan->start_transfer(chan);

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}
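/*
 * The hard-IRQ handler above only acks hardware state, updates the cookie
 * and restarts the engine; descriptor cleanup and client callbacks are
 * deferred to the tasklet below so they run in softirq context rather than
 * with the hardware interrupt outstanding.
 */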
static void dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_chan_desc_cleanup(chan);
}
/* Append the descriptor list to the pending list */
static void append_desc_queue(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_desc_sw *desc)
{
	struct xilinx_dma_desc_sw *tail = container_of(chan->pending_list.prev,
					struct xilinx_dma_desc_sw, node);
	struct xilinx_dma_desc_hw *hw;

	if (list_empty(&chan->pending_list))
		goto out_splice;

	/* Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 */
	hw = &(tail->hw);
	hw->next_desc = (u32)desc->async_tx.phys;

	/* Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->pending_list);
}
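/*
 * After this splice the software pending_list and the hardware next_desc
 * chain describe the same ordering, so a single tail-pointer update in the
 * start_transfer routines kicks off everything queued so far.
 */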
/* Assign cookie to each descriptor, and append the descriptors to the pending
 * list
 */
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
	struct xilinx_dma_desc_sw *desc = container_of(tx,
				struct xilinx_dma_desc_sw, async_tx);
	struct xilinx_dma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	if (chan->err) {
		/* If reset fails, need to hard reset the system.
		 * Channel is no longer functional
		 */
		if (!dma_init(chan))
			chan->err = 0;
		else
			return cookie;
	}

	spin_lock_irqsave(&chan->lock, flags);

	/*
	 * Assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	cookie = chan->cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = DMA_MIN_COOKIE;

		child->async_tx.cookie = cookie;
	}

	chan->cookie = cookie;

	/* Put this transaction onto the tail of the pending queue */
	append_desc_queue(chan, desc);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}
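/*
 * Cookie arithmetic sketch: cookies increase monotonically per descriptor,
 * wrapping back to DMA_MIN_COOKIE on signed overflow. For example, a 3-BD
 * transaction submitted at chan->cookie == 7 assigns 8, 9 and 10 and leaves
 * chan->cookie == 10; xilinx_tx_status() later compares a caller's cookie
 * against this window.
 */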
static struct xilinx_dma_desc_sw *xilinx_dma_alloc_descriptor(
					struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_dbg(chan->dev, "out of memory for desc\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}
/**
 * xilinx_dma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct xilinx_dma_chan *chan;
	struct xilinx_dma_desc_sw *first = NULL, *prev = NULL, *new;
	struct xilinx_dma_desc_hw *hw, *prev_hw;
	size_t copy;
	dma_addr_t src = dma_src;
	dma_addr_t dst = dma_dst;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_xilinx_chan(dchan);

	if (chan->err) {
		/* If reset fails, need to hard reset the system.
		 * Channel is no longer functional
		 */
		if (!dma_init(chan))
			chan->err = 0;
		else
			return NULL;
	}

	/* If build does not have Data Realignment Engine (DRE),
	 * src has to be aligned
	 */
	if (!chan->has_DRE) {
		if (((u32)dma_src &
		     (chan->feature & XILINX_DMA_FTR_DATA_WIDTH_MASK)) ||
		    ((u32)dma_dst &
		     (chan->feature & XILINX_DMA_FTR_DATA_WIDTH_MASK))) {
			dev_err(chan->dev,
				"Source or destination address not aligned when no DRE\n");
			return NULL;
		}
	}

	do {
		/* Allocate descriptor from DMA pool */
		new = xilinx_dma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev,
				"No free memory for link descriptor\n");
			goto fail;
		}

		copy = min(len, (size_t)chan->max_len);

		/* If lite build, transfer cannot cross page boundary */
		if (chan->is_lite)
			copy = min(copy, (size_t)(PAGE_SIZE -
						  (src & ~PAGE_MASK)));

		if (!copy) {
			dev_err(chan->dev,
				"Got zero transfer length for %x\n",
				(unsigned int)src);
			goto fail;
		}

		hw = &(new->hw);
		hw->control =
			(hw->control & ~XILINX_DMA_MAX_TRANS_LEN) | copy;
		hw->buf_addr = src;
		hw->addr_vsize = dst;

		if (!first)
			first = new;
		else {
			prev_hw = &(prev->hw);
			prev_hw->next_desc = new->async_tx.phys;
		}

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		src += copy;
		dst += copy;

		/* Insert the descriptor to the list */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	/* Link the last BD with the first BD */
	hw->next_desc = first->async_tx.phys;

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	xilinx_dma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan;
	struct xilinx_dma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct xilinx_dma_desc_hw *hw = NULL, *prev_hw = NULL;
	size_t copy;
	size_t sg_used;
	dma_addr_t dma_src;
	struct scatterlist *sg;
	int i;

#ifdef TEST_DMA_WITH_LOOPBACK
	int total_len;
#endif
	if (!dchan)
		return NULL;

	chan = to_xilinx_chan(dchan);

	if (chan->direction != direction)
		return NULL;

#ifdef TEST_DMA_WITH_LOOPBACK
	total_len = 0;

	for_each_sg(sgl, sg, sg_len, i) {
		total_len += sg_dma_len(sg);
	}
#endif
	/*
	 * Build transactions using information in the scatter gather list
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {

			/* Allocate the link descriptor from DMA pool */
			new = xilinx_dma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev,
					"No free memory for link descriptor\n");
				goto fail;
			}

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min((size_t)(sg_dma_len(sg) - sg_used),
				   (size_t)chan->max_len);

			dma_src = sg_dma_address(sg) + sg_used;

			hw = &(new->hw);
			hw->buf_addr = dma_src;

			/* Fill in the descriptor */
			hw->control = copy;

			/*
			 * If this is not the first descriptor, chain the
			 * current descriptor after the previous descriptor
			 *
			 * For the first DMA_MEM_TO_DEV transfer, set SOP
			 */
			if (!first) {
				first = new;
				if (direction == DMA_MEM_TO_DEV) {
					hw->control |= XILINX_DMA_BD_SOP;
#ifdef TEST_DMA_WITH_LOOPBACK
					hw->app_4 = total_len;
#endif
				}
			} else {
				prev_hw = &(prev->hw);
				prev_hw->next_desc = new->async_tx.phys;
			}

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);

			prev = new;
			sg_used += copy;

			/* Insert the link descriptor into the LD ring */
			list_add_tail(&new->node, &first->tx_list);
		}
	}

	/* All scatter gather list entries has length == 0 */
	if (!first || !new)
		return NULL;

	/* Link the last BD with the first BD */
	hw->next_desc = first->async_tx.phys;

	if (direction == DMA_MEM_TO_DEV)
		hw->control |= XILINX_DMA_BD_EOP;

	new->async_tx.flags = flags;
	new->async_tx.cookie = -EBUSY;

	/* Set EOP to the last link descriptor of new list */
	hw->control |= XILINX_DMA_BD_EOP;

	return &first->async_tx;

fail:
	/* If first was not set, then we failed to allocate the very first
	 * descriptor, and we're done
	 */
	if (!first)
		return NULL;

	/*
	 * First is set, so all of the descriptors we allocated have been added
	 * to first->tx_list, INCLUDING "first" itself. Therefore we
	 * must traverse the list backwards freeing each descriptor in turn
	 */
	xilinx_dma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
/**
 * xilinx_vdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: VDMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *xilinx_vdma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan;
	struct xilinx_dma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct xilinx_dma_desc_hw *hw = NULL, *prev_hw = NULL;
	dma_addr_t dma_src;
	struct scatterlist *sg;
	int i;

	if (!dchan)
		return NULL;

	chan = to_xilinx_chan(dchan);

	if (chan->direction != direction)
		return NULL;

	/* Enforce one sg entry for one frame */
	if (sg_len != chan->num_frms) {
		dev_err(chan->dev,
			"number of entries %d not the same as num stores %d\n",
			sg_len, chan->num_frms);
		return NULL;
	}

	if (!chan->has_SG) {
		DMA_OUT(&chan->addr_regs->hsize, chan->config.hsize);
		DMA_OUT(&chan->addr_regs->frmdly_stride,
			chan->config.frm_dly << XILINX_VDMA_FRMDLY_SHIFT |
			chan->config.stride);
	}

	/*
	 * Build transactions using information in the scatter gather list
	 */
	for_each_sg(sgl, sg, sg_len, i) {

		/* Allocate the link descriptor from DMA pool */
		new = xilinx_dma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev,
				"No free memory for link descriptor\n");
			goto fail;
		}

		/*
		 * Calculate the maximum number of bytes to transfer,
		 * making sure it is less than the hw limit
		 */
		dma_src = sg_dma_address(sg);

		hw = &(new->hw);
		hw->buf_addr = dma_src;

		/* Fill in the descriptor */
		hw->addr_vsize = chan->config.vsize;
		hw->hsize = chan->config.hsize;
		hw->control = (chan->config.frm_dly <<
			       XILINX_VDMA_FRMDLY_SHIFT) |
			      chan->config.stride;

		/* Update the registers */
		DMA_OUT(&(chan->addr_regs->buf_addr[i]), dma_src);

		/* If this is not the first descriptor, chain the
		 * current descriptor after the previous descriptor
		 */
		if (!first)
			first = new;
		else {
			prev_hw = &(prev->hw);
			prev_hw->next_desc = new->async_tx.phys;
		}

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;

		/* Insert the link descriptor into the list */
		list_add_tail(&new->node, &first->tx_list);
	}

	if (!first || !new)
		return NULL;

	/* Link the last BD with the first BD */
	hw->next_desc = first->async_tx.phys;

	new->async_tx.flags = flags;
	new->async_tx.cookie = -EBUSY;

	return &first->async_tx;

fail:
	/* If first was not set, then we failed to allocate the very first
	 * descriptor, and we're done
	 */
	if (!first)
		return NULL;

	/* First is set, so all of the descriptors we allocated have been added
	 * to first->tx_list, INCLUDING "first" itself. Therefore we
	 * must traverse the list backwards freeing each descriptor in turn
	 */
	xilinx_dma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
/* Run-time device configuration for Axi DMA and Axi CDMA */
static int xilinx_dma_device_control(struct dma_chan *dchan,
				     enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct xilinx_dma_chan *chan;
	unsigned long flags;

	if (!dchan)
		return -EINVAL;

	chan = to_xilinx_chan(dchan);

	if (cmd == DMA_TERMINATE_ALL) {
		/* Halt the DMA engine */
		dma_halt(chan);

		spin_lock_irqsave(&chan->lock, flags);

		/* Remove and free all of the descriptors in the lists */
		xilinx_dma_free_desc_list(chan, &chan->pending_list);
		xilinx_dma_free_desc_list(chan, &chan->active_list);

		spin_unlock_irqrestore(&chan->lock, flags);
		return 0;
	} else if (cmd == DMA_SLAVE_CONFIG) {
		/* Configure interrupt coalescing and delay counter
		 * Use value XILINX_DMA_NO_CHANGE to signal no change
		 */
		struct xilinx_dma_config *cfg = (struct xilinx_dma_config *)arg;
		u32 reg = DMA_IN(&chan->regs->cr);

		if (cfg->coalesc <= XILINX_DMA_COALESCE_MAX) {
			reg &= ~XILINX_DMA_XR_COALESCE_MASK;
			reg |= cfg->coalesc << XILINX_DMA_COALESCE_SHIFT;

			chan->config.coalesc = cfg->coalesc;
		}

		if (cfg->delay <= XILINX_DMA_DELAY_MAX) {
			reg &= ~XILINX_DMA_XR_DELAY_MASK;
			reg |= cfg->delay << XILINX_DMA_DELAY_SHIFT;
			chan->config.delay = cfg->delay;
		}

		DMA_OUT(&chan->regs->cr, reg);
		return 0;
	}

	return -ENXIO;
}
/* Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . set transfer information using config struct
 * A usage sketch follows below.
 */
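/*
 * Illustrative sketch (not part of the driver): a client could program a
 * 640x480, 4 bytes/pixel frame through this entry point roughly as below;
 * the field values are assumptions for illustration only.
 *
 *	struct xilinx_dma_config cfg = {
 *		.vsize = 480,
 *		.hsize = 640 * 4,
 *		.stride = 640 * 4,
 *		.frm_dly = 0,
 *		.park = 0,
 *		.gen_lock = 0,
 *	};
 *
 *	dchan->device->device_control(dchan, DMA_SLAVE_CONFIG,
 *				      (unsigned long)&cfg);
 */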
static int xilinx_vdma_device_control(struct dma_chan *dchan,
				      enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct xilinx_dma_chan *chan;
	unsigned long flags;

	if (!dchan)
		return -EINVAL;

	chan = to_xilinx_chan(dchan);

	if (cmd == DMA_TERMINATE_ALL) {
		/* Halt the DMA engine */
		dma_halt(chan);

		spin_lock_irqsave(&chan->lock, flags);

		/* Remove and free all of the descriptors in the lists */
		xilinx_dma_free_desc_list(chan, &chan->pending_list);
		xilinx_dma_free_desc_list(chan, &chan->active_list);

		spin_unlock_irqrestore(&chan->lock, flags);
		return 0;
	} else if (cmd == DMA_SLAVE_CONFIG) {
		struct xilinx_dma_config *cfg = (struct xilinx_dma_config *)arg;
		u32 reg;

		reg = DMA_IN(&chan->regs->cr);

		/* If vsize is -1, it is park-related operations */
		if (cfg->vsize == -1) {
			if (cfg->park)
				reg &= ~XILINX_VDMA_CIRC_EN;
			else
				reg |= XILINX_VDMA_CIRC_EN;

			DMA_OUT(&chan->regs->cr, reg);
			return 0;
		}

		/* If hsize is -1, it is interrupt threshold settings */
		if (cfg->hsize == -1) {
			if (cfg->coalesc <= XILINX_DMA_COALESCE_MAX) {
				reg &= ~XILINX_DMA_XR_COALESCE_MASK;
				reg |= cfg->coalesc <<
					XILINX_DMA_COALESCE_SHIFT;
				chan->config.coalesc = cfg->coalesc;
			}

			if (cfg->delay <= XILINX_DMA_DELAY_MAX) {
				reg &= ~XILINX_DMA_XR_DELAY_MASK;
				reg |= cfg->delay << XILINX_DMA_DELAY_SHIFT;
				chan->config.delay = cfg->delay;
			}

			DMA_OUT(&chan->regs->cr, reg);
			return 0;
		}

		/* Transfer information */
		chan->config.vsize = cfg->vsize;
		chan->config.hsize = cfg->hsize;
		chan->config.stride = cfg->stride;
		chan->config.frm_dly = cfg->frm_dly;
		chan->config.park = cfg->park;

		/* genlock settings */
		chan->config.gen_lock = cfg->gen_lock;
		chan->config.master = cfg->master;

		if (cfg->gen_lock) {
			if (chan->genlock) {
				reg |= XILINX_VDMA_SYNC_EN;
				reg |= cfg->master << XILINX_VDMA_MSTR_SHIFT;
			}
		}

		chan->config.frm_cnt_en = cfg->frm_cnt_en;

		chan->config.park_frm = cfg->park_frm;

		chan->config.coalesc = cfg->coalesc;
		chan->config.delay = cfg->delay;
		if (cfg->coalesc <= XILINX_DMA_COALESCE_MAX) {
			reg |= cfg->coalesc << XILINX_DMA_COALESCE_SHIFT;
			chan->config.coalesc = cfg->coalesc;
		}

		if (cfg->delay <= XILINX_DMA_DELAY_MAX) {
			reg |= cfg->delay << XILINX_DMA_DELAY_SHIFT;
			chan->config.delay = cfg->delay;
		}

		chan->config.disable_intr = cfg->disable_intr;

		if (cfg->ext_fsync)
			reg |= cfg->ext_fsync << XILINX_VDMA_EXTFSYNC_SHIFT;

		DMA_OUT(&chan->regs->cr, reg);
		return 0;
	}

	return -ENXIO;
}
/* Logarithm function to compute alignment shift
 *
 * Only deals with value less than 4096.
 */
static int my_log(int value)
{
	int i = 0;

	while ((1 << i) < value) {
		i++;

		if (i >= 12)
			return 0;
	}

	return i;
}
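/*
 * Note: this computes ceil(log2(value)); for power-of-two data widths it
 * matches what the kernel's order_base_2() helper from <linux/log2.h> would
 * return, and is used below to set dma_device.copy_align.
 */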
/*
 * Device-tree probing and the pre-device-tree platform-bus probing further
 * down define the same symbols, so they are built mutually exclusively
 * (guard assumed from the "copied from the OF section above" note below).
 */
#ifdef CONFIG_OF

static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	kfree(chan);
}
/*
 * Probe a DMA channel:
 * . Get channel features from the device tree entry
 * . Initialize special channel handling routines
 */
static int __devinit xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
	struct device_node *node, u32 feature)
{
	struct xilinx_dma_chan *chan;
	int err;
	int *value;
	u32 width = 0, device_id = 0, flush_fsync = 0;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(xdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	chan->feature = feature;
	chan->max_len = XILINX_DMA_MAX_TRANS_LEN;

	value = (int *)of_get_property(node, "xlnx,include-dre",
				       NULL);
	if (value) {
		if (be32_to_cpup(value) == 1)
			chan->has_DRE = 1;
	}

	value = (int *)of_get_property(node, "xlnx,genlock-mode",
				       NULL);
	if (value) {
		if (be32_to_cpup(value) == 1)
			chan->genlock = 1;
	}

	value = (int *)of_get_property(node,
				       "xlnx,datawidth",
				       NULL);
	if (value) {
		width = be32_to_cpup(value) >> 3; /* convert bits to bytes */

		/* If data width is greater than 8 bytes, DRE is not in hw */
		if (width > 8)
			chan->has_DRE = 0;

		chan->feature |= width - 1;
	}

	value = (int *)of_get_property(node, "xlnx,device-id", NULL);
	if (value)
		device_id = be32_to_cpup(value);

	value = (int *)of_get_property(node, "xlnx,flush-fsync", NULL);
	if (value)
		flush_fsync = be32_to_cpup(value);

	if (feature & XILINX_DMA_IP_CDMA) {
		chan->direction = DMA_MEM_TO_MEM;
		chan->start_transfer = xilinx_cdma_start_transfer;

		chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
				XILINX_DMA_FTR_HAS_SG_SHIFT;

		value = (int *)of_get_property(node,
					       "xlnx,lite-mode", NULL);
		if (value) {
			if (be32_to_cpup(value) == 1) {
				chan->is_lite = 1;
				value = (int *)of_get_property(node,
						"xlnx,max-burst-len", NULL);
				if (value) {
					if (!width) {
						dev_err(xdev->dev,
							"Lite mode without data width property\n");
						goto out_free_chan;
					}
					chan->max_len = width *
						be32_to_cpup(value);
				}
			}
		}
	}

	if (feature & XILINX_DMA_IP_DMA) {
		chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
				XILINX_DMA_FTR_HAS_SG_SHIFT;

		chan->start_transfer = xilinx_dma_start_transfer;

		if (of_device_is_compatible(node,
					    "xlnx,axi-dma-mm2s-channel"))
			chan->direction = DMA_MEM_TO_DEV;

		if (of_device_is_compatible(node,
					    "xlnx,axi-dma-s2mm-channel"))
			chan->direction = DMA_DEV_TO_MEM;
	}

	if (feature & XILINX_DMA_IP_VDMA) {
		chan->start_transfer = xilinx_vdma_start_transfer;

		chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
				XILINX_DMA_FTR_HAS_SG_SHIFT;

		if (of_device_is_compatible(node,
					    "xlnx,axi-vdma-mm2s-channel")) {
			chan->direction = DMA_MEM_TO_DEV;
			if (!chan->has_SG) {
				chan->addr_regs = (struct vdma_addr_regs *)
					((u32)xdev->regs +
					 XILINX_VDMA_DIRECT_REG_OFFSET);
			}
			if (flush_fsync == XILINX_VDMA_FLUSH_BOTH ||
			    flush_fsync == XILINX_VDMA_FLUSH_MM2S)
				chan->flush_fsync = 1;
		}

		if (of_device_is_compatible(node,
					    "xlnx,axi-vdma-s2mm-channel")) {
			chan->direction = DMA_DEV_TO_MEM;
			if (!chan->has_SG) {
				chan->addr_regs = (struct vdma_addr_regs *)
					((u32)xdev->regs +
					 XILINX_VDMA_DIRECT_REG_OFFSET +
					 XILINX_VDMA_CHAN_DIRECT_REG_SIZE);
			}
			if (flush_fsync == XILINX_VDMA_FLUSH_BOTH ||
			    flush_fsync == XILINX_VDMA_FLUSH_S2MM)
				chan->flush_fsync = 1;
		}
	}

	chan->regs = (struct xdma_regs *)xdev->regs;
	chan->id = 0;

	if (chan->direction == DMA_DEV_TO_MEM) {
		chan->regs = (struct xdma_regs *)((u32)xdev->regs +
				XILINX_DMA_RX_CHANNEL_OFFSET);
		chan->id = 1;
	}

	/* Used by dmatest channel matching in slave transfers
	 * Can change it to be a structure to have more matching information
	 */
	chan->private = (chan->direction & 0xFF) |
			(chan->feature & XILINX_DMA_IP_MASK) |
			(device_id << XILINX_DMA_DEVICE_ID_SHIFT);
	chan->common.private = (void *)&(chan->private);
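	/*
	 * The private match word packs direction (low byte), IP type bits
	 * and the device id (top nibble, XILINX_DMA_DEVICE_ID_SHIFT) so a
	 * dma_request_channel() filter can select a specific engine and
	 * direction, as in the sketch near the top of this file.
	 */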
	if (!chan->has_DRE)
		xdev->common.copy_align = my_log(width);

	chan->dev = xdev->dev;
	xdev->chan[chan->id] = chan;

	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);

	/* Initialize the channel */
	if (dma_init(chan)) {
		dev_err(xdev->dev, "Reset channel failed\n");
		goto out_free_chan;
	}

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->active_list);

	chan->common.device = &xdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, dma_intr_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ\n");
		goto out_free_irq;
	}

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->common.chancnt++;

	return 0;

out_free_irq:
	irq_dispose_mapping(chan->irq);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}
static int __devinit xilinx_dma_of_probe(struct platform_device *op)
{
	struct xilinx_dma_device *xdev;
	struct device_node *child, *node;
	int err;
	int *value;
	int num_frames = 0;

	dev_info(&op->dev, "Probing xilinx axi dma engines\n");

	xdev = kzalloc(sizeof(struct xilinx_dma_device), GFP_KERNEL);
	if (!xdev) {
		dev_err(&op->dev, "Not enough memory for device\n");
		err = -ENOMEM;
		goto out_return;
	}

	xdev->dev = &(op->dev);
	INIT_LIST_HEAD(&xdev->common.channels);

	node = op->dev.of_node;
	xdev->feature = 0;

	/* iomap registers */
	xdev->regs = of_iomap(node, 0);
	if (!xdev->regs) {
		dev_err(&op->dev, "unable to iomap registers\n");
		err = -ENOMEM;
		goto out_free_xdev;
	}

	/* Axi CDMA only does memcpy */
	if (of_device_is_compatible(node, "xlnx,axi-cdma")) {
		xdev->feature |= XILINX_DMA_IP_CDMA;

		value = (int *)of_get_property(node, "xlnx,include-sg",
					       NULL);
		if (value) {
			if (be32_to_cpup(value) == 1)
				xdev->feature |= XILINX_DMA_FTR_HAS_SG;
		}

		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_dma_prep_memcpy;
		xdev->common.device_control = xilinx_dma_device_control;
		xdev->common.device_issue_pending = xilinx_cdma_issue_pending;
	}

	/* Axi DMA and VDMA only do slave transfers */
	if (of_device_is_compatible(node, "xlnx,axi-dma")) {
		xdev->feature |= XILINX_DMA_IP_DMA;
		value = (int *)of_get_property(node,
					       "xlnx,sg-include-stscntrl-strm",
					       NULL);
		if (value) {
			if (be32_to_cpup(value) == 1) {
				xdev->feature |= (XILINX_DMA_FTR_STSCNTRL_STRM |
						  XILINX_DMA_FTR_HAS_SG);
			}
		}

		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_control = xilinx_dma_device_control;
		xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	}

	if (of_device_is_compatible(node, "xlnx,axi-vdma")) {
		xdev->feature |= XILINX_DMA_IP_VDMA;

		value = (int *)of_get_property(node, "xlnx,include-sg",
					       NULL);
		if (value) {
			if (be32_to_cpup(value) == 1)
				xdev->feature |= XILINX_DMA_FTR_HAS_SG;
		}

		value = (int *)of_get_property(node, "xlnx,num-fstores",
					       NULL);
		if (value)
			num_frames = be32_to_cpup(value);

		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_vdma_prep_slave_sg;
		xdev->common.device_control = xilinx_vdma_device_control;
		xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
	}

	xdev->common.device_alloc_chan_resources =
		xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
		xilinx_dma_free_chan_resources;
	xdev->common.device_tx_status = xilinx_tx_status;
	xdev->common.dev = &op->dev;

	dev_set_drvdata(&op->dev, xdev);

	for_each_child_of_node(node, child) {
		xilinx_dma_chan_probe(xdev, child, xdev->feature);
	}

	if (xdev->feature & XILINX_DMA_IP_VDMA) {
		int i;

		for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
		}
	}

	dma_async_device_register(&xdev->common);

	return 0;

out_free_xdev:
	kfree(xdev);

out_return:
	return err;
}
static int __devexit xilinx_dma_of_remove(struct platform_device *op)
{
	struct xilinx_dma_device *xdev;
	int i;

	xdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);
	}

	iounmap(xdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(xdev);

	return 0;
}
static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-cdma", },
	{ .compatible = "xlnx,axi-dma", },
	{ .compatible = "xlnx,axi-vdma", },
	{}
};

static struct platform_driver xilinx_dma_of_driver = {
	.driver = {
		.name = "xilinx-dma",
		.owner = THIS_MODULE,
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_of_probe,
	.remove = __devexit_p(xilinx_dma_of_remove),
};
/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int xilinx_dma_init(void)
{
	int ret;

	pr_info("Xilinx DMA driver\n");

	ret = platform_driver_register(&xilinx_dma_of_driver);
	if (ret)
		pr_err("xilinx_dma: failed to register platform driver\n");

	return ret;
}

static void __exit xilinx_dma_exit(void)
{
	platform_driver_unregister(&xilinx_dma_of_driver);
}

subsys_initcall(xilinx_dma_init);
module_exit(xilinx_dma_exit);
#else /* !CONFIG_OF */

/**************************************************/
/* Platform bus to support ARM before device tree */
/**************************************************/

/* The following probe and chan_probe functions were
 * copied from the OF section above, then modified
 * to use platform data.
 */

static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	free_irq(chan->irq, chan);
	list_del(&chan->common.device_node);
	kfree(chan);
}
/*
 * Probe a DMA channel:
 * . Get channel features from the platform data entry
 * . Initialize special channel handling routines
 */
static int __devinit xilinx_dma_chan_probe(struct platform_device *pdev,
	struct xilinx_dma_device *xdev,
	struct dma_channel_config *channel_config,
	int channel_num, u32 feature)
{
	struct xilinx_dma_chan *chan;
	int err;
	u32 width = 0;
	struct resource *res;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(xdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	chan->feature = feature;
	chan->max_len = XILINX_DMA_MAX_TRANS_LEN;

	if (channel_config->include_dre)
		chan->has_DRE = 1;

	if (channel_config->genlock_mode)
		chan->genlock = 1;

	width = channel_config->datawidth >> 3;
	chan->feature |= width - 1;

	if (feature & XILINX_DMA_IP_CDMA) {
		chan->direction = DMA_MEM_TO_MEM;
		chan->start_transfer = xilinx_cdma_start_transfer;

		chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
				XILINX_DMA_FTR_HAS_SG_SHIFT;

		if (channel_config->lite_mode) {
			chan->is_lite = 1;
			chan->max_len = width * channel_config->max_burst_len;
		}
	}

	if (feature & XILINX_DMA_IP_DMA) {
		chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
				XILINX_DMA_FTR_HAS_SG_SHIFT;

		chan->start_transfer = xilinx_dma_start_transfer;

		if (!strcmp(channel_config->type, "axi-dma-mm2s-channel"))
			chan->direction = DMA_MEM_TO_DEV;

		if (!strcmp(channel_config->type, "axi-dma-s2mm-channel"))
			chan->direction = DMA_DEV_TO_MEM;
	}

	if (feature & XILINX_DMA_IP_VDMA) {
		chan->start_transfer = xilinx_vdma_start_transfer;

		chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
				XILINX_DMA_FTR_HAS_SG_SHIFT;

		if (!strcmp(channel_config->type, "axi-vdma-mm2s-channel")) {
			printk(KERN_INFO "axi-vdma-mm2s-channel found\n");

			chan->direction = DMA_MEM_TO_DEV;
			if (!chan->has_SG) {
				chan->addr_regs = (struct vdma_addr_regs *)
					((u32)xdev->regs +
					 XILINX_VDMA_DIRECT_REG_OFFSET);
			}
		}

		if (!strcmp(channel_config->type, "axi-vdma-s2mm-channel")) {
			printk(KERN_INFO "axi-vdma-s2mm-channel found\n");

			chan->direction = DMA_DEV_TO_MEM;
			if (!chan->has_SG) {
				chan->addr_regs = (struct vdma_addr_regs *)
					((u32)xdev->regs +
					 XILINX_VDMA_DIRECT_REG_OFFSET +
					 XILINX_VDMA_CHAN_DIRECT_REG_SIZE);
			}
		}
	}

	chan->regs = (struct xdma_regs *)xdev->regs;
	chan->id = 0;

	if (chan->direction == DMA_DEV_TO_MEM) {
		chan->regs = (struct xdma_regs *)((u32)xdev->regs +
				XILINX_DMA_RX_CHANNEL_OFFSET);
		chan->id = 1;
	}

	/* Used by dmatest channel matching in slave transfers
	 * Can change it to be a structure to have more matching information
	 */
	chan->private = (chan->direction & 0xFF) |
			(chan->feature & XILINX_DMA_IP_MASK);
	chan->common.private = (void *)&(chan->private);

	if (!chan->has_DRE)
		xdev->common.copy_align = my_log(width);

	chan->dev = xdev->dev;
	xdev->chan[chan->id] = chan;

	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);

	/* Initialize the channel */
	if (dma_init(chan)) {
		dev_err(xdev->dev, "Reset channel failed\n");
		goto out_free_chan;
	}

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->active_list);

	chan->common.device = &xdev->common;

	/* setup the interrupt for the channel */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, channel_num);
	chan->irq = res->start;

	err = request_irq(chan->irq, dma_intr_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ\n");
		goto out_free_chan;
	}
	dev_info(&pdev->dev, "using irq %d\n", chan->irq);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->common.chancnt++;

	return 0;

out_free_chan:
	kfree(chan);
out_return:
	return err;
}
static int __devinit xilinx_dma_probe(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev;
	int err;
	int num_frames = 0;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dma_device_config *dma_config;
	int channel;

	dev_info(&pdev->dev, "Probing xilinx axi dma engines\n");

	xdev = kzalloc(sizeof(struct xilinx_dma_device), GFP_KERNEL);
	if (!xdev) {
		dev_err(&pdev->dev, "Not enough memory for device\n");
		err = -ENOMEM;
		goto out_return;
	}

	xdev->dev = &(pdev->dev);
	INIT_LIST_HEAD(&xdev->common.channels);

	xdev->feature = 0;

	/* iomap registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		printk(KERN_ERR "get_resource for MEM resource for dev %d failed\n",
		       pdev->id);
		err = -ENODEV;
		goto out_free_xdev;
	}

	dev_info(&pdev->dev, "device %d actual base is %x\n",
		 pdev->id, (unsigned int)res->start);

	if (!request_mem_region(res->start, 0x1000, "xilinx_axidma")) {
		printk(KERN_ERR "memory request failure for base %x\n",
		       (unsigned int)res->start);
		err = -EBUSY;
		goto out_free_xdev;
	}

	xdev->regs = ioremap(res->start, 0x1000);
	pr_info("dma base remapped: %lx\n", (unsigned long)xdev->regs);
	if (!xdev->regs) {
		dev_err(&pdev->dev, "unable to iomap registers\n");
		err = -ENOMEM;
		goto out_free_xdev;
	}

	dma_config = (struct dma_device_config *)dev->platform_data;

	/* Axi CDMA only does memcpy */
	if (!strcmp(dma_config->type, "axi-cdma")) {
		pr_info("found an axi-cdma configuration\n");
		xdev->feature |= XILINX_DMA_IP_CDMA;

		if (dma_config->include_sg)
			xdev->feature |= XILINX_DMA_FTR_HAS_SG;

		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_dma_prep_memcpy;
		xdev->common.device_control = xilinx_dma_device_control;
		xdev->common.device_issue_pending = xilinx_cdma_issue_pending;
	}

	/* Axi DMA and VDMA only do slave transfers */
	if (!strcmp(dma_config->type, "axi-dma")) {
		pr_info("found an axi-dma configuration\n");

		xdev->feature |= XILINX_DMA_IP_DMA;
		if (dma_config->sg_include_stscntrl_strm)
			xdev->feature |= XILINX_DMA_FTR_STSCNTRL_STRM;

		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_control = xilinx_dma_device_control;
		xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	}

	if (!strcmp(dma_config->type, "axi-vdma")) {
		pr_info("found an axi-vdma configuration\n");

		xdev->feature |= XILINX_DMA_IP_VDMA;

		if (dma_config->include_sg)
			xdev->feature |= XILINX_DMA_FTR_HAS_SG;

		num_frames = dma_config->num_fstores;

		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_vdma_prep_slave_sg;
		xdev->common.device_control = xilinx_vdma_device_control;
		xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
	}

	xdev->common.device_alloc_chan_resources =
		xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
		xilinx_dma_free_chan_resources;
	xdev->common.device_tx_status = xilinx_tx_status;
	xdev->common.dev = &pdev->dev;

	dev_set_drvdata(&pdev->dev, xdev);

	for (channel = 0; channel < dma_config->channel_count; channel++)
		xilinx_dma_chan_probe(pdev, xdev,
				      &dma_config->channel_config[channel],
				      channel, xdev->feature);

	if (xdev->feature & XILINX_DMA_IP_VDMA) {
		int i;

		for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
		}
	}

	dma_async_device_register(&xdev->common);

	return 0;

out_free_xdev:
	kfree(xdev);

out_return:
	return err;
}
static int __exit xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);
	}

	iounmap(xdev->regs);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(xdev);

	return 0;
}

static void xilinx_dma_shutdown(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (xdev->chan[i])
			dma_halt(xdev->chan[i]);
}
static struct platform_driver xilinx_dma_driver = {
	.probe = xilinx_dma_probe,
	.remove = __exit_p(xilinx_dma_remove),
	.shutdown = xilinx_dma_shutdown,
	.driver = {
		.owner = THIS_MODULE,
		.name = "xilinx-axidma",
	},
};
/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int xilinx_dma_init(void)
{
	return platform_driver_register(&xilinx_dma_driver);
}
module_init(xilinx_dma_init);

static void __exit xilinx_dma_exit(void)
{
	platform_driver_unregister(&xilinx_dma_driver);
}

module_exit(xilinx_dma_exit);

#endif /* CONFIG_OF (assumed guard, see note above) */

MODULE_DESCRIPTION("Xilinx DMA/CDMA/VDMA driver");
MODULE_LICENSE("GPL");