Xilinx: ARM: DMA: Code Cleanup
zynq/linux.git: drivers/dma/xilinx_dma.c
1 /*
2  * Xilinx DMA Engine support
3  *
4  * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
5  *
6  * Based on the Freescale DMA driver.
7  *
8  * Description:
9  * This driver supports three Xilinx DMA engines:
10  *  . Axi CDMA engine: does memory-to-memory transfers and has a single
11  *    channel.
12  *  . Axi DMA engine: does transfers between memory and a device. It can
13  *    be configured with one or two channels; when configured with two,
14  *    one channel transmits to the device and the other receives from
15  *    the device.
16  *  . Axi VDMA engine: does transfers between memory and video devices.
17  *    It can be configured with one or two channels; when configured with
18  *    two, one channel transmits to the video device and the other
19  *    receives from the video device.
20  *
21  * This is free software; you can redistribute it and/or modify
22  * it under the terms of the GNU General Public License as published by
23  * the Free Software Foundation; either version 2 of the License, or
24  * (at your option) any later version.
25  *
26  */
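/*
 * Illustrative sketch (not part of this driver): a client would normally
 * obtain one of these channels through the generic dmaengine API before
 * issuing transfers.  The filter function, the "match" parameter and
 * my_get_slave_chan() are placeholders for platform-specific matching
 * (for example against the channel's private field set up at probe time).
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->private == param;
 *	}
 *
 *	static struct dma_chan *my_get_slave_chan(void *match)
 *	{
 *		dma_cap_mask_t mask;
 *
 *		dma_cap_zero(mask);
 *		dma_cap_set(DMA_SLAVE, mask);
 *		return dma_request_channel(mask, my_filter, match);
 *	}
 */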
27
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/slab.h>
31 #include <linux/interrupt.h>
32 #include <linux/dmapool.h>
33 #include <asm/io.h>
34 #include <linux/of.h>
35 #include <linux/of_platform.h>
36 #include <linux/platform_device.h>
37 #include <linux/of_address.h>
38 #include <linux/amba/xilinx_dma.h>
39 #include <linux/of_irq.h>
40
41 /* Hw specific definitions
42  */
43 #define XILINX_DMA_MAX_CHANS_PER_DEVICE  0x2
44 #define XILINX_DMA_MAX_TRANS_LEN         0x7FFFFF
45
46 /* General register bits definitions
47  */
48 #define XILINX_DMA_CR_RESET_MASK    0x00000004  /* Reset DMA engine */
49 #define XILINX_DMA_CR_RUNSTOP_MASK  0x00000001  /* Start/stop DMA engine */
50
51 #define XILINX_DMA_SR_HALTED_MASK   0x00000001  /* DMA channel halted */
52 #define XILINX_DMA_SR_IDLE_MASK     0x00000002  /* DMA channel idle */
53
54 #define XILINX_DMA_SR_ERR_INTERNAL_MASK 0x00000010 /* Datamover internal err */
55 #define XILINX_DMA_SR_ERR_SLAVE_MASK    0x00000020 /* Datamover slave err */
56 #define XILINX_DMA_SR_ERR_DECODE_MASK   0x00000040 /* Datamover decode err */
57 #define XILINX_DMA_SR_ERR_SG_INT_MASK   0x00000100 /* SG internal err */
58 #define XILINX_DMA_SR_ERR_SG_SLV_MASK   0x00000200 /* SG slave err */
59 #define XILINX_DMA_SR_ERR_SG_DEC_MASK   0x00000400 /* SG decode err */
60 #define XILINX_DMA_SR_ERR_ALL_MASK      0x00000770 /* All errors */
61
62 #define XILINX_DMA_XR_IRQ_IOC_MASK      0x00001000 /* Completion interrupt */
63 #define XILINX_DMA_XR_IRQ_DELAY_MASK    0x00002000 /* Delay interrupt */
64 #define XILINX_DMA_XR_IRQ_ERROR_MASK    0x00004000 /* Error interrupt */
65 #define XILINX_DMA_XR_IRQ_ALL_MASK      0x00007000 /* All interrupts */
66
67 #define XILINX_DMA_XR_DELAY_MASK    0xFF000000 /* Delay timeout counter */
68 #define XILINX_DMA_XR_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
69
70 #define XILINX_DMA_IRQ_SHIFT      12
71 #define XILINX_DMA_DELAY_SHIFT    24
72 #define XILINX_DMA_COALESCE_SHIFT 16
73
74 #define XILINX_DMA_DELAY_MAX     0xFF /**< Maximum delay counter value */
75 #define XILINX_DMA_COALESCE_MAX  0xFF /**< Maximum coalescing counter value */
76
77 #define XILINX_DMA_RX_CHANNEL_OFFSET      0x30
78
79 /* Axi CDMA special register bits
80  */
81 #define XILINX_CDMA_CR_SGMODE_MASK      0x00000008 /**< Scatter gather mode */
82
83 #define XILINX_CDMA_SR_SGINCLD_MASK        0x00000008  /**< Hybrid build */
84 #define XILINX_CDMA_XR_IRQ_SIMPLE_ALL_MASK 0x00005000 /**< All interrupts for
85                                                         simple only mode */
86 /* Axi VDMA special register bits
87  */
88 #define XILINX_VDMA_CIRC_EN         0x00000002  /* Circular mode */
89 #define XILINX_VDMA_SYNC_EN         0x00000008  /* Sync enable mode */
90 #define XILINX_VDMA_FRMCNT_EN       0x00000010  /* Frm Cnt enable mode */
91 #define XILINX_VDMA_MSTR_MASK       0x00000F00  /* Master in control */
92
93 #define XILINX_VDMA_EXTFSYNC_SHIFT  6
94 #define XILINX_VDMA_MSTR_SHIFT      8
95 #define XILINX_VDMA_WR_REF_SHIFT    8
96
97 #define XILINX_VDMA_FRMDLY_SHIFT  24
98
99 #define XILINX_VDMA_DIRECT_REG_OFFSET     0x50
100 #define XILINX_VDMA_CHAN_DIRECT_REG_SIZE  0x50
101
102 #define XILINX_VDMA_PARK_REG_OFFSET      0x28
103
104 /* Axi VDMA Specific Error bits
105  */
106 #define XILINX_VDMA_SR_ERR_FSIZE_LESS_MASK    0x00000080 /* FSize Less
107                                                         Mismatch err */
108 #define XILINX_VDMA_SR_ERR_LSIZE_LESS_MASK    0x00000100 /* LSize Less
109                                                         Mismatch err */
110 #define XILINX_VDMA_SR_ERR_FSIZE_MORE_MASK    0x00000800 /* FSize
111                                                         more err */
112 /* Recoverable errors are DMA Internal error, FSize Less, LSize Less
113  * and FSize More mismatch errors.  These are recoverable only
114  * when C_FLUSH_ON_FSYNC is enabled in the hardware system.
115  */
116 #define XILINX_VDMA_SR_ERR_RECOVER_MASK       0x00000990 /* Recoverable
117                                                         errs */
118
119 /* Axi VDMA Flush on Fsync bits
120  */
121 #define XILINX_VDMA_FLUSH_S2MM  3
122 #define XILINX_VDMA_FLUSH_MM2S  2
123 #define XILINX_VDMA_FLUSH_BOTH  1
124
125 /* BD definitions for Axi Dma and Axi Cdma
126  */
127 #define XILINX_DMA_BD_STS_COMPL_MASK 0x80000000
128 #define XILINX_DMA_BD_STS_ERR_MASK   0x70000000
129 #define XILINX_DMA_BD_STS_ALL_MASK   0xF0000000
130
131 /* Axi DMA BD special bits definitions
132  */
133 #define XILINX_DMA_BD_SOP       0x08000000    /* Start of packet bit */
134 #define XILINX_DMA_BD_EOP       0x04000000    /* End of packet bit */
135
136 /* Feature encodings
137  */
138 #define XILINX_DMA_FTR_DATA_WIDTH_MASK 0x000000FF /* Data width mask, 1024 */
139 #define XILINX_DMA_FTR_HAS_SG          0x00000100 /* Has SG */
140 #define XILINX_DMA_FTR_HAS_SG_SHIFT    8          /* Has SG shift */
141 #define XILINX_DMA_FTR_STSCNTRL_STRM   0x00010000 /* Optional feature for dma */
142
143 /* Delay loop counter to prevent hardware failure
144  */
145 #define XILINX_DMA_RESET_LOOP            1000000
146 #define XILINX_DMA_HALT_LOOP             1000000
147
148 /* Device Id in the private structure
149  */
150 #define XILINX_DMA_DEVICE_ID_SHIFT     28
151
152 /* IO accessors
153  */
154 #define DMA_OUT(addr, val)  (iowrite32(val, addr))
155 #define DMA_IN(addr)  (ioread32(addr))
156
157 /* Hardware descriptor
158  *
159  * shared by all Xilinx DMA engines
160  */
161 struct xilinx_dma_desc_hw {
162         u32 next_desc;  /* 0x00 */
163         u32 pad1;       /* 0x04 */
164         u32 buf_addr;   /* 0x08 */
165         u32 pad2;       /* 0x0C */
166         u32 addr_vsize; /* 0x10 */
167         u32 hsize;      /* 0x14 */
168         u32 control;    /* 0x18 */
169         u32 status;     /* 0x1C */
170         u32 app_0;      /* 0x20 */
171         u32 app_1;      /* 0x24 */
172         u32 app_2;      /* 0x28 */
173         u32 app_3;      /* 0x2C */
174         u32 app_4;      /* 0x30 */
175 } __attribute__((aligned(64)));
176
177 struct xilinx_dma_desc_sw {
178         struct xilinx_dma_desc_hw hw;
179         struct list_head node;
180         struct list_head tx_list;
181         struct dma_async_tx_descriptor async_tx;
182 } __attribute__((aligned(64)));
183
184 struct xdma_regs {
185         u32 cr;     /* 0x00 Control Register */
186         u32 sr;     /* 0x04 Status Register */
187         u32 cdr;    /* 0x08 Current Descriptor Register */
188         u32 pad1;
189         u32 tdr;    /* 0x10 Tail Descriptor Register */
190         u32 pad2;
191         u32 src;    /* 0x18 Source Address Register (cdma) */
192         u32 pad3;
193         u32 dst;    /* 0x20 Destination Address Register (cdma) */
194         u32 pad4;
195         u32 btt_ref;/* 0x28 Bytes To Transfer (cdma) or park_ref (vdma) */
196         u32 version;         /* 0x2c version (vdma) */
197 };
198
199 struct vdma_addr_regs {
200         u32 vsize;          /* 0x0 Vertical size */
201         u32 hsize;          /* 0x4 Horizontal size */
202         u32 frmdly_stride;  /* 0x8 Frame delay and stride */
203         u32 buf_addr[16];   /* 0xC - 0x48 Src addresses */
204 };
205
206 /* Per-channel structure; operations specific to each DMA engine type
207  * are embedded via the start_transfer callback */
208 struct xilinx_dma_chan {
209         struct xdma_regs __iomem *regs;   /* Control status registers */
210         struct vdma_addr_regs *addr_regs; /* Direct address registers */
211         dma_cookie_t completed_cookie;    /* The maximum cookie completed */
212         dma_cookie_t cookie;              /* The current cookie */
213         spinlock_t lock;                  /* Descriptor operation lock */
214         bool   sg_waiting;                /* Scatter gather transfer waiting */
215         struct list_head active_list;     /* Active descriptors */
216         struct list_head pending_list;    /* Descriptors waiting */
217         struct dma_chan common;           /* DMA common channel */
218         struct dma_pool *desc_pool;       /* Descriptors pool */
219         struct device *dev;               /* The dma device */
220         int    irq;                       /* Channel IRQ */
221         int    id;                        /* Channel ID */
222         enum dma_transfer_direction direction;/* Transfer direction */
223         int    max_len;                   /* Maximum data len per transfer */
224         int    is_lite;                   /* Whether is light build */
225         int    num_frms;                  /* Number of frames */
226         int    has_SG;                    /* Support scatter transfers */
227         int    has_DRE;                   /* Support unaligned transfers */
228         int    genlock;                   /* Support genlock mode */
229         int    err;                       /* Channel has errors */
230         struct tasklet_struct tasklet;    /* Cleanup work after irq */
231         u32    feature;                   /* IP feature */
232         u32    private;                   /* Match info for channel request */
233         void   (*start_transfer)(struct xilinx_dma_chan *chan);
234         struct xilinx_dma_config config;  /* Device configuration info */
235         u32    flush_fsync;               /* Flush on Fsync */
236 };
237
238 struct xilinx_dma_device {
239         void __iomem *regs;
240         struct device *dev;
241         struct dma_device common;
242         struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
243         u32 feature;
244         int irq;
245 };
246
247 #define to_xilinx_chan(chan) container_of(chan, struct xilinx_dma_chan, common)
248
249 /* Required functions
250  */
251 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
252 {
253         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
254
255         /* Has this channel already been allocated? */
256         if (chan->desc_pool)
257                 return 1;
258
259         /*
260          * The descriptor must be aligned to 64 bytes to meet the
261          * Xilinx DMA specification requirement.
262          */
263         chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
264                                   chan->dev,
265                                   sizeof(struct xilinx_dma_desc_sw),
266                                   __alignof__(struct xilinx_dma_desc_sw), 0);
267         if (!chan->desc_pool) {
268                 dev_err(chan->dev, "unable to allocate channel %d "
269                                    "descriptor pool\n", chan->id);
270                 return -ENOMEM;
271         }
272
273         chan->completed_cookie = 1;
274         chan->cookie = 1;
275
276         /* there is at least one descriptor free to be allocated */
277         return 1;
278 }
279
280 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
281                                   struct list_head *list)
282 {
283         struct xilinx_dma_desc_sw *desc, *_desc;
284
285         list_for_each_entry_safe(desc, _desc, list, node) {
286                 list_del(&desc->node);
287                 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
288         }
289 }
290
291 static void xilinx_dma_free_desc_list_reverse(struct xilinx_dma_chan *chan,
292                                           struct list_head *list)
293 {
294         struct xilinx_dma_desc_sw *desc, *_desc;
295
296         list_for_each_entry_safe_reverse(desc, _desc, list, node) {
297                 list_del(&desc->node);
298                 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
299         }
300 }
301
302 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
303 {
304         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
305         unsigned long flags;
306
307         dev_dbg(chan->dev, "Free all channel resources.\n");
308         spin_lock_irqsave(&chan->lock, flags);
309         xilinx_dma_free_desc_list(chan, &chan->active_list);
310         xilinx_dma_free_desc_list(chan, &chan->pending_list);
311         spin_unlock_irqrestore(&chan->lock, flags);
312
313         dma_pool_destroy(chan->desc_pool);
314         chan->desc_pool = NULL;
315 }
316
317 static enum dma_status xilinx_dma_desc_status(struct xilinx_dma_chan *chan,
318                                           struct xilinx_dma_desc_sw *desc)
319 {
320         return dma_async_is_complete(desc->async_tx.cookie,
321                                      chan->completed_cookie,
322                                      chan->cookie);
323 }
324
325 static void xilinx_chan_desc_cleanup(struct xilinx_dma_chan *chan)
326 {
327         struct xilinx_dma_desc_sw *desc, *_desc;
328         unsigned long flags;
329
330         spin_lock_irqsave(&chan->lock, flags);
331
332         list_for_each_entry_safe(desc, _desc, &chan->active_list, node) {
333                 dma_async_tx_callback callback;
334                 void *callback_param;
335
336                 if (xilinx_dma_desc_status(chan, desc) == DMA_IN_PROGRESS)
337                         break;
338
339                 /* Remove from the list of running transactions */
340                 list_del(&desc->node);
341
342                 /* Run the link descriptor callback function */
343                 callback = desc->async_tx.callback;
344                 callback_param = desc->async_tx.callback_param;
345                 if (callback) {
346                         spin_unlock_irqrestore(&chan->lock, flags);
347                         callback(callback_param);
348                         spin_lock_irqsave(&chan->lock, flags);
349                 }
350
351                 /* Run any dependencies, then free the descriptor */
352                 dma_run_dependencies(&desc->async_tx);
353                 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
354         }
355
356         spin_unlock_irqrestore(&chan->lock, flags);
357 }
358
359 static enum dma_status xilinx_tx_status(struct dma_chan *dchan,
360                                         dma_cookie_t cookie,
361                                         struct dma_tx_state *txstate)
362 {
363         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
364         dma_cookie_t last_used;
365         dma_cookie_t last_complete;
366
367         xilinx_chan_desc_cleanup(chan);
368
369         last_used = dchan->cookie;
370         last_complete = chan->completed_cookie;
371
372         dma_set_tx_state(txstate, last_complete, last_used, 0);
373
374         return dma_async_is_complete(cookie, last_complete, last_used);
375 }
376
377 static int dma_is_running(struct xilinx_dma_chan *chan)
378 {
379         return !(DMA_IN(&chan->regs->sr) & XILINX_DMA_SR_HALTED_MASK) &&
380            (DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RUNSTOP_MASK);
381 }
382
383 static int dma_is_idle(struct xilinx_dma_chan *chan)
384 {
385         return DMA_IN(&chan->regs->sr) & XILINX_DMA_SR_IDLE_MASK;
386 }
387
388 /* Only needed for Axi CDMA v2_00_a or earlier core
389  */
390 static void dma_sg_toggle(struct xilinx_dma_chan *chan)
391 {
392
393         DMA_OUT(&chan->regs->cr,
394             DMA_IN(&chan->regs->cr) & ~XILINX_CDMA_CR_SGMODE_MASK);
395
396         DMA_OUT(&chan->regs->cr,
397             DMA_IN(&chan->regs->cr) | XILINX_CDMA_CR_SGMODE_MASK);
398 }
399
400 #define XILINX_DMA_DRIVER_DEBUG 0
401
402 #if (XILINX_DMA_DRIVER_DEBUG == 1)
403 static void desc_dump(struct xilinx_dma_desc_hw *hw)
404 {
405         printk(KERN_INFO "hw desc %x:\n", (unsigned int)hw);
406         printk(KERN_INFO "\tnext_desc %x\n", hw->next_desc);
407         printk(KERN_INFO "\tbuf_addr %x\n", hw->buf_addr);
408         printk(KERN_INFO "\taddr_vsize %x\n", hw->addr_vsize);
409         printk(KERN_INFO "\thsize %x\n", hw->hsize);
410         printk(KERN_INFO "\tcontrol %x\n", hw->control);
411         printk(KERN_INFO "\tstatus %x\n", hw->status);
412
413 }
414 #endif
415
416 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
417 {
418         unsigned long flags;
419         struct xilinx_dma_desc_sw *desch, *desct;
420         struct xilinx_dma_desc_hw *hw;
421
422         if (chan->err)
423                 return;
424
425         spin_lock_irqsave(&chan->lock, flags);
426
427         if (list_empty(&chan->pending_list))
428                 goto out_unlock;
429
430         /* If hardware is busy, cannot submit
431          */
432         if (!dma_is_idle(chan)) {
433                 dev_dbg(chan->dev, "DMA controller still busy %x\n",
434                                         DMA_IN(&chan->regs->sr));
435                 goto out_unlock;
436         }
437
438         /* Enable interrupts
439          */
440         DMA_OUT(&chan->regs->cr,
441             DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);
442
443         desch = list_first_entry(&chan->pending_list, struct xilinx_dma_desc_sw,
444                                         node);
445
446         if (chan->has_SG) {
447
448                 /* If hybrid mode, append pending list to active list
449                  */
450                 desct = container_of(chan->pending_list.prev,
451                                 struct xilinx_dma_desc_sw, node);
452
453                 list_splice_tail_init(&chan->pending_list, &chan->active_list);
454
455                 /* If hardware is idle, then all descriptors on the active list
456                  * are done, start new transfers
457                  */
458                 dma_sg_toggle(chan);
459
460                 DMA_OUT(&chan->regs->cdr, desch->async_tx.phys);
461
462                 /* Update tail ptr register and start the transfer
463                  */
464                 DMA_OUT(&chan->regs->tdr, desct->async_tx.phys);
465                 goto out_unlock;
466         }
467
468         /* In simple mode
469          */
470         list_del(&desch->node);
471         list_add_tail(&desch->node, &chan->active_list);
472
473         hw = &desch->hw;
474
475         DMA_OUT(&chan->regs->src, hw->buf_addr);
476         DMA_OUT(&chan->regs->dst, hw->addr_vsize);
477
478         /* Start the transfer
479          */
480         DMA_OUT(&chan->regs->btt_ref,
481             hw->control & XILINX_DMA_MAX_TRANS_LEN);
482
483 out_unlock:
484         spin_unlock_irqrestore(&chan->lock, flags);
485 }
486
487 /* If sg mode, link the pending list to running list; if simple mode, get the
488  * head of the pending list and submit it to hw
489  */
490 static void xilinx_cdma_issue_pending(struct dma_chan *dchan)
491 {
492         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
493         xilinx_cdma_start_transfer(chan);
494 }
495
496 /* Stop the hardware, the ongoing transfer will be finished */
497 static void dma_halt(struct xilinx_dma_chan *chan)
498 {
499         int loop = XILINX_DMA_HALT_LOOP;
500
501         DMA_OUT(&chan->regs->cr,
502             DMA_IN(&chan->regs->cr) & ~XILINX_DMA_CR_RUNSTOP_MASK);
503
504         /* Wait for the hardware to halt
505          */
506         while (loop) {
507                 if (!(DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RUNSTOP_MASK))
508                         break;
509
510                 loop -= 1;
511         }
512
513         if (!loop) {
514                 pr_debug("Cannot stop channel %x: %x\n",
515                         (unsigned int)chan,
516                     (unsigned int)DMA_IN(&chan->regs->cr));
517                 chan->err = 1;
518         }
519
520         return;
521 }
522
523 /* Start the hardware. Transfers are not started yet */
524 static void dma_start(struct xilinx_dma_chan *chan)
525 {
526         int loop = XILINX_DMA_HALT_LOOP;
527
528         DMA_OUT(&chan->regs->cr,
529             DMA_IN(&chan->regs->cr) | XILINX_DMA_CR_RUNSTOP_MASK);
530
531         /* Wait for the hardware to start
532          */
533         while (loop) {
534                 if (DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RUNSTOP_MASK)
535                         break;
536
537                 loop -= 1;
538         }
539
540         if (!loop) {
541                 pr_debug("Cannot start channel %x: %x\n",
542                         (unsigned int)chan,
543                     (unsigned int)DMA_IN(&chan->regs->cr));
544
545                 chan->err = 1;
546         }
547
548         return;
549 }
550
551
552 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
553 {
554         unsigned long flags;
555         struct xilinx_dma_desc_sw *desch, *desct;
556         struct xilinx_dma_desc_hw *hw;
557
558         if (chan->err)
559                 return;
560
561         spin_lock_irqsave(&chan->lock, flags);
562
563         if (list_empty(&chan->pending_list))
564                 goto out_unlock;
565
566         /* If hardware is busy, cannot submit
567          */
568         if (dma_is_running(chan) && !dma_is_idle(chan)) {
569                 dev_dbg(chan->dev, "DMA controller still busy\n");
570                 goto out_unlock;
571         }
572
573         /* If hardware is idle, then all descriptors on active list are
574          * done, start new transfers
575          */
576         dma_halt(chan);
577
578         if (chan->err)
579                 goto out_unlock;
580
581         if (chan->has_SG) {
582                 desch = list_first_entry(&chan->pending_list,
583                                 struct xilinx_dma_desc_sw, node);
584
585                 desct = container_of(chan->pending_list.prev,
586                                 struct xilinx_dma_desc_sw, node);
587
588                 DMA_OUT(&chan->regs->cdr, desch->async_tx.phys);
589
590                 dma_start(chan);
591
592                 if (chan->err)
593                         goto out_unlock;
594                 list_splice_tail_init(&chan->pending_list, &chan->active_list);
595
596                 /* Enable interrupts
597                 */
598                 DMA_OUT(&chan->regs->cr,
599                         DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);
600
601                 /* Update tail ptr register and start the transfer
602                 */
603                 DMA_OUT(&chan->regs->tdr, desct->async_tx.phys);
604                 goto out_unlock;
605         }
606
607         /* In simple mode
608         */
609
610         dma_halt(chan);
611
612         if (chan->err)
613                 goto out_unlock;
614
615         dev_dbg(chan->dev, "xilinx_dma_start_transfer: simple DMA mode\n");
616
617         desch = list_first_entry(&chan->pending_list,
618                                 struct xilinx_dma_desc_sw, node);
619
620         list_del(&desch->node);
621         list_add_tail(&desch->node, &chan->active_list);
622
623         dma_start(chan);
624
625         if (chan->err)
626                 goto out_unlock;
627
628         hw = &desch->hw;
629
630         /* Enable interrupts
631         */
632         DMA_OUT(&chan->regs->cr,
633                 DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);
634
635         DMA_OUT(&chan->regs->src, hw->buf_addr);
636
637         /* Start the transfer
638         */
639         DMA_OUT(&chan->regs->btt_ref,
640                 hw->control & XILINX_DMA_MAX_TRANS_LEN);
641
642 out_unlock:
643         spin_unlock_irqrestore(&chan->lock, flags);
644 }
645
646 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
647 {
648         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
649         xilinx_dma_start_transfer(chan);
650 }
651
652 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
653 {
654         unsigned long flags;
655         struct xilinx_dma_desc_sw *desch, *desct = NULL;
656         struct xilinx_dma_config *config;
657         u32 reg;
658         u8 *chan_base;
659
660         if (chan->err)
661                 return;
662
663         spin_lock_irqsave(&chan->lock, flags);
664
665         if (list_empty(&chan->pending_list))
666                 goto out_unlock;
667
668         /* If it is SG mode and hardware is busy, cannot submit
669          */
670         if (chan->has_SG && dma_is_running(chan) && !dma_is_idle(chan)) {
671                 dev_dbg(chan->dev, "DMA controller still busy\n");
672                 goto out_unlock;
673         }
674
675         /* If hardware is idle, then all descriptors on the running lists are
676          * done, start new transfers
677          */
678         if (chan->err)
679                 goto out_unlock;
680
681         if (chan->has_SG) {
682                 desch = list_first_entry(&chan->pending_list,
683                                 struct xilinx_dma_desc_sw, node);
684
685                 desct = container_of(chan->pending_list.prev,
686                                 struct xilinx_dma_desc_sw, node);
687
688                 DMA_OUT(&chan->regs->cdr, desch->async_tx.phys);
689         }
690
691         /* Configure the hardware using info in the config structure */
692         config = &(chan->config);
693         reg = DMA_IN(&chan->regs->cr);
694
695         if (config->frm_cnt_en)
696                 reg |= XILINX_VDMA_FRMCNT_EN;
697         else
698                 reg &= ~XILINX_VDMA_FRMCNT_EN;
699
700         /* With SG, start with circular mode, so that BDs can be fetched.
701          * In direct register mode, if not parking, enable circular mode */
702         if ((chan->has_SG) || (!config->park))
703                 reg |= XILINX_VDMA_CIRC_EN;
704
705         if (config->park)
706                 reg &= ~XILINX_VDMA_CIRC_EN;
707
708         DMA_OUT(&chan->regs->cr, reg);
709
710         if ((config->park_frm >= 0) && (config->park_frm < chan->num_frms)) {
711                 if (config->direction == DMA_MEM_TO_DEV) {
712                         chan_base = (char *)chan->regs;
713                         DMA_OUT((chan_base + XILINX_VDMA_PARK_REG_OFFSET),
714                                         config->park_frm);
715                 } else {
716                         chan_base = ((char *)chan->regs -
717                                         XILINX_DMA_RX_CHANNEL_OFFSET);
718                         DMA_OUT((chan_base + XILINX_VDMA_PARK_REG_OFFSET),
719                                 config->park_frm << XILINX_VDMA_WR_REF_SHIFT);
720                 }
721         }
722
723         /* Start the hardware
724          */
725         dma_start(chan);
726
727         if (chan->err)
728                 goto out_unlock;
729         list_splice_tail_init(&chan->pending_list, &chan->active_list);
730
731         /* Enable interrupts
732          *
733          * park/genlock testing does not use interrupts */
734         if (!chan->config.disable_intr) {
735                 DMA_OUT(&chan->regs->cr,
736                    DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);
737         } else {
738                 DMA_OUT(&chan->regs->cr,
739                    DMA_IN(&chan->regs->cr) |
740                         chan->config.disable_intr << XILINX_DMA_IRQ_SHIFT);
741         }
742
743         /* Start the transfer
744          */
745         if (chan->has_SG)
746                 DMA_OUT(&chan->regs->tdr, desct->async_tx.phys);
747         else
748                 DMA_OUT(&chan->addr_regs->vsize, config->vsize);
749
750 out_unlock:
751         spin_unlock_irqrestore(&chan->lock, flags);
752 }
753
754 static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
755 {
756         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
757         xilinx_vdma_start_transfer(chan);
758 }
759
760 /**
761  * xilinx_dma_update_completed_cookie - Update the completed cookie.
762  * @chan : xilinx DMA channel
763  *
764  * CONTEXT: hardirq
765  */
766 static void xilinx_dma_update_completed_cookie(struct xilinx_dma_chan *chan)
767 {
768         struct xilinx_dma_desc_sw *desc = NULL;
769         struct xilinx_dma_desc_hw *hw = NULL;
770         unsigned long flags;
771         dma_cookie_t cookie = -EBUSY;
772         int done = 0;
773
774         spin_lock_irqsave(&chan->lock, flags);
775
776         if (list_empty(&chan->active_list)) {
777                 dev_dbg(chan->dev, "no running descriptors\n");
778                 goto out_unlock;
779         }
780
781         /* Get the last completed descriptor, update the cookie to that */
782         list_for_each_entry(desc, &chan->active_list, node) {
783                 if ((!(chan->feature & XILINX_DMA_IP_VDMA)) && chan->has_SG) {
784                         hw = &desc->hw;
785
786                         /* If a BD has no status bits set, hw has it */
787                         if (!(hw->status & XILINX_DMA_BD_STS_ALL_MASK)) {
788                                 break;
789                         } else {
790                                 done = 1;
791                                 cookie = desc->async_tx.cookie;
792                         }
793                 } else {
794                         /* In non-SG mode, all active entries are done */
795                         done = 1;
796                         cookie = desc->async_tx.cookie;
797                 }
798         }
799
800         if (done)
801                 chan->completed_cookie = cookie;
802
803 out_unlock:
804         spin_unlock_irqrestore(&chan->lock, flags);
805 }
806
807 /* Reset hardware
808  */
809 static int dma_init(struct xilinx_dma_chan *chan)
810 {
811         int loop = XILINX_DMA_RESET_LOOP;
812         u32 tmp;
813
814         DMA_OUT(&chan->regs->cr,
815                DMA_IN(&chan->regs->cr) | XILINX_DMA_CR_RESET_MASK);
816
817         tmp = DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RESET_MASK;
818
819         /* Wait for the hardware to finish reset
820          */
821         while (loop && tmp) {
822                 tmp = DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RESET_MASK;
823                 loop -= 1;
824         }
825
826         if (!loop) {
827                 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
828                     DMA_IN(&chan->regs->cr), DMA_IN(&chan->regs->sr));
829                 return 1;
830         }
831
832         /* For Axi CDMA, always do sg transfers if sg mode is built in
833          */
834         if ((chan->feature & XILINX_DMA_IP_CDMA) && chan->has_SG)
835                 DMA_OUT(&chan->regs->cr, tmp | XILINX_CDMA_CR_SGMODE_MASK);
836
837         return 0;
838 }
839
840
841 static irqreturn_t dma_intr_handler(int irq, void *data)
842 {
843         struct xilinx_dma_chan *chan = data;
844         int update_cookie = 0;
845         int to_transfer = 0;
846         u32 stat, reg;
847
848         reg = DMA_IN(&chan->regs->cr);
849
850         /* Disable intr
851          */
852         DMA_OUT(&chan->regs->cr,
853            reg & ~XILINX_DMA_XR_IRQ_ALL_MASK);
854
855         stat = DMA_IN(&chan->regs->sr);
856         if (!(stat & XILINX_DMA_XR_IRQ_ALL_MASK))
857                 return IRQ_NONE;
858
859         /* Ack the interrupts
860          */
861         DMA_OUT(&chan->regs->sr, XILINX_DMA_XR_IRQ_ALL_MASK);
862
863         /* Check for only the interrupts which are enabled
864          */
865         stat &= (reg & XILINX_DMA_XR_IRQ_ALL_MASK);
866
867         if (stat & XILINX_DMA_XR_IRQ_ERROR_MASK) {
868                 if ((chan->feature & XILINX_DMA_IP_VDMA)
869                         && chan->flush_fsync) {
870                         /* VDMA Recoverable Errors, only when
871                            C_FLUSH_ON_FSYNC is enabled */
872                         u32 error = DMA_IN(&chan->regs->sr) &
873                                 XILINX_VDMA_SR_ERR_RECOVER_MASK;
874                         if (error)
875                                 DMA_OUT(&chan->regs->sr, error);
876                         else
877                                 chan->err = 1;
878                 } else {
879                         dev_err(chan->dev,
880                                 "Channel %x has errors %x, cdr %x tdr %x\n",
881                                 (unsigned int)chan,
882                                 (unsigned int)DMA_IN(&chan->regs->sr),
883                                 (unsigned int)DMA_IN(&chan->regs->cdr),
884                                 (unsigned int)DMA_IN(&chan->regs->tdr));
885                         chan->err = 1;
886                 }
887         }
888
889         /* The delay interrupt signals that the device is taking longer
890          * than the configured inter-packet delay threshold
891          */
892         if (stat & XILINX_DMA_XR_IRQ_DELAY_MASK)
893                 dev_dbg(chan->dev, "Inter-packet latency too long\n");
894
895         if (stat & XILINX_DMA_XR_IRQ_IOC_MASK) {
896                 update_cookie = 1;
897                 to_transfer = 1;
898         }
899
900         if (update_cookie)
901                 xilinx_dma_update_completed_cookie(chan);
902
903         if (to_transfer)
904                 chan->start_transfer(chan);
905
906         tasklet_schedule(&chan->tasklet);
907         return IRQ_HANDLED;
908 }
909
910 static void dma_do_tasklet(unsigned long data)
911 {
912         struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
913
914         xilinx_chan_desc_cleanup(chan);
915 }
916
917 /* Append the descriptor list to the pending list */
918 static void append_desc_queue(struct xilinx_dma_chan *chan,
919                         struct xilinx_dma_desc_sw *desc)
920 {
921         struct xilinx_dma_desc_sw *tail = container_of(chan->pending_list.prev,
922                                         struct xilinx_dma_desc_sw, node);
923         struct xilinx_dma_desc_hw *hw;
924
925         if (list_empty(&chan->pending_list))
926                 goto out_splice;
927
928         /* Add the hardware descriptor to the chain of hardware descriptors
929          * that already exists in memory.
930          */
931         hw = &(tail->hw);
932         hw->next_desc = (u32)desc->async_tx.phys;
933
934         /* Add the software descriptor and all children to the list
935          * of pending transactions
936          */
937 out_splice:
938         list_splice_tail_init(&desc->tx_list, &chan->pending_list);
939 }
940
941 /* Assign cookie to each descriptor, and append the descriptors to the pending
942  * list
943  */
944 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
945 {
946         struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
947         struct xilinx_dma_desc_sw *desc = container_of(tx,
948                                 struct xilinx_dma_desc_sw, async_tx);
949         struct xilinx_dma_desc_sw *child;
950         unsigned long flags;
951         dma_cookie_t cookie = -EBUSY;
952
953         if (chan->err) {
954                 /* If reset fails, need to hard reset the system.
955                  * Channel is no longer functional
956                  */
957                 if (!dma_init(chan))
958                         chan->err = 0;
959                 else
960                         return cookie;
961         }
962
963         spin_lock_irqsave(&chan->lock, flags);
964
965         /*
966          * assign cookies to all of the software descriptors
967          * that make up this transaction
968          */
969         cookie = chan->cookie;
970         list_for_each_entry(child, &desc->tx_list, node) {
971                 cookie++;
972                 if (cookie < 0)
973                         cookie = DMA_MIN_COOKIE;
974
975                 child->async_tx.cookie = cookie;
976         }
977
978         chan->cookie = cookie;
979
980
981         /* put this transaction onto the tail of the pending queue */
982         append_desc_queue(chan, desc);
983
984         spin_unlock_irqrestore(&chan->lock, flags);
985
986         return cookie;
987 }
988
989 static struct xilinx_dma_desc_sw *xilinx_dma_alloc_descriptor(
990                                         struct xilinx_dma_chan *chan)
991 {
992         struct xilinx_dma_desc_sw *desc;
993         dma_addr_t pdesc;
994
995         desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
996         if (!desc) {
997                 dev_dbg(chan->dev, "out of memory for desc\n");
998                 return NULL;
999         }
1000
1001         memset(desc, 0, sizeof(*desc));
1002         INIT_LIST_HEAD(&desc->tx_list);
1003         dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1004         desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1005         desc->async_tx.phys = pdesc;
1006
1007         return desc;
1008 }
1009
1010 /**
1011  * xilinx_dma_prep_memcpy - prepare descriptors for a memcpy transaction
1012  * @dchan: DMA channel
1013  * @dma_dst: destination address
1014  * @dma_src: source address
1015  * @len: transfer length
1016  * @flags: transfer ack flags
1017  */
1018 static struct dma_async_tx_descriptor *xilinx_dma_prep_memcpy(
1019         struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
1020         size_t len, unsigned long flags)
1021 {
1022         struct xilinx_dma_chan *chan;
1023         struct xilinx_dma_desc_sw *first = NULL, *prev = NULL, *new;
1024         struct xilinx_dma_desc_hw *hw, *prev_hw;
1025         size_t copy;
1026         dma_addr_t src = dma_src;
1027         dma_addr_t dst = dma_dst;
1028
1029         if (!dchan)
1030                 return NULL;
1031
1032         if (!len)
1033                 return NULL;
1034
1035         chan = to_xilinx_chan(dchan);
1036
1037         if (chan->err) {
1038
1039                 /* If reset fails, need to hard reset the system.
1040                  * Channel is no longer functional
1041                  */
1042                 if (!dma_init(chan))
1043                         chan->err = 0;
1044                 else
1045                         return NULL;
1046         }
1047
1048         /* If build does not have Data Realignment Engine (DRE),
1049          * src has to be aligned
1050          */
1051         if (!chan->has_DRE) {
1052                 if ((dma_src &
1053                    (chan->feature & XILINX_DMA_FTR_DATA_WIDTH_MASK)) ||
1054                    (dma_dst &
1055                    (chan->feature & XILINX_DMA_FTR_DATA_WIDTH_MASK))) {
1056
1057                         dev_err(chan->dev,
1058                           "Source or destination address not aligned when no DRE\n");
1059
1060                         return NULL;
1061                 }
1062         }
1063
1064         do {
1065
1066                 /* Allocate descriptor from DMA pool */
1067                 new = xilinx_dma_alloc_descriptor(chan);
1068                 if (!new) {
1069                         dev_err(chan->dev,
1070                             "No free memory for link descriptor\n");
1071                         goto fail;
1072                 }
1073
1074                 copy = min(len, (size_t)chan->max_len);
1075
1076                 /* if lite build, transfer cannot cross page boundary
1077                  */
1078                 if (chan->is_lite)
1079                         copy = min(copy, (size_t)(PAGE_SIZE -
1080                                                 (src & ~PAGE_MASK)));
1081
1082                 if (!copy) {
1083                         dev_err(chan->dev,
1084                             "Got zero transfer length for %x\n",
1085                                         (unsigned int)src);
1086                         goto fail;
1087                 }
1088
1089                 hw = &(new->hw);
1090                 hw->control =
1091                       (hw->control & ~XILINX_DMA_MAX_TRANS_LEN) | copy;
1092                 hw->buf_addr = src;
1093                 hw->addr_vsize = dst;
1094
1095                 if (!first)
1096                         first = new;
1097                 else {
1098                         prev_hw = &(prev->hw);
1099                         prev_hw->next_desc = new->async_tx.phys;
1100                 }
1101
1102                 new->async_tx.cookie = 0;
1103                 async_tx_ack(&new->async_tx);
1104
1105                 prev = new;
1106                 len -= copy;
1107                 src += copy;
1108                 dst += copy;
1109
1110                 /* Insert the descriptor to the list */
1111                 list_add_tail(&new->node, &first->tx_list);
1112         } while (len);
1113
1114         /* Link the last BD with the first BD */
1115         hw->next_desc = first->async_tx.phys;
1116
1117         new->async_tx.flags = flags; /* client is in control of this ack */
1118         new->async_tx.cookie = -EBUSY;
1119
1120         return &first->async_tx;
1121
1122 fail:
1123         if (!first)
1124                 return NULL;
1125
1126         xilinx_dma_free_desc_list_reverse(chan, &first->tx_list);
1127         return NULL;
1128 }
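/*
 * Illustrative sketch (not part of this driver): how a client might drive the
 * memcpy preparation routine above through the generic dmaengine interface.
 * my_issue_memcpy() and its arguments are placeholders and error unwinding is
 * omitted; dst and src must already be DMA-mapped addresses.
 *
 *	static dma_cookie_t my_issue_memcpy(struct dma_chan *chan,
 *					    dma_addr_t dst, dma_addr_t src,
 *					    size_t len)
 *	{
 *		struct dma_async_tx_descriptor *tx;
 *		dma_cookie_t cookie;
 *
 *		tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *		if (!tx)
 *			return -ENOMEM;
 *
 *		cookie = tx->tx_submit(tx);
 *		dma_async_issue_pending(chan);
 *		return cookie;
 *	}
 */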
1129
1130 /**
1131  * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1132  * @chan: DMA channel
1133  * @sgl: scatterlist to transfer to/from
1134  * @sg_len: number of entries in @scatterlist
1135  * @direction: DMA direction
1136  * @flags: transfer ack flags
1137  */
1138 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1139         struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1140         enum dma_transfer_direction direction, unsigned long flags,
1141         void *context)
1142 {
1143         struct xilinx_dma_chan *chan;
1144         struct xilinx_dma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
1145         struct xilinx_dma_desc_hw *hw = NULL, *prev_hw = NULL;
1146
1147         size_t copy;
1148
1149         int i;
1150         struct scatterlist *sg;
1151         size_t sg_used;
1152         dma_addr_t dma_src;
1153
1154 #ifdef TEST_DMA_WITH_LOOPBACK
1155         int total_len;
1156 #endif
1157         if (!dchan)
1158                 return NULL;
1159
1160         chan = to_xilinx_chan(dchan);
1161
1162         if (chan->direction != direction)
1163                 return NULL;
1164
1165 #ifdef TEST_DMA_WITH_LOOPBACK
1166         total_len = 0;
1167
1168         for_each_sg(sgl, sg, sg_len, i) {
1169                 total_len += sg_dma_len(sg);
1170         }
1171 #endif
1172         /*
1173          * Build transactions using information in the scatter gather list
1174          */
1175         for_each_sg(sgl, sg, sg_len, i) {
1176                 sg_used = 0;
1177
1178                 /* Loop until the entire scatterlist entry is used */
1179                 while (sg_used < sg_dma_len(sg)) {
1180
1181                         /* Allocate the link descriptor from DMA pool */
1182                         new = xilinx_dma_alloc_descriptor(chan);
1183                         if (!new) {
1184                                 dev_err(chan->dev, "No free memory for "
1185                                      "link descriptor\n");
1186                                 goto fail;
1187                         }
1188
1189                         /*
1190                          * Calculate the maximum number of bytes to transfer,
1191                          * making sure it is less than the hw limit
1192                          */
1193                         copy = min((size_t)(sg_dma_len(sg) - sg_used),
1194                                 (size_t)chan->max_len);
1195                         hw = &(new->hw);
1196
1197                         dma_src = sg_dma_address(sg) + sg_used;
1198
1199                         hw->buf_addr = dma_src;
1200
1201                         /* Fill in the descriptor */
1202                         hw->control = copy;
1203
1204                         /*
1205                          * If this is not the first descriptor, chain the
1206                          * current descriptor after the previous descriptor
1207                          *
1208                          * For the first DMA_MEM_TO_DEV transfer, set SOP
1209                          */
1210                         if (!first) {
1211                                 first = new;
1212                                 if (direction == DMA_MEM_TO_DEV) {
1213                                         hw->control |= XILINX_DMA_BD_SOP;
1214 #ifdef TEST_DMA_WITH_LOOPBACK
1215                                         hw->app_4 = total_len;
1216 #endif
1217                                 }
1218                         } else {
1219                                 prev_hw = &(prev->hw);
1220                                 prev_hw->next_desc = new->async_tx.phys;
1221                         }
1222
1223                         new->async_tx.cookie = 0;
1224                         async_tx_ack(&new->async_tx);
1225
1226                         prev = new;
1227                         sg_used += copy;
1228
1229                         /* Insert the link descriptor into the LD ring */
1230                         list_add_tail(&new->node, &first->tx_list);
1231                 }
1232         }
1233
1234         /* All scatter gather list entries have length == 0 */
1235         if (!first || !new)
1236                 return NULL;
1237
1238         /* Link the last BD with the first BD */
1239         hw->next_desc = first->async_tx.phys;
1240
1241         if (direction == DMA_MEM_TO_DEV)
1242                 hw->control |= XILINX_DMA_BD_EOP;
1243
1244         new->async_tx.flags = flags;
1245         new->async_tx.cookie = -EBUSY;
1246
1247         /* Set EOP to the last link descriptor of new list */
1248         hw->control |= XILINX_DMA_BD_EOP;
1249
1250         return &first->async_tx;
1251
1252 fail:
1253         /* If first was not set, then we failed to allocate the very first
1254          * descriptor, and we're done */
1255         if (!first)
1256                 return NULL;
1257
1258         /*
1259          * First is set, so all of the descriptors we allocated have been added
1260          * to first->tx_list, INCLUDING "first" itself. Therefore we
1261          * must traverse the list backwards freeing each descriptor in turn
1262          */
1263         xilinx_dma_free_desc_list_reverse(chan, &first->tx_list);
1264
1265         return NULL;
1266 }
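/*
 * Illustrative sketch (not part of this driver): submitting a single
 * DMA_MEM_TO_DEV buffer through the slave_sg preparation routine above via
 * the generic dmaengine interface.  my_send_buf() is a placeholder; unmapping
 * on completion and error unwinding are left out for brevity.
 *
 *	static int my_send_buf(struct dma_chan *chan, void *buf, size_t len)
 *	{
 *		struct dma_async_tx_descriptor *tx;
 *		struct scatterlist sg;
 *
 *		sg_init_one(&sg, buf, len);
 *		if (dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE) != 1)
 *			return -EIO;
 *
 *		tx = chan->device->device_prep_slave_sg(chan, &sg, 1,
 *				DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT, NULL);
 *		if (!tx)
 *			return -ENOMEM;
 *
 *		tx->tx_submit(tx);
 *		dma_async_issue_pending(chan);
 *		return 0;
 *	}
 */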
1267
1268 /**
1269  * xilinx_vdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1270  * @chan: VDMA channel
1271  * @sgl: scatterlist to transfer to/from
1272  * @sg_len: number of entries in @scatterlist
1273  * @direction: DMA direction
1274  * @flags: transfer ack flags
1275  */
1276 static struct dma_async_tx_descriptor *xilinx_vdma_prep_slave_sg(
1277         struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1278         enum dma_transfer_direction direction, unsigned long flags,
1279         void *context)
1280 {
1281         struct xilinx_dma_chan *chan;
1282         struct xilinx_dma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
1283         struct xilinx_dma_desc_hw *hw = NULL, *prev_hw = NULL;
1284         int i;
1285         struct scatterlist *sg;
1286         dma_addr_t dma_src;
1287
1288         if (!dchan)
1289                 return NULL;
1290
1291         chan = to_xilinx_chan(dchan);
1292
1293         if (chan->direction != direction)
1294                 return NULL;
1295
1296         /* Enforce one sg entry for one frame */
1297         if (sg_len != chan->num_frms) {
1298                 dev_err(chan->dev, "number of entries %d not the "
1299                     "same as num stores %d\n", sg_len, chan->num_frms);
1300
1301                 return NULL;
1302         }
1303
1304         if (!chan->has_SG) {
1305                 DMA_OUT(&chan->addr_regs->hsize, chan->config.hsize);
1306                 DMA_OUT(&chan->addr_regs->frmdly_stride,
1307                      chan->config.frm_dly << XILINX_VDMA_FRMDLY_SHIFT |
1308                      chan->config.stride);
1309         }
1310
1311         /* Build transactions using information in the scatter gather list
1312          */
1313         for_each_sg(sgl, sg, sg_len, i) {
1314
1315                 /* Allocate the link descriptor from DMA pool */
1316                 new = xilinx_dma_alloc_descriptor(chan);
1317                 if (!new) {
1318                         dev_err(chan->dev, "No free memory for "
1319                             "link descriptor\n");
1320                         goto fail;
1321                 }
1322
1323                 /*
1324                  * Calculate the maximum number of bytes to transfer,
1325                  * making sure it is less than the hw limit
1326                  */
1327                 hw = &(new->hw);
1328
1329                 dma_src = sg_dma_address(sg);
1330                 if (chan->has_SG) {
1331                         hw->buf_addr = dma_src;
1332
1333                         /* Fill in the descriptor */
1334                         hw->addr_vsize = chan->config.vsize;
1335                         hw->hsize = chan->config.hsize;
1336                         hw->control = (chan->config.frm_dly <<
1337                                         XILINX_VDMA_FRMDLY_SHIFT) |
1338                                         chan->config.stride;
1339                 } else {
1340                         /* Update the registers */
1341                         DMA_OUT(&(chan->addr_regs->buf_addr[i]), dma_src);
1342                 }
1343
1344                 /* If this is not the first descriptor, chain the
1345                  * current descriptor after the previous descriptor
1346                  */
1347                 if (!first) {
1348                         first = new;
1349                 } else {
1350                         prev_hw = &(prev->hw);
1351                         prev_hw->next_desc = new->async_tx.phys;
1352                 }
1353
1354                 new->async_tx.cookie = 0;
1355                 async_tx_ack(&new->async_tx);
1356
1357                 prev = new;
1358
1359                 /* Insert the link descriptor into the list */
1360                 list_add_tail(&new->node, &first->tx_list);
1361         }
1362
1363         if (!first || !new)
1364                 return NULL;
1365
1366         /* Link the last BD with the first BD */
1367         hw->next_desc = first->async_tx.phys;
1368
1369         new->async_tx.flags = flags;
1370         new->async_tx.cookie = -EBUSY;
1371
1372         return &first->async_tx;
1373
1374 fail:
1375         /* If first was not set, then we failed to allocate the very first
1376          * descriptor, and we're done */
1377         if (!first)
1378                 return NULL;
1379
1380         /* First is set, so all of the descriptors we allocated have been added
1381          * to first->tx_list, INCLUDING "first" itself. Therefore we
1382          * must traverse the list backwards freeing each descriptor in turn
1383          */
1384         xilinx_dma_free_desc_list_reverse(chan, &first->tx_list);
1385         return NULL;
1386 }
1387
1388 /* Run-time device configuration for Axi DMA and Axi CDMA */
1389 static int xilinx_dma_device_control(struct dma_chan *dchan,
1390                                   enum dma_ctrl_cmd cmd, unsigned long arg)
1391 {
1392         struct xilinx_dma_chan *chan;
1393         unsigned long flags;
1394
1395         if (!dchan)
1396                 return -EINVAL;
1397
1398         chan = to_xilinx_chan(dchan);
1399
1400         if (cmd == DMA_TERMINATE_ALL) {
1401                 /* Halt the DMA engine */
1402                 dma_halt(chan);
1403
1404                 spin_lock_irqsave(&chan->lock, flags);
1405
1406                 /* Remove and free all of the descriptors in the lists */
1407                 xilinx_dma_free_desc_list(chan, &chan->pending_list);
1408                 xilinx_dma_free_desc_list(chan, &chan->active_list);
1409
1410                 spin_unlock_irqrestore(&chan->lock, flags);
1411                 return 0;
1412         } else if (cmd == DMA_SLAVE_CONFIG) {
1413                 /* Configure interrupt coalescing and delay counter
1414                  * Use value XILINX_DMA_NO_CHANGE to signal no change
1415                  */
1416                 struct xilinx_dma_config *cfg = (struct xilinx_dma_config *)arg;
1417                 u32 reg = DMA_IN(&chan->regs->cr);
1418
1419                 if (cfg->coalesc <= XILINX_DMA_COALESCE_MAX) {
1420                         reg &= ~XILINX_DMA_XR_COALESCE_MASK;
1421                         reg |= cfg->coalesc << XILINX_DMA_COALESCE_SHIFT;
1422
1423                         chan->config.coalesc = cfg->coalesc;
1424                 }
1425
1426                 if (cfg->delay <= XILINX_DMA_DELAY_MAX) {
1427                         reg &= ~XILINX_DMA_XR_DELAY_MASK;
1428                         reg |= cfg->delay << XILINX_DMA_DELAY_SHIFT;
1429                         chan->config.delay = cfg->delay;
1430                 }
1431
1432                 DMA_OUT(&chan->regs->cr, reg);
1433
1434                 return 0;
1435         } else
1436                 return -ENXIO;
1437 }
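/*
 * Illustrative sketch (not part of this driver): a client could tune the
 * interrupt coalescing and delay thresholds handled above by passing a
 * struct xilinx_dma_config (from <linux/amba/xilinx_dma.h>) through
 * DMA_SLAVE_CONFIG.  my_set_thresholds() and the example values (coalesce
 * after 4 BDs, delay timeout of 255) are placeholders.
 *
 *	static int my_set_thresholds(struct dma_chan *chan)
 *	{
 *		struct xilinx_dma_config cfg;
 *
 *		memset(&cfg, 0, sizeof(cfg));
 *		cfg.coalesc = 4;
 *		cfg.delay = 255;
 *
 *		return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *						    (unsigned long)&cfg);
 *	}
 */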
1438
1439 /* Run-time configuration for Axi VDMA, supports:
1440  * . halt the channel
1441  * . configure interrupt coalescing and inter-packet delay threshold
1442  * . start/stop parking
1443  * . enable genlock
1444  * . set transfer information using config struct
1445  */
1446 static int xilinx_vdma_device_control(struct dma_chan *dchan,
1447                                   enum dma_ctrl_cmd cmd, unsigned long arg)
1448 {
1449         struct xilinx_dma_chan *chan;
1450         unsigned long flags;
1451
1452         if (!dchan)
1453                 return -EINVAL;
1454
1455         chan = to_xilinx_chan(dchan);
1456
1457         if (cmd == DMA_TERMINATE_ALL) {
1458                 /* Halt the DMA engine */
1459                 dma_halt(chan);
1460
1461                 spin_lock_irqsave(&chan->lock, flags);
1462
1463                 /* Remove and free all of the descriptors in the lists */
1464                 xilinx_dma_free_desc_list(chan, &chan->pending_list);
1465                 xilinx_dma_free_desc_list(chan, &chan->active_list);
1466
1467                 spin_unlock_irqrestore(&chan->lock, flags);
1468                 return 0;
1469         } else if (cmd == DMA_SLAVE_CONFIG) {
1470                 struct xilinx_dma_config *cfg = (struct xilinx_dma_config *)arg;
1471                 u32 reg;
1472
1473                 if (cfg->reset)
1474                         dma_init(chan);
1475
1476                 reg = DMA_IN(&chan->regs->cr);
1477
1478                 /* If vsize is -1, it is park-related operations */
1479                 if (cfg->vsize == -1) {
1480                         if (cfg->park)
1481                                 reg &= ~XILINX_VDMA_CIRC_EN;
1482                         else
1483                                 reg |= XILINX_VDMA_CIRC_EN;
1484
1485                         DMA_OUT(&chan->regs->cr, reg);
1486                         return 0;
1487                 }
1488
1489                 /* If hsize is -1, it is interrupt threshold settings */
1490                 if (cfg->hsize == -1) {
1491                         if (cfg->coalesc <= XILINX_DMA_COALESCE_MAX) {
1492                                 reg &= ~XILINX_DMA_XR_COALESCE_MASK;
1493                                 reg |= cfg->coalesc <<
1494                                         XILINX_DMA_COALESCE_SHIFT;
1495                                 chan->config.coalesc = cfg->coalesc;
1496                         }
1497
1498                         if (cfg->delay <= XILINX_DMA_DELAY_MAX) {
1499                                 reg &= ~XILINX_DMA_XR_DELAY_MASK;
1500                                 reg |= cfg->delay << XILINX_DMA_DELAY_SHIFT;
1501                                 chan->config.delay = cfg->delay;
1502                         }
1503
1504                         DMA_OUT(&chan->regs->cr, reg);
1505                         return 0;
1506                 }
1507
1508                 /* Transfer information */
1509                 chan->config.vsize = cfg->vsize;
1510                 chan->config.hsize = cfg->hsize;
1511                 chan->config.stride = cfg->stride;
1512                 chan->config.frm_dly = cfg->frm_dly;
1513                 chan->config.park = cfg->park;
1514
1515                 /* genlock settings */
1516                 chan->config.gen_lock = cfg->gen_lock;
1517                 chan->config.master = cfg->master;
1518
1519                 if (cfg->gen_lock) {
1520                         if (chan->genlock) {
1521                                 reg |= XILINX_VDMA_SYNC_EN;
1522                                 reg |= cfg->master << XILINX_VDMA_MSTR_SHIFT;
1523                         }
1524                 }
1525
1526                 chan->config.frm_cnt_en = cfg->frm_cnt_en;
1527                 if (cfg->park)
1528                         chan->config.park_frm = cfg->park_frm;
1529
1530                 chan->config.coalesc = cfg->coalesc;
1531                 chan->config.delay = cfg->delay;
1532                 if (cfg->coalesc <= XILINX_DMA_COALESCE_MAX) {
1533                         reg &= ~XILINX_DMA_XR_COALESCE_MASK;
1534                         reg |= cfg->coalesc << XILINX_DMA_COALESCE_SHIFT;
1535                 }
1536
1537                 if (cfg->delay <= XILINX_DMA_DELAY_MAX) {
1538                         reg &= ~XILINX_DMA_XR_DELAY_MASK;
1539                         reg |= cfg->delay << XILINX_DMA_DELAY_SHIFT;
1540                 }
1541
1542                 chan->config.disable_intr = cfg->disable_intr;
1543
1544                 if (cfg->ext_fsync)
1545                         reg |= cfg->ext_fsync << XILINX_VDMA_EXTFSYNC_SHIFT;
1546
1547                 DMA_OUT(&chan->regs->cr, reg);
1548                 return 0;
1549         } else
1550                 return -ENXIO;
1551 }
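
/*
 * Illustrative VDMA configuration from a client, following the conventions
 * implemented above: vsize == -1 selects the park/circular control path,
 * hsize == -1 selects the interrupt-threshold path, and anything else
 * programs the transfer geometry.  The frame geometry below (1280x720,
 * 4 bytes per pixel) is a placeholder, not a value taken from this driver.
 *
 *      struct xilinx_dma_config cfg = {
 *              .vsize    = 720,
 *              .hsize    = 1280 * 4,
 *              .stride   = 1280 * 4,
 *              .frm_dly  = 0,
 *              .park     = 0,
 *              .gen_lock = 0,
 *              .coalesc  = 1,
 *              .delay    = 0,
 *      };
 *
 *      err = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *                                         (unsigned long)&cfg);
 */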
1552
1554 /* Compute the ceiling of log2(value), used as the copy alignment shift.
1555  *
1556  * Only handles values up to 2048 (shift 11); larger values return 0.
1557  */
1558 static int my_log(int value)
1559 {
1560         int i = 0;
1561         while ((1 << i) < value) {
1562                 i++;
1563
1564                 if (i >= 12)
1565                         return 0;
1566         }
1567
1568         return i;
1569 }
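
/*
 * For reference: my_log(1) == 0, my_log(4) == 2, my_log(8) == 3, and a
 * non-power-of-two such as my_log(6) also returns 3, so the copy_align
 * value derived from the data width below is always rounded up to the
 * next power-of-two shift.  (The example values are illustrative.)
 */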
1570
1571 #ifdef CONFIG_OF
1572
1573 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
1574 {
1575         irq_dispose_mapping(chan->irq);
1576         list_del(&chan->common.device_node);
1577         kfree(chan);
1578 }
1579
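/*
 * For orientation, a controller node of the shape parsed by the probe
 * routines below might look like the fragment sketched here.  The property
 * names come from the of_device_is_compatible()/of_get_property() calls in
 * this file; the addresses, interrupt specifiers and values are placeholders
 * only, not a binding reference.
 *
 *      axi_vdma_0: axivdma@40030000 {
 *              compatible = "xlnx,axi-vdma";
 *              reg = <0x40030000 0x1000>;
 *              xlnx,include-sg = <1>;
 *              xlnx,num-fstores = <3>;
 *
 *              dma-channel@40030000 {
 *                      compatible = "xlnx,axi-vdma-mm2s-channel";
 *                      interrupts = <0 59 4>;
 *                      xlnx,datawidth = <64>;
 *                      xlnx,include-dre = <0>;
 *                      xlnx,genlock-mode = <0>;
 *                      xlnx,device-id = <0>;
 *                      xlnx,flush-fsync = <1>;
 *              };
 *      };
 */
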
1580 /*
1581  * Probing channels
1582  *
1583  * . Get channel features from the device tree entry
1584  * . Initialize special channel handling routines
1585  */
1586 static int __devinit xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
1587         struct device_node *node, u32 feature)
1588 {
1589         struct xilinx_dma_chan *chan;
1590         int err;
1591         int *value;
1592         u32 width = 0, device_id = 0, flush_fsync = 0;
1593
1594         /* alloc channel */
1595         chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1596         if (!chan) {
1597                 dev_err(xdev->dev, "no free memory for DMA channels!\n");
1598                 err = -ENOMEM;
1599                 goto out_return;
1600         }
1601
1602         chan->feature = feature;
1603         chan->is_lite = 0;
1604         chan->has_DRE = 0;
1605         chan->has_SG = 0;
1606         chan->max_len = XILINX_DMA_MAX_TRANS_LEN;
1607
1608         value = (int *)of_get_property(node, "xlnx,include-dre",
1609                         NULL);
1610         if (value) {
1611                 if (be32_to_cpup(value) == 1)
1612                         chan->has_DRE = 1;
1613         }
1614
1615         value = (int *)of_get_property(node, "xlnx,genlock-mode",
1616                         NULL);
1617         if (value) {
1618                 if (be32_to_cpup(value) == 1)
1619                         chan->genlock = 1;
1620         }
1621
1622         value = (int *)of_get_property(node,
1623                         "xlnx,datawidth",
1624                         NULL);
1625         if (value) {
1626                 width = be32_to_cpup(value) >> 3; /* convert bits to bytes */
1627
1628                 /* If data width is greater than 8 bytes, DRE is not in hw */
1629                 if (width > 8)
1630                         chan->has_DRE = 0;
1631
1632                 chan->feature |= width - 1;
1633         }
1634
1635         value = (int *)of_get_property(node, "xlnx,device-id", NULL);
1636         if (value)
1637                 device_id = be32_to_cpup(value);
1638
1639         value = (int *)of_get_property(node, "xlnx,flush-fsync", NULL);
1640         if (value)
1641                 flush_fsync = be32_to_cpup(value);
1642
1643         if (feature & XILINX_DMA_IP_CDMA) {
1644                 chan->direction = DMA_MEM_TO_MEM;
1645                 chan->start_transfer = xilinx_cdma_start_transfer;
1646
1647                 chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
1648                                 XILINX_DMA_FTR_HAS_SG_SHIFT;
1649
1650                 value = (int *)of_get_property(node,
1651                                 "xlnx,lite-mode", NULL);
1652                 if (value) {
1653                         if (be32_to_cpup(value) == 1) {
1654                                 chan->is_lite = 1;
1655                                 value = (int *)of_get_property(node,
1656                                     "xlnx,max-burst-len", NULL);
1657                                 if (value) {
1658                                         if (!width) {
1659                                                 dev_err(xdev->dev,
1660                                                   "Lite mode without data width property\n");
                                                err = -EINVAL;
                                                goto out_free_chan;
1662                                         }
1663                                         chan->max_len = width *
1664                                                 be32_to_cpup(value);
1665                                 }
1666                         }
1667                 }
1668         }
1669
1670         if (feature & XILINX_DMA_IP_DMA) {
1671                 chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
1672                                         XILINX_DMA_FTR_HAS_SG_SHIFT;
1673
1674                 chan->start_transfer = xilinx_dma_start_transfer;
1675
1676                 if (of_device_is_compatible(node,
1677                          "xlnx,axi-dma-mm2s-channel"))
1678                         chan->direction = DMA_MEM_TO_DEV;
1679
1680                 if (of_device_is_compatible(node,
1681                                 "xlnx,axi-dma-s2mm-channel"))
1682                         chan->direction = DMA_DEV_TO_MEM;
1683
1684         }
1685
1686         if (feature & XILINX_DMA_IP_VDMA) {
1687                 chan->start_transfer = xilinx_vdma_start_transfer;
1688
1689                 chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
1690                         XILINX_DMA_FTR_HAS_SG_SHIFT;
1691
1692                 if (of_device_is_compatible(node,
1693                                 "xlnx,axi-vdma-mm2s-channel")) {
1694                         chan->direction = DMA_MEM_TO_DEV;
1695                         if (!chan->has_SG) {
1696                                 chan->addr_regs = (struct vdma_addr_regs *)
1697                                     ((u32)xdev->regs +
1698                                          XILINX_VDMA_DIRECT_REG_OFFSET);
1699                         }
1700                         if (flush_fsync == XILINX_VDMA_FLUSH_BOTH ||
1701                                 flush_fsync == XILINX_VDMA_FLUSH_MM2S)
1702                                 chan->flush_fsync = 1;
1703                 }
1704
1705                 if (of_device_is_compatible(node,
1706                                 "xlnx,axi-vdma-s2mm-channel")) {
1707                         chan->direction = DMA_DEV_TO_MEM;
1708                         if (!chan->has_SG) {
1709                                 chan->addr_regs = (struct vdma_addr_regs *)
1710                                     ((u32)xdev->regs +
1711                                         XILINX_VDMA_DIRECT_REG_OFFSET +
1712                                         XILINX_VDMA_CHAN_DIRECT_REG_SIZE);
1713                         }
1714                         if (flush_fsync == XILINX_VDMA_FLUSH_BOTH ||
1715                                 flush_fsync == XILINX_VDMA_FLUSH_S2MM)
1716                                 chan->flush_fsync = 1;
1717                 }
1718         }
1719
1720         chan->regs = (struct xdma_regs *)xdev->regs;
1721         chan->id = 0;
1722
1723         if (chan->direction == DMA_DEV_TO_MEM) {
1724                 chan->regs = (struct xdma_regs *)((u32)xdev->regs +
1725                                         XILINX_DMA_RX_CHANNEL_OFFSET);
1726                 chan->id = 1;
1727         }
1728
1729         /* Used by dmatest for channel matching in slave transfers.
1730          * This could become a structure carrying more matching information.
1731          */
1732         chan->private = (chan->direction & 0xFF) |
1733                 (chan->feature & XILINX_DMA_IP_MASK) |
1734                 (device_id << XILINX_DMA_DEVICE_ID_SHIFT);
1735         chan->common.private = (void *)&(chan->private);
1736
1737         if (!chan->has_DRE)
1738                 xdev->common.copy_align = my_log(width);
1739
1740         chan->dev = xdev->dev;
1741         xdev->chan[chan->id] = chan;
1742
1743         tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
1744
1745         /* Initialize the channel */
1746         if (dma_init(chan)) {
1747                 dev_err(xdev->dev, "Reset channel failed\n");
1748                 err = -EIO;
1749                 goto out_free_chan;
1750         }
1751
1752         spin_lock_init(&chan->lock);
1753         INIT_LIST_HEAD(&chan->pending_list);
1754         INIT_LIST_HEAD(&chan->active_list);
1755
1756         chan->common.device = &xdev->common;
1757
1758         /* find the IRQ line, if it exists in the device tree */
1759         chan->irq = irq_of_parse_and_map(node, 0);
1760         err = request_irq(chan->irq, dma_intr_handler, IRQF_SHARED,
1761                                 "xilinx-dma-controller", chan);
1762         if (err) {
1763                 dev_err(xdev->dev, "unable to request IRQ\n");
1764                 goto out_free_irq;
1765         }
1766
1767         /* Add the channel to DMA device channel list */
1768         list_add_tail(&chan->common.device_node, &xdev->common.channels);
1769         xdev->common.chancnt++;
1770
1771         return 0;
1772
1773 out_free_irq:
1774         irq_dispose_mapping(chan->irq);
1775 out_free_chan:
1776         kfree(chan);
1777 out_return:
1778         return err;
1779 }
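
/*
 * Sketch of how a slave client could claim one of these channels with
 * dma_request_channel(), keying off the cookie composed into
 * chan->common.private above.  The filter, the match value and the u32
 * type of the private cookie are assumptions for illustration only.
 *
 *      static bool xdma_filter(struct dma_chan *chan, void *param)
 *      {
 *              return chan->private &&
 *                     *(u32 *)chan->private == *(u32 *)param;
 *      }
 *
 *      u32 match = DMA_MEM_TO_DEV | XILINX_DMA_IP_DMA |
 *                  (0 << XILINX_DMA_DEVICE_ID_SHIFT);
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, xdma_filter, &match);
 */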
1780
1781 static int __devinit xilinx_dma_of_probe(struct platform_device *op)
1782 {
1783         struct xilinx_dma_device *xdev;
1784         struct device_node *child, *node;
1785         int err;
1786         int *value;
1787         int num_frames = 0;
1788
1789         dev_info(&op->dev, "Probing xilinx axi dma engines\n");
1790
1791         xdev = kzalloc(sizeof(struct xilinx_dma_device), GFP_KERNEL);
1792         if (!xdev) {
1793                 dev_err(&op->dev, "Not enough memory for device\n");
1794                 err = -ENOMEM;
1795                 goto out_return;
1796         }
1797
1798         xdev->dev = &(op->dev);
1799         INIT_LIST_HEAD(&xdev->common.channels);
1800
1801         node = op->dev.of_node;
1802         xdev->feature = 0;
1803
1804         /* iomap registers */
1805         xdev->regs = of_iomap(node, 0);
1806         if (!xdev->regs) {
1807                 dev_err(&op->dev, "unable to iomap registers\n");
1808                 err = -ENOMEM;
1809                 goto out_free_xdev;
1810         }
1811
1812         /* Axi CDMA only does memcpy
1813          */
1814         if (of_device_is_compatible(node, "xlnx,axi-cdma")) {
1815                 xdev->feature |= XILINX_DMA_IP_CDMA;
1816
1817                 value = (int *)of_get_property(node, "xlnx,include-sg",
1818                                 NULL);
1819                 if (value) {
1820                         if (be32_to_cpup(value) == 1)
1821                                 xdev->feature |= XILINX_DMA_FTR_HAS_SG;
1822                 }
1823
1824                 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
1825                 xdev->common.device_prep_dma_memcpy = xilinx_dma_prep_memcpy;
1826                 xdev->common.device_control = xilinx_dma_device_control;
1827                 xdev->common.device_issue_pending = xilinx_cdma_issue_pending;
1828         }
1829
1830         /* Axi DMA and VDMA only do slave transfers
1831          */
1832         if (of_device_is_compatible(node, "xlnx,axi-dma")) {
1833
1834                 xdev->feature |= XILINX_DMA_IP_DMA;
1835                 value = (int *)of_get_property(node,
1836                                 "xlnx,sg-include-stscntrl-strm",
1837                                 NULL);
1838                 if (value) {
1839                         if (be32_to_cpup(value) == 1) {
1840                                 xdev->feature |= (XILINX_DMA_FTR_STSCNTRL_STRM |
1841                                                         XILINX_DMA_FTR_HAS_SG);
1842                         }
1843                 }
1844
1845                 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
1846                 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
1847                 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
1848                 xdev->common.device_control = xilinx_dma_device_control;
1849                 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
1850         }
1851
1852         if (of_device_is_compatible(node, "xlnx,axi-vdma")) {
1853                 xdev->feature |= XILINX_DMA_IP_VDMA;
1854
1855                 value = (int *)of_get_property(node, "xlnx,include-sg",
1856                                 NULL);
1857                 if (value) {
1858                         if (be32_to_cpup(value) == 1)
1859                                 xdev->feature |= XILINX_DMA_FTR_HAS_SG;
1860                 }
1861
1862                 value = (int *)of_get_property(node, "xlnx,num-fstores",
1863                         NULL);
1864                 if (value)
1865                         num_frames      = be32_to_cpup(value);
1866
1867                 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
1868                 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
1869                 xdev->common.device_prep_slave_sg = xilinx_vdma_prep_slave_sg;
1870                 xdev->common.device_control = xilinx_vdma_device_control;
1871                 xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
1872         }
1873
1874         xdev->common.device_alloc_chan_resources =
1875                                 xilinx_dma_alloc_chan_resources;
1876         xdev->common.device_free_chan_resources =
1877                                 xilinx_dma_free_chan_resources;
1878         xdev->common.device_tx_status = xilinx_tx_status;
1879         xdev->common.dev = &op->dev;
1880
1881         dev_set_drvdata(&op->dev, xdev);
1882
1883         for_each_child_of_node(node, child) {
1884                 xilinx_dma_chan_probe(xdev, child, xdev->feature);
1885         }
1886
1887         if (xdev->feature & XILINX_DMA_IP_VDMA) {
1888                 int i;
1889
1890                 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
1891                         if (xdev->chan[i])
1892                                 xdev->chan[i]->num_frms = num_frames;
1893                 }
1894         }
1895
1896         dma_async_device_register(&xdev->common);
1897
1898         return 0;
1899
1900 out_free_xdev:
1901         kfree(xdev);
1902
1903 out_return:
1904         return err;
1905 }
1906
1907 static int __devexit xilinx_dma_of_remove(struct platform_device *op)
1908 {
1909         struct xilinx_dma_device *xdev;
1910         int i;
1911
1912         xdev = dev_get_drvdata(&op->dev);
1913         dma_async_device_unregister(&xdev->common);
1914
1915         for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
1916                 if (xdev->chan[i])
1917                         xilinx_dma_chan_remove(xdev->chan[i]);
1918         }
1919
1920         iounmap(xdev->regs);
1921         dev_set_drvdata(&op->dev, NULL);
1922         kfree(xdev);
1923
1924         return 0;
1925 }
1926
1927 static const struct of_device_id xilinx_dma_of_ids[] = {
1928         { .compatible = "xlnx,axi-cdma",},
1929         { .compatible = "xlnx,axi-dma",},
1930         { .compatible = "xlnx,axi-vdma",},
1931         {}
1932 };
1933
1934 static struct platform_driver xilinx_dma_of_driver = {
1935         .driver = {
1936                 .name = "xilinx-dma",
1937                 .owner = THIS_MODULE,
1938                 .of_match_table = xilinx_dma_of_ids,
1939         },
1940         .probe = xilinx_dma_of_probe,
1941         .remove = __devexit_p(xilinx_dma_of_remove),
1942 };
1943
1944 /*----------------------------------------------------------------------------*/
1945 /* Module Init / Exit                                                         */
1946 /*----------------------------------------------------------------------------*/
1947
1948 static __init int xilinx_dma_init(void)
1949 {
1950         int ret;
1951
1952         pr_info("Xilinx DMA driver\n");
1953
1954         ret = platform_driver_register(&xilinx_dma_of_driver);
1955         if (ret)
1956                 pr_err("xilinx_dma: failed to register platform driver\n");
1957
1958         return ret;
1959 }
1960
1961 static void __exit xilinx_dma_exit(void)
1962 {
1963         platform_driver_unregister(&xilinx_dma_of_driver);
1964 }
1965
1966 subsys_initcall(xilinx_dma_init);
1967 module_exit(xilinx_dma_exit);
1968
1969 #else
1970
1971 /**************************************************/
1972 /* Platform bus to support ARM before device tree */
1973 /**************************************************/
1974
1975 /*
1976  * The following probe and chan_probe functions were copied from the
1977  * OF section above, then modified to use platform data.
1978  */
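
/*
 * Sketch of the platform data consumed by this non-OF path, e.g. from a
 * board file.  The field names follow the accesses made below; the actual
 * structure definitions live in the Xilinx DMA header, and the values,
 * the resources and the registration code are placeholders.
 *
 *      static struct dma_channel_config axidma_chan_cfg[] = {
 *              { .type = "axi-dma-mm2s-channel", .datawidth = 32 },
 *              { .type = "axi-dma-s2mm-channel", .datawidth = 32 },
 *      };
 *
 *      static struct dma_device_config axidma_cfg = {
 *              .type           = "axi-dma",
 *              .include_sg     = 1,
 *              .channel_count  = ARRAY_SIZE(axidma_chan_cfg),
 *              .channel_config = axidma_chan_cfg,
 *      };
 *
 * The matching platform device is registered under the name "xilinx-axidma"
 * with one MEM resource for the register window and one IRQ resource per
 * channel (see the platform_get_resource() calls below).
 */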
1979
1980 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
1981 {
1982         free_irq(chan->irq, chan);
1983         list_del(&chan->common.device_node);
1984         kfree(chan);
1985 }
1986
1987 /*
1988  * Probing channels
1989  *
1990  * . Get channel features from the platform data
1991  * . Initialize special channel handling routines
1992  */
1993 static int __devinit xilinx_dma_chan_probe(struct platform_device *pdev,
1994                                 struct xilinx_dma_device *xdev,
1995                                 struct dma_channel_config *channel_config,
1996                                 int channel_num, u32 feature)
1997 {
1998         struct xilinx_dma_chan *chan;
1999         int err;
2000         u32 width = 0;
2001         struct resource *res;
2002
2003         /* alloc channel */
2006         chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2007         if (!chan) {
2008                 dev_err(xdev->dev, "no free memory for DMA channels!\n");
2009                 err = -ENOMEM;
2010                 goto out_return;
2011         }
2012
2013         chan->feature = feature;
2014         chan->is_lite = 0;
2015         chan->has_DRE = 0;
2016         chan->has_SG = 0;
2017         chan->max_len = XILINX_DMA_MAX_TRANS_LEN;
2018
2019         if (channel_config->include_dre)
2020                 chan->has_DRE = 1;
2021
2022         if (channel_config->genlock_mode)
2023                 chan->genlock = 1;
2024
2025         width = channel_config->datawidth >> 3;
2026         chan->feature |= width - 1;
2027
2028         if (feature & XILINX_DMA_IP_CDMA) {
2029
2030                 chan->direction = DMA_MEM_TO_MEM;
2031                 chan->start_transfer = xilinx_cdma_start_transfer;
2032
2033                 chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
2034                         XILINX_DMA_FTR_HAS_SG_SHIFT;
2035
2036                 if (channel_config->lite_mode) {
2037                         chan->is_lite = 1;
2038                         chan->max_len = width * channel_config->max_burst_len;
2039                 }
2040         }
2041
2042         if (feature & XILINX_DMA_IP_DMA) {
2043                 chan->has_SG = 1;
2044                 chan->start_transfer = xilinx_dma_start_transfer;
2045
2046                 if (!strcmp(channel_config->type, "axi-dma-mm2s-channel"))
2047                         chan->direction = DMA_MEM_TO_DEV;
2048
2049                 if (!strcmp(channel_config->type, "axi-dma-s2mm-channel"))
2050                         chan->direction = DMA_DEV_TO_MEM;
2051         }
2052
2053         if (feature & XILINX_DMA_IP_VDMA) {
2054
2055                 chan->start_transfer = xilinx_vdma_start_transfer;
2056
2057                 chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
2058                                 XILINX_DMA_FTR_HAS_SG_SHIFT;
2059
2060                 if (!strcmp(channel_config->type, "axi-vdma-mm2s-channel")) {
2061
2062                         dev_info(xdev->dev, "axi-vdma-mm2s-channel found\n");
2063
2064                         chan->direction = DMA_MEM_TO_DEV;
2065                         if (!chan->has_SG) {
2066                                 chan->addr_regs = (struct vdma_addr_regs *)
2067                                 ((u32)xdev->regs +
2068                                 XILINX_VDMA_DIRECT_REG_OFFSET);
2069                         }
2070                 }
2071
2072                 if (!strcmp(channel_config->type, "axi-vdma-s2mm-channel")) {
2073
2074                         dev_info(xdev->dev, "axi-vdma-s2mm-channel found\n");
2075
2076                         chan->direction = DMA_DEV_TO_MEM;
2077                         if (!chan->has_SG) {
2078                                 chan->addr_regs = (struct vdma_addr_regs *)
2079                                 ((u32)xdev->regs +
2080                                 XILINX_VDMA_DIRECT_REG_OFFSET +
2081                                 XILINX_VDMA_CHAN_DIRECT_REG_SIZE);
2082                         }
2083                 }
2084         }
2085
2086         chan->regs = (struct xdma_regs *)xdev->regs;
2087         chan->id = 0;
2088
2089         if (chan->direction == DMA_DEV_TO_MEM) {
2090                 chan->regs = (struct xdma_regs *)((u32)xdev->regs +
2091                                         XILINX_DMA_RX_CHANNEL_OFFSET);
2092                 chan->id = 1;
2093         }
2094
2095         /* Used by dmatest for channel matching in slave transfers.
2096          * This could become a structure carrying more matching information.
2097          */
2098         chan->private = (chan->direction & 0xFF) |
2099                 (chan->feature & XILINX_DMA_IP_MASK);
2100         chan->common.private = (void *)&(chan->private);
2101
2102         if (!chan->has_DRE)
2103                 xdev->common.copy_align = my_log(width);
2104
2105         chan->dev = xdev->dev;
2106         xdev->chan[chan->id] = chan;
2107
2108         tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
2109
2110         /* Initialize the channel */
2111         if (dma_init(chan)) {
2112                 dev_err(xdev->dev, "Reset channel failed\n");
2113                 err = -EIO;
2114                 goto out_free_chan;
2115         }
2116
2117         spin_lock_init(&chan->lock);
2118         INIT_LIST_HEAD(&chan->pending_list);
2119         INIT_LIST_HEAD(&chan->active_list);
2120
2121         chan->common.device = &xdev->common;
2122
2123         /* setup the interrupt for the channel */
2124
        res = platform_get_resource(pdev, IORESOURCE_IRQ, channel_num);
        if (!res) {
                dev_err(xdev->dev, "no IRQ resource for channel %d\n",
                        channel_num);
                err = -ENODEV;
                goto out_free_chan;
        }
        chan->irq = res->start;
2127
2128         err = request_irq(chan->irq, dma_intr_handler, IRQF_SHARED,
2129                         "xilinx-dma-controller", chan);
2130         if (err) {
2131                 dev_err(xdev->dev, "unable to request IRQ\n");
2132                 goto out_free_irq;
2133         } else
2134                 dev_info(&pdev->dev, "using irq %d\n", chan->irq);
2135
2136         /* Add the channel to DMA device channel list */
2137         list_add_tail(&chan->common.device_node, &xdev->common.channels);
2138         xdev->common.chancnt++;
2139
2140         return 0;
2141
2142 out_free_irq:
2143         free_irq(chan->irq, chan);
2144 out_free_chan:
2145         kfree(chan);
2146 out_return:
2147         return err;
2148 }
2149
2150 static int __devinit xilinx_dma_probe(struct platform_device *pdev)
2151 {
2152         struct xilinx_dma_device *xdev;
2153         int err;
2154         int num_frames = 0;
2155         struct resource *res;
2156         struct device *dev = &pdev->dev;
2157         struct dma_device_config *dma_config;
2158         int channel;
2159
2160         dev_info(&pdev->dev, "Probing xilinx axi dma engines\n");
2161
2162         xdev = kzalloc(sizeof(struct xilinx_dma_device), GFP_KERNEL);
2163         if (!xdev) {
2164                 dev_err(&pdev->dev, "Not enough memory for device\n");
2165                 err = -ENOMEM;
2166                 goto out_return;
2167         }
2168
2169         xdev->dev = &(pdev->dev);
2170         INIT_LIST_HEAD(&xdev->common.channels);
2171
2172         xdev->feature = 0;
2173
2174         /* iomap registers */
2175         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2176         if (!res) {
2177                 printk(KERN_ERR "get_resource for MEM resource for dev %d "
2178                        "failed\n", pdev->id);
2179                 err = -ENOMEM;
2180                 goto out_return;
2181         } else {
2182                 dev_info(&pdev->dev, "device %d actual base is %x\n",
2183                        pdev->id, (unsigned int)res->start);
2184         }
2185         if (!request_mem_region(res->start, 0x1000, "xilinx_axidma")) {
2186                 printk(KERN_ERR "memory request failure for base %x\n",
2187                        (unsigned int)res->start);
2188                 err = -ENOMEM;
2189                 goto out_return;
2190         }
2191
2192         xdev->regs = ioremap(res->start, 0x1000);
2193         pr_info("dma base remapped: %lx\n", (unsigned long)xdev->regs);
2194         if (!xdev->regs) {
2195                 dev_err(&pdev->dev, "unable to iomap registers\n");
2196                 err = -ENOMEM;
2197                 goto out_free_xdev;
2198         }
2199
2200         dma_config = (struct dma_device_config *)dev->platform_data;
2201
2202         /* Axi CDMA only does memcpy
2203          */
2204         if (!strcmp(dma_config->type, "axi-cdma")) {
2205
2206                 pr_info("found an axi-cdma configuration\n");
2207                 xdev->feature |= XILINX_DMA_IP_CDMA;
2208
2209                 if (dma_config->include_sg)
2210                         xdev->feature |= XILINX_DMA_FTR_HAS_SG;
2211
2212                 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2213                 xdev->common.device_prep_dma_memcpy = xilinx_dma_prep_memcpy;
2214                 xdev->common.device_control = xilinx_dma_device_control;
2215                 xdev->common.device_issue_pending = xilinx_cdma_issue_pending;
2216         }
2217
2218         /* Axi DMA and VDMA only do slave transfers
2219          */
2220         if (!strcmp(dma_config->type, "axi-dma")) {
2221
2222                 pr_info("found an axi-dma configuration\n");
2223
2224                 xdev->feature |= XILINX_DMA_IP_DMA;
2225                 if (dma_config->sg_include_stscntrl_strm)
2226                         xdev->feature |= XILINX_DMA_FTR_STSCNTRL_STRM;
2227
2228                 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2229                 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2230                 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
2231                 xdev->common.device_control = xilinx_dma_device_control;
2232                 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
2233         }
2234
2235         if (!strcmp(dma_config->type, "axi-vdma")) {
2236
2237                 pr_info("found an axi-vdma configuration\n");
2238
2239                 xdev->feature |= XILINX_DMA_IP_VDMA;
2240
2241                 if (dma_config->include_sg)
2242                         xdev->feature |= XILINX_DMA_FTR_HAS_SG;
2243
2244                 num_frames = dma_config->num_fstores;
2245
2246                 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2247                 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2248                 xdev->common.device_prep_slave_sg = xilinx_vdma_prep_slave_sg;
2249                 xdev->common.device_control = xilinx_vdma_device_control;
2250                 xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
2251         }
2252
2253         xdev->common.device_alloc_chan_resources =
2254                                 xilinx_dma_alloc_chan_resources;
2255         xdev->common.device_free_chan_resources =
2256                                 xilinx_dma_free_chan_resources;
2257         xdev->common.device_tx_status = xilinx_tx_status;
2258         xdev->common.dev = &pdev->dev;
2259
2260         dev_set_drvdata(&pdev->dev, xdev);
2261
2262         for (channel = 0; channel < dma_config->channel_count; channel++)
2263                 xilinx_dma_chan_probe(pdev, xdev,
2264                         &dma_config->channel_config[channel],
2265                         channel, xdev->feature);
2266
2267         if (xdev->feature & XILINX_DMA_IP_VDMA) {
2268                 int i;
2269
2270                 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
2271                         if (xdev->chan[i])
2272                                 xdev->chan[i]->num_frms = num_frames;
2273                 }
2274         }
2275
2276         dma_async_device_register(&xdev->common);
2277
2278         return 0;
2279
2280 out_free_xdev:
2281         kfree(xdev);
2282
2283 out_return:
2284         return err;
2285 }
2286
2288 static int __exit xilinx_dma_remove(struct platform_device *pdev)
2289 {
2290         struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
2291         int i;
2293         dma_async_device_unregister(&xdev->common);
2295         for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
2296                 if (xdev->chan[i])
2297                         xilinx_dma_chan_remove(xdev->chan[i]);
2298         }
2299
2300         iounmap(xdev->regs);
2301         dev_set_drvdata(&pdev->dev, NULL);
2302         kfree(xdev);
2303
2304         return 0;
2305 }
2306
2307 static void xilinx_dma_shutdown(struct platform_device *pdev)
2308 {
2309         struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
2310         int i;
2311
        for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
                if (xdev->chan[i])
                        dma_halt(xdev->chan[i]);
        }
2314 }
2315
2316 static struct platform_driver xilinx_dma_driver = {
2317         .probe = xilinx_dma_probe,
2318         .remove = __exit_p(xilinx_dma_remove),
2319         .shutdown = xilinx_dma_shutdown,
2320         .driver = {
2321                 .owner = THIS_MODULE,
2322                 .name = "xilinx-axidma",
2323         },
2324 };
2325
2326 /*----------------------------------------------------------------------------*/
2327 /* Module Init / Exit                                                         */
2328 /*----------------------------------------------------------------------------*/
2329
2330 static __init int xilinx_dma_init(void)
2331 {
2332         int status;
2333         status = platform_driver_register(&xilinx_dma_driver);
2334         return status;
2335 }
2336 module_init(xilinx_dma_init);
2337
2338 static void __exit xilinx_dma_exit(void)
2339 {
2340         platform_driver_unregister(&xilinx_dma_driver);
2341 }
2342
2343 module_exit(xilinx_dma_exit);
2344 #endif
2345
2346 MODULE_DESCRIPTION("Xilinx DMA/CDMA/VDMA driver");
2347 MODULE_LICENSE("GPL");