/*
 * Xilinx AXI DMA Engine support
 *
 * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
 *
 * This driver supports the Xilinx AXI DMA engine:
 *  . AXI DMA engine: it performs transfers between memory and a device. It
 *    can be configured with one channel or two channels. If configured with
 *    two channels, one is for transmit to the device and the other is for
 *    receive from the device.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/sched.h>
#include <linux/dma-buf.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/of_irq.h>

#include "xilinx-dma-apf.h"
static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);

#define DMA_OUT_64(addr, val)	(writeq(val, addr))
#define DMA_OUT(addr, val)	(iowrite32(val, addr))
#define DMA_IN(addr)		(ioread32(addr))

#define GET_LOW(x)	((u32)((x) & 0xFFFFFFFF))
/* Divide by 2^32 rather than shift so GET_HI() stays well-defined for 32-bit operands */
#define GET_HI(x)	((u32)((x) / 0x100000000))

static int unpin_user_pages(struct scatterlist *sglist, unsigned int cnt);

/* Driver functions */
static void xdma_clean_bd(struct xdma_desc_hw *bd)

static int dma_is_running(struct xdma_chan *chan)
{
	return !(DMA_IN(&chan->regs->sr) & XDMA_SR_HALTED_MASK) &&
		(DMA_IN(&chan->regs->cr) & XDMA_CR_RUNSTOP_MASK);
}

static int dma_is_idle(struct xdma_chan *chan)
{
	return DMA_IN(&chan->regs->sr) & XDMA_SR_IDLE_MASK;
}

static void dma_halt(struct xdma_chan *chan)
{
	DMA_OUT(&chan->regs->cr,
		(DMA_IN(&chan->regs->cr) & ~XDMA_CR_RUNSTOP_MASK));
}

static void dma_start(struct xdma_chan *chan)
{
	DMA_OUT(&chan->regs->cr,
		(DMA_IN(&chan->regs->cr) | XDMA_CR_RUNSTOP_MASK));
}
static int dma_init(struct xdma_chan *chan)
{
	int loop = XDMA_RESET_LOOP;

	DMA_OUT(&chan->regs->cr,
		(DMA_IN(&chan->regs->cr) | XDMA_CR_RESET_MASK));

	/* Wait for the hardware to finish the reset */
		if (!(DMA_IN(&chan->regs->cr) & XDMA_CR_RESET_MASK))
static int xdma_alloc_chan_descriptors(struct xdma_chan *chan)
{
	u8 *ptr;
	int i;

	/*
	 * We need the descriptors to be aligned to 64 bytes
	 * to meet the Xilinx DMA specification requirement.
	 */
	ptr = (u8 *)dma_alloc_coherent(chan->dev,
			(sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT),
			"unable to allocate channel %d descriptor pool\n",

	memset(ptr, 0, (sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT));

	chan->bd_chain_size = sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT;

	/*
	 * Pre-allocate all of the channel's descriptors.
	 */
	for (i = 0; i < XDMA_MAX_BD_CNT; i++) {
		chan->bds[i] = (struct xdma_desc_hw *)
				(ptr + (sizeof(struct xdma_desc_hw) * i));
		chan->bds[i]->next_desc = chan->bd_phys_addr +
					  (sizeof(struct xdma_desc_hw) *
					   ((i + 1) % XDMA_MAX_BD_CNT));
	}

	/* there is at least one descriptor free to be allocated */
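/*
 * The descriptors above form a circular chain inside one coherent
 * allocation: bds[i]->next_desc holds the bus address of
 * bds[(i + 1) % XDMA_MAX_BD_CNT], so the last descriptor points back at the
 * first. As a worked example (values are hypothetical, for illustration
 * only): with XDMA_MAX_BD_CNT of 4 and bd_phys_addr of 0x1000, next_desc is
 * 0x1000 + 1 * sizeof(struct xdma_desc_hw), then + 2 * ..., + 3 * ..., and
 * finally back to 0x1000.
 */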
static void xdma_free_chan_resources(struct xdma_chan *chan)
{
	dev_dbg(chan->dev, "Free all channel resources.\n");
	dma_free_coherent(chan->dev, (sizeof(struct xdma_desc_hw) *
			  XDMA_MAX_BD_CNT), chan->bds[0], chan->bd_phys_addr);
}
static void xilinx_chan_desc_reinit(struct xdma_chan *chan)
{
	struct xdma_desc_hw *desc;
	unsigned int start, end;

	spin_lock_irqsave(&chan->lock, flags);
	end = XDMA_MAX_BD_CNT;

	while (start < end) {
		desc = chan->bds[start];

	/* Re-initialize bd_cur and bd_tail values */

	spin_unlock_irqrestore(&chan->lock, flags);
static void xilinx_chan_desc_cleanup(struct xdma_chan *chan)
{
	struct xdma_head *dmahead;
	struct xdma_desc_hw *desc;
	struct completion *cmp;

	spin_lock_irqsave(&chan->lock, flags);
#define XDMA_BD_STS_RXEOF_MASK 0x04000000
	desc = chan->bds[chan->bd_cur];
	while (desc->status & XDMA_BD_STS_ALL_MASK) {
		if ((desc->status & XDMA_BD_STS_RXEOF_MASK) &&
			pr_info("ERROR: premature EOF on DMA\n");
			dma_init(chan); /* reset the dma HW */
			while (!(desc->dmahead)) {
				if (chan->bd_cur >= XDMA_MAX_BD_CNT)
				desc = chan->bds[chan->bd_cur];

		if ((desc->sw_flag & XDMA_BD_SF_POLL_MODE_MASK))
			if (!(desc->sw_flag & XDMA_BD_SF_SW_DONE_MASK))

		dmahead = (struct xdma_head *)desc->dmahead;
		cmp = (struct completion *)&dmahead->cmp;
		if (dmahead->nappwords_o)
			memcpy(dmahead->appwords_o, desc->app,
			       dmahead->nappwords_o * sizeof(u32));

		if (chan->bd_cur >= XDMA_MAX_BD_CNT)
		desc = chan->bds[chan->bd_cur];

	spin_unlock_irqrestore(&chan->lock, flags);
static void xdma_err_tasklet(unsigned long data)
{
	struct xdma_chan *chan = (struct xdma_chan *)data;

	/* If the reset failed, a hard reset is needed;
	 * the channel is no longer functional.
	 */
		dev_err(chan->dev, "DMA channel reset failed, please reset system\n");

	/* Barrier to ensure the descriptor init has reached memory */
	xilinx_chan_desc_cleanup(chan);

	xilinx_chan_desc_reinit(chan);
}

static void xdma_tasklet(unsigned long data)
{
	struct xdma_chan *chan = (struct xdma_chan *)data;

	xilinx_chan_desc_cleanup(chan);
}
static void dump_cur_bd(struct xdma_chan *chan)
{
	u32 index;

	index = (((u32)DMA_IN(&chan->regs->cdr)) - chan->bd_phys_addr) /
		sizeof(struct xdma_desc_hw);

	dev_err(chan->dev, "cur bd @ %08x\n", (u32)DMA_IN(&chan->regs->cdr));
	dev_err(chan->dev, " buf = %p\n",
		(void *)chan->bds[index]->src_addr);
	dev_err(chan->dev, " ctrl = 0x%08x\n", chan->bds[index]->control);
	dev_err(chan->dev, " sts = 0x%08x\n", chan->bds[index]->status);
	dev_err(chan->dev, " next = %p\n",
		(void *)chan->bds[index]->next_desc);
}
static irqreturn_t xdma_rx_intr_handler(int irq, void *data)
{
	struct xdma_chan *chan = data;
	u32 stat;

	stat = DMA_IN(&chan->regs->sr);

	if (!(stat & XDMA_XR_IRQ_ALL_MASK))

	/* Ack the interrupts */
	DMA_OUT(&chan->regs->sr, (stat & XDMA_XR_IRQ_ALL_MASK));

	if (stat & XDMA_XR_IRQ_ERROR_MASK) {
		dev_err(chan->dev, "Channel %s has errors %x, cdr %x tdr %x\n",
			chan->name, (unsigned int)stat,
			(unsigned int)DMA_IN(&chan->regs->cdr),
			(unsigned int)DMA_IN(&chan->regs->tdr));

		tasklet_schedule(&chan->dma_err_tasklet);
	}

	if (!(chan->poll_mode) && ((stat & XDMA_XR_IRQ_DELAY_MASK) ||
				   (stat & XDMA_XR_IRQ_IOC_MASK)))
		tasklet_schedule(&chan->tasklet);
static irqreturn_t xdma_tx_intr_handler(int irq, void *data)
{
	struct xdma_chan *chan = data;
	u32 stat;

	stat = DMA_IN(&chan->regs->sr);

	if (!(stat & XDMA_XR_IRQ_ALL_MASK))

	/* Ack the interrupts */
	DMA_OUT(&chan->regs->sr, (stat & XDMA_XR_IRQ_ALL_MASK));

	if (stat & XDMA_XR_IRQ_ERROR_MASK) {
		dev_err(chan->dev, "Channel %s has errors %x, cdr %x tdr %x\n",
			chan->name, (unsigned int)stat,
			(unsigned int)DMA_IN(&chan->regs->cdr),
			(unsigned int)DMA_IN(&chan->regs->tdr));

		tasklet_schedule(&chan->dma_err_tasklet);
	}

	if (!(chan->poll_mode) && ((stat & XDMA_XR_IRQ_DELAY_MASK) ||
				   (stat & XDMA_XR_IRQ_IOC_MASK)))
		tasklet_schedule(&chan->tasklet);
static void xdma_start_transfer(struct xdma_chan *chan,
				int start_index, int end_index)
{
	xlnk_intptr_type cur_phys;
	xlnk_intptr_type tail_phys;
	u32 regval;

	cur_phys = chan->bd_phys_addr + (start_index *
					 sizeof(struct xdma_desc_hw));
	tail_phys = chan->bd_phys_addr + (end_index *
					  sizeof(struct xdma_desc_hw));
	/* If hardware is busy, move the tail & return */
	if (dma_is_running(chan) || dma_is_idle(chan)) {
#if XLNK_SYS_BIT_WIDTH == 32
		DMA_OUT(&chan->regs->tdr, tail_phys);
#else
		DMA_OUT_64(&chan->regs->tdr, tail_phys);
#endif
		return;
	}

#if XLNK_SYS_BIT_WIDTH == 32
	DMA_OUT(&chan->regs->cdr, cur_phys);
#else
	DMA_OUT_64(&chan->regs->cdr, cur_phys);
#endif

	/* Enable interrupts */
	regval = DMA_IN(&chan->regs->cr);
	regval |= (chan->poll_mode ? XDMA_XR_IRQ_ERROR_MASK
				   : XDMA_XR_IRQ_ALL_MASK);
	DMA_OUT(&chan->regs->cr, regval);

	/* Update tail ptr register and start the transfer */
#if XLNK_SYS_BIT_WIDTH == 32
	DMA_OUT(&chan->regs->tdr, tail_phys);
#else
	DMA_OUT_64(&chan->regs->tdr, tail_phys);
#endif
}
static int xdma_setup_hw_desc(struct xdma_chan *chan,
			      struct xdma_head *dmahead,
			      struct scatterlist *sgl,
			      unsigned int sg_len,
			      enum dma_data_direction direction,
			      unsigned int nappwords_i,
			      u32 *appwords_i)
{
	struct xdma_desc_hw *bd = NULL;
	struct scatterlist *sg;
	int i, start_index = -1, end_index1 = 0, end_index2 = -1;
	unsigned int bd_used_saved;

		pr_err("Requested transfer on invalid channel\n");

	/* if we almost run out of BDs, try to recycle some */
	if ((chan->poll_mode) && (chan->bd_used >= XDMA_BD_CLEANUP_THRESHOLD))
		xilinx_chan_desc_cleanup(chan);

	spin_lock_irqsave(&chan->lock, flags);

	bd_used_saved = chan->bd_used;
	/*
	 * Build transactions using the information in the scatter-gather list
	 */
	for_each_sg(sgl, sg, sg_len, i) {

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			/* Allocate the link descriptor from the DMA pool */
			bd = chan->bds[chan->bd_tail];
			if ((bd->control) & (XDMA_BD_STS_ACTUAL_LEN_MASK)) {
				end_index2 = chan->bd_tail;
				/* If first was not set, then we failed to
				 * allocate the very first descriptor,
				 */
				if (start_index == -1)

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the DMA controller limit
			 */
			copy = min((size_t)(sg_dma_len(sg) - sg_used),
				   (size_t)chan->max_len);
			/*
			 * Only the src address for DMA
			 */
			dma_src = sg_dma_address(sg) + sg_used;
			bd->src_addr = dma_src;

			/* Fill in the descriptor */

			/*
			 * If this is not the first descriptor, chain the
			 * current descriptor after the previous descriptor.
			 *
			 * For the first DMA_TO_DEVICE transfer, set SOP.
			 */
			if (start_index == -1) {
				start_index = chan->bd_tail;

				memcpy(bd->app, appwords_i,
				       nappwords_i * sizeof(u32));

				if (direction == DMA_TO_DEVICE)
					bd->control |= XDMA_BD_SOP;

			end_index2 = chan->bd_tail;

			if (chan->bd_tail >= XDMA_MAX_BD_CNT) {
				end_index1 = XDMA_MAX_BD_CNT;

	if (start_index == -1) {

	bd->dmahead = (xlnk_intptr_type)dmahead;
	bd->sw_flag = chan->poll_mode ? XDMA_BD_SF_POLL_MODE_MASK : 0;
	dmahead->last_bd_index = end_index2;

	if (direction == DMA_TO_DEVICE)
		bd->control |= XDMA_BD_EOP;

	/* Barrier to ensure the control word writes have committed */

	xdma_start_transfer(chan, start_index, end_index2);

	spin_unlock_irqrestore(&chan->lock, flags);

	for (i = start_index; i < end_index2; i++)
		xdma_clean_bd(chan->bds[i]);

	/* clean till the end of the bd list first, and then the 2nd end */
	for (i = start_index; i < end_index1; i++)
		xdma_clean_bd(chan->bds[i]);

	for (i = end_index1; i < end_index2; i++)
		xdma_clean_bd(chan->bds[i]);

	/* Move the bd_tail back */
	chan->bd_tail = start_index;
	chan->bd_used = bd_used_saved;

	spin_unlock_irqrestore(&chan->lock, flags);
/*
 * Create a minimal-length scatter-gather list for a physically contiguous
 * buffer that starts at phy_buf and is phy_buf_len bytes long.
 */
static unsigned int phy_buf_to_sgl(xlnk_intptr_type phy_buf,
				   unsigned int phy_buf_len,
				   struct scatterlist *sgl)
{
	unsigned int sgl_cnt = 0;
	struct scatterlist *sgl_head;
	unsigned int dma_len;
	unsigned int num_bd;

	if (!phy_buf || !phy_buf_len) {
		pr_err("phy_buf is NULL or phy_buf_len = 0\n");

	num_bd = (phy_buf_len + (XDMA_MAX_TRANS_LEN - 1))
		/ XDMA_MAX_TRANS_LEN;

	sg_init_table(sgl, num_bd);

	while (phy_buf_len > 0) {
		xlnk_intptr_type page_id = phy_buf >> PAGE_SHIFT;
		unsigned int offset = phy_buf - (page_id << PAGE_SHIFT);

		if (sgl_cnt > XDMA_MAX_BD_CNT)

		dma_len = (phy_buf_len > XDMA_MAX_TRANS_LEN) ?
				XDMA_MAX_TRANS_LEN : phy_buf_len;

		sg_set_page(sgl_head, pfn_to_page(page_id), dma_len, offset);
		sg_dma_address(sgl_head) = (dma_addr_t)phy_buf;
		sg_dma_len(sgl_head) = dma_len;
		sgl_head = sg_next(sgl_head);

		phy_buf_len -= dma_len;
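/*
 * Worked example of the split above (values are illustrative only, not the
 * real XDMA_MAX_TRANS_LEN): with a hypothetical limit of 4 MiB, a 9 MiB
 * contiguous buffer gives num_bd = (9 MiB + 4 MiB - 1) / 4 MiB = 3, i.e.
 * two full 4 MiB sg entries followed by one 1 MiB entry, each carrying the
 * bus address of its chunk.
 */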
/* Merge the sg list, sgl, of length sgl_len into sgl_merged to save DMA BDs */
static unsigned int sgl_merge(struct scatterlist *sgl,
			      unsigned int sgl_len,
			      struct scatterlist *sgl_merged)
{
	struct scatterlist *sghead, *sgend, *sgnext, *sg_merged_head;
	unsigned int sg_visited_cnt = 0, sg_merged_num = 0;
	unsigned int dma_len = 0;

	sg_init_table(sgl_merged, sgl_len);
	sg_merged_head = sgl_merged;

	while (sghead && (sg_visited_cnt < sgl_len)) {
		dma_len = sg_dma_len(sghead);

		sgnext = sg_next(sgend);

		while (sgnext && (sg_visited_cnt < sgl_len)) {
			if ((sg_dma_address(sgend) + sg_dma_len(sgend)) !=
			    sg_dma_address(sgnext))

			if (dma_len + sg_dma_len(sgnext) >= XDMA_MAX_TRANS_LEN)

			dma_len += sg_dma_len(sgend);

			sgnext = sg_next(sgnext);
		}

		if (sg_merged_num > XDMA_MAX_BD_CNT)

		memcpy(sg_merged_head, sghead, sizeof(struct scatterlist));

		sg_dma_len(sg_merged_head) = dma_len;

		sg_merged_head = sg_next(sg_merged_head);
		sghead = sg_next(sgend);
	}

	return sg_merged_num;
}
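/*
 * Illustrative effect of sgl_merge() (the addresses below are made up):
 * two pinned user pages mapped at bus addresses 0x10000000 and 0x10001000,
 * 4 KiB each, are physically adjacent, so they collapse into a single
 * 8 KiB sg entry; a third page at 0x20000000 is not adjacent and starts a
 * new entry. Merging also stops early once an entry would reach
 * XDMA_MAX_TRANS_LEN.
 */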
static int pin_user_pages(xlnk_intptr_type uaddr,
			  struct scatterlist **scatterpp,
			  unsigned int user_flags)
{
	struct mm_struct *mm = current->mm;
	unsigned int first_page;
	unsigned int last_page;
	unsigned int num_pages;
	struct scatterlist *sglist;
	struct page **mapped_pages;

	first_page = uaddr / PAGE_SIZE;
	last_page = (uaddr + ulen - 1) / PAGE_SIZE;
	num_pages = last_page - first_page + 1;
	mapped_pages = vmalloc(sizeof(*mapped_pages) * num_pages);

	down_read(&mm->mmap_sem);
	status = get_user_pages(uaddr, num_pages,
				(write ? FOLL_WRITE : 0) | FOLL_FORCE,
	up_read(&mm->mmap_sem);

	if (status == num_pages) {
		sglist = kcalloc(num_pages,
				 sizeof(struct scatterlist),
			pr_err("%s: kcalloc failed to create sg list\n",

		sg_init_table(sglist, num_pages);

		for (pgidx = 0; pgidx < status; pgidx++) {
			if (pgidx == 0 && num_pages != 1) {
				pgoff = uaddr & (~PAGE_MASK);
				pglen = PAGE_SIZE - pgoff;
			} else if (pgidx == 0 && num_pages == 1) {
				pgoff = uaddr & (~PAGE_MASK);
			} else if (pgidx == num_pages - 1) {
				pglen = ulen - sublen;

			sg_set_page(&sglist[pgidx],
			sg_dma_len(&sglist[pgidx]) = pglen;

	pr_err("Failed to pin user pages\n");
	for (pgidx = 0; pgidx < status; pgidx++)
		put_page(mapped_pages[pgidx]);
static int unpin_user_pages(struct scatterlist *sglist, unsigned int cnt)
{
	for (i = 0; i < cnt; i++) {
		pg = sg_page(sglist + i);
struct xdma_chan *xdma_request_channel(char *name)
{
	int i;
	struct xdma_device *device, *tmp;

	list_for_each_entry_safe(device, tmp, &dma_device_list, node) {
		for (i = 0; i < device->channel_count; i++) {
			if (!strcmp(device->chan[i]->name, name))
				return device->chan[i];
		}
	}
	return NULL;
}
EXPORT_SYMBOL(xdma_request_channel);
void xdma_release_channel(struct xdma_chan *chan)
{ }
EXPORT_SYMBOL(xdma_release_channel);
void xdma_release_all_channels(void)
{
	int i;
	struct xdma_device *device, *tmp;

	list_for_each_entry_safe(device, tmp, &dma_device_list, node) {
		for (i = 0; i < device->channel_count; i++) {
			if (device->chan[i]->client_count) {
				dma_halt(device->chan[i]);
				xilinx_chan_desc_reinit(device->chan[i]);
				pr_info("%s: chan %s freed\n",
					__func__,
					device->chan[i]->name);
			}
		}
	}
}
EXPORT_SYMBOL(xdma_release_all_channels);
static void xdma_release(struct device *dev)
{
}
int xdma_submit(struct xdma_chan *chan,
		xlnk_intptr_type userbuf,
		unsigned int nappwords_i,
		unsigned int nappwords_o,
		unsigned int user_flags,
		struct xdma_head **dmaheadpp,
		struct xlnk_dmabuf_reg *dp)
{
	struct xdma_head *dmahead;
	struct scatterlist *pagelist = NULL;
	struct scatterlist *sglist = NULL;
	unsigned int pagecnt = 0;
	unsigned int sgcnt = 0;
	enum dma_data_direction dmadir;
	unsigned long attrs = 0;

	dmahead = kzalloc(sizeof(*dmahead), GFP_KERNEL);

	dmahead->chan = chan;
	dmahead->userbuf = userbuf;
	dmahead->size = size;
	dmahead->dmadir = chan->direction;
	dmahead->userflag = user_flags;
	dmahead->dmabuf = dp;
	dmadir = chan->direction;

	if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
		attrs |= DMA_ATTR_SKIP_CPU_SYNC;

		struct scatterlist *sg;
		unsigned int remaining_size = size;

		if (IS_ERR_OR_NULL(dp->dbuf_sg_table)) {
			pr_err("%s dmabuf not mapped: %p\n",
			       __func__, dp->dbuf_sg_table);

		if (dp->dbuf_sg_table->nents == 0) {
			pr_err("%s: cannot map a scatterlist with 0 entries\n",

		sglist = kmalloc_array(dp->dbuf_sg_table->nents,

		sg_init_table(sglist, dp->dbuf_sg_table->nents);

		for_each_sg(dp->dbuf_sg_table->sgl,
			    dp->dbuf_sg_table->nents,
			sg_set_page(sglist + i,
			sg_dma_address(sglist + i) = sg_dma_address(sg);
			if (remaining_size == 0) {
				sg_dma_len(sglist + i) = 0;
			} else if (sg_dma_len(sg) > remaining_size) {
				sg_dma_len(sglist + i) = remaining_size;
				sg_dma_len(sglist + i) = sg_dma_len(sg);
				remaining_size -= sg_dma_len(sg);

		dmahead->userbuf = (xlnk_intptr_type)sglist->dma_address;

	} else if (user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
		elem_cnt = DIV_ROUND_UP(size, XDMA_MAX_TRANS_LEN);
		sglist = kmalloc_array(elem_cnt, sizeof(*sglist), GFP_KERNEL);
		sgcnt = phy_buf_to_sgl(userbuf, size, sglist);

		status = get_dma_ops(chan->dev)->map_sg(chan->dev,
			pr_err("sg contiguous mapping failed\n");

		status = pin_user_pages(userbuf,
					dmadir != DMA_TO_DEVICE,
			pr_err("pin_user_pages failed\n");

		status = get_dma_ops(chan->dev)->map_sg(chan->dev,
			pr_err("dma_map_sg failed\n");
			unpin_user_pages(pagelist, pagecnt);

		sglist = kmalloc_array(pagecnt, sizeof(*sglist), GFP_KERNEL);

		sgcnt = sgl_merge(pagelist, pagecnt, sglist);

			get_dma_ops(chan->dev)->unmap_sg(chan->dev,
			unpin_user_pages(pagelist, pagecnt);

	dmahead->sglist = sglist;
	dmahead->sgcnt = sgcnt;
	dmahead->pagelist = pagelist;
	dmahead->pagecnt = pagecnt;

	/* skipping config */
	init_completion(&dmahead->cmp);

	if (nappwords_i > XDMA_MAX_APPWORDS)
		nappwords_i = XDMA_MAX_APPWORDS;

	if (nappwords_o > XDMA_MAX_APPWORDS)
		nappwords_o = XDMA_MAX_APPWORDS;

	dmahead->nappwords_o = nappwords_o;

	status = xdma_setup_hw_desc(chan, dmahead, sglist, sgcnt,
				    dmadir, nappwords_i, appwords_i);
		pr_err("setup hw desc failed\n");
		if (dmahead->pagelist) {
			get_dma_ops(chan->dev)->unmap_sg(chan->dev,
			unpin_user_pages(pagelist, pagecnt);
			get_dma_ops(chan->dev)->unmap_sg(chan->dev,
		kfree(dmahead->sglist);

	*dmaheadpp = dmahead;
}
EXPORT_SYMBOL(xdma_submit);
int xdma_wait(struct xdma_head *dmahead,
	      unsigned int user_flags,
	      unsigned int *operating_flags)
{
	struct xdma_chan *chan = dmahead->chan;
	unsigned long attrs = 0;

	if (chan->poll_mode) {
		xilinx_chan_desc_cleanup(chan);
		*operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;

	if (*operating_flags & XDMA_FLAGS_TRYWAIT) {
		if (!try_wait_for_completion(&dmahead->cmp))
		*operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;

	wait_for_completion(&dmahead->cmp);
	*operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;

	if (!dmahead->dmabuf) {
		if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
			attrs |= DMA_ATTR_SKIP_CPU_SYNC;

		if (user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
			get_dma_ops(chan->dev)->unmap_sg(chan->dev,

			get_dma_ops(chan->dev)->unmap_sg(chan->dev,
			unpin_user_pages(dmahead->pagelist, dmahead->pagecnt);

		kfree(dmahead->sglist);
	}
EXPORT_SYMBOL(xdma_wait);
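/*
 * Typical client flow through the exported API (illustrative sketch only;
 * the channel name, the buffer, the submit arguments elided as "...", and
 * the ownership of the xdma_head are assumptions, not something this file
 * defines):
 *
 *	struct xdma_chan *chan = xdma_request_channel("xilinx-axidma:tx");
 *	struct xdma_head *head = NULL;
 *	unsigned int op_flags = 0;
 *
 *	if (!chan)
 *		return -ENODEV;
 *	if (!xdma_submit(chan, userbuf, ..., &head, NULL)) {
 *		xdma_wait(head, head->userflag, &op_flags);
 *		kfree(head);
 *	}
 *	xdma_release_channel(chan);
 */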
int xdma_getconfig(struct xdma_chan *chan,
		   unsigned char *irq_thresh,
		   unsigned char *irq_delay)
{
	*irq_thresh = (DMA_IN(&chan->regs->cr) >> XDMA_COALESCE_SHIFT) & 0xff;
	*irq_delay = (DMA_IN(&chan->regs->cr) >> XDMA_DELAY_SHIFT) & 0xff;
	return 0;
}
EXPORT_SYMBOL(xdma_getconfig);

int xdma_setconfig(struct xdma_chan *chan,
		   unsigned char irq_thresh,
		   unsigned char irq_delay)
{
	u32 val;

	if (dma_is_running(chan))

	val = DMA_IN(&chan->regs->cr);
	val &= ~((0xff << XDMA_COALESCE_SHIFT) |
		 (0xff << XDMA_DELAY_SHIFT));
	val |= ((irq_thresh << XDMA_COALESCE_SHIFT) |
		(irq_delay << XDMA_DELAY_SHIFT));

	DMA_OUT(&chan->regs->cr, val);
	return 0;
}
EXPORT_SYMBOL(xdma_setconfig);
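/*
 * Illustrative use of the two config helpers above (the values are
 * assumptions, not recommendations): raise the interrupt-coalescing
 * threshold to 8 completed descriptors with the delay timer left at 0,
 * then read the fields back.
 *
 *	unsigned char thresh, delay;
 *
 *	if (!xdma_setconfig(chan, 8, 0))
 *		xdma_getconfig(chan, &thresh, &delay); // thresh == 8, delay == 0
 */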
static const struct of_device_id gic_match[] = {
	{ .compatible = "arm,cortex-a9-gic", },
	{ .compatible = "arm,cortex-a15-gic", },
	{ },
};

static struct device_node *gic_node;

unsigned int xlate_irq(unsigned int hwirq)
{
	struct of_phandle_args irq_data;
	unsigned int irq;

		gic_node = of_find_matching_node(NULL, gic_match);

	if (WARN_ON(!gic_node))

	irq_data.np = gic_node;
	irq_data.args_count = 3;
	irq_data.args[0] = 0;
#if XLNK_SYS_BIT_WIDTH == 32
	irq_data.args[1] = hwirq - 32; /* GIC SPI offset */
#else
	irq_data.args[1] = hwirq;
#endif
	irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;

	irq = irq_create_of_mapping(&irq_data);

	pr_info("%s: hwirq %d, irq %d\n", __func__, hwirq, irq);
/* Brute-force probing for Xilinx DMA */
static int xdma_probe(struct platform_device *pdev)
{
	struct xdma_device *xdev;
	struct resource *res;
	struct xdma_chan *chan;
	struct xdma_device_config *dma_config;
	int dma_chan_reg_offset;

	pr_info("%s: probe dma %p, nres %d, id %d\n", __func__,
		&pdev->dev, pdev->num_resources, pdev->id);

	xdev = devm_kzalloc(&pdev->dev, sizeof(struct xdma_device), GFP_KERNEL);

	xdev->dev = &pdev->dev;

	/* Set this as configurable once HPC works */
	arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, false);
	dma_set_mask(&pdev->dev, 0xFFFFFFFFFFFFFFFFull);

	dma_config = (struct xdma_device_config *)xdev->dev->platform_data;
	if (dma_config->channel_count < 1 || dma_config->channel_count > 2)

	/* Get the memory resource */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, res);
		dev_err(&pdev->dev, "unable to iomap registers\n");

	dev_info(&pdev->dev, "AXIDMA device %d physical base address=%pa\n",
		 pdev->id, &res->start);
	dev_info(&pdev->dev, "AXIDMA device %d remapped to %pa\n",
		 pdev->id, &xdev->regs);

	/* Allocate the channels */
	dev_info(&pdev->dev, "has %d channel(s)\n", dma_config->channel_count);
	for (i = 0; i < dma_config->channel_count; i++) {
		chan = devm_kzalloc(&pdev->dev, sizeof(*chan), GFP_KERNEL);

		dma_chan_dir = strcmp(dma_config->channel_config[i].type,
				      "axi-dma-mm2s-channel") ?
		dma_chan_reg_offset = (dma_chan_dir == DMA_TO_DEVICE) ?

		/* Initialize channel parameters */
		chan->regs = xdev->regs + dma_chan_reg_offset;
		/* chan->regs = xdev->regs; */
		chan->dev = xdev->dev;
		chan->max_len = XDMA_MAX_TRANS_LEN;
		chan->direction = dma_chan_dir;
		sprintf(chan->name, "%s:%d", dma_config->name, chan->id);
		pr_info(" chan %d name: %s\n", chan->id, chan->name);
		pr_info(" chan %d direction: %s\n", chan->id,
			dma_chan_dir == DMA_FROM_DEVICE ?
				"FROM_DEVICE" : "TO_DEVICE");

		spin_lock_init(&chan->lock);
		tasklet_init(&chan->tasklet,
			     (unsigned long)chan);
		tasklet_init(&chan->dma_err_tasklet,
			     (unsigned long)chan);

		xdev->chan[chan->id] = chan;

		/* The IRQ resource */
		chan->irq = xlate_irq(dma_config->channel_config[i].irq);
		if (chan->irq <= 0) {
			pr_err("get_resource for IRQ for dev %d failed\n",

		err = devm_request_irq(&pdev->dev,
				       dma_chan_dir == DMA_TO_DEVICE ?
						xdma_tx_intr_handler :
						xdma_rx_intr_handler,
			dev_err(&pdev->dev, "unable to request IRQ\n");

		pr_info(" chan%d irq: %d\n", chan->id, chan->irq);

		chan->poll_mode = dma_config->channel_config[i].poll_mode;
		pr_info(" chan%d poll mode: %s\n",
			chan->poll_mode ? "on" : "off");

		/* Allocate channel BD's */
		err = xdma_alloc_chan_descriptors(xdev->chan[chan->id]);
			dev_err(&pdev->dev, "unable to allocate BD's\n");

		pr_info(" chan%d bd ring @ 0x%p (size: 0x%x bytes)\n",
			(void *)chan->bd_phys_addr,
			chan->bd_chain_size);

		err = dma_init(xdev->chan[chan->id]);
			dev_err(&pdev->dev, "DMA init failed\n");
			/* FIXME Check this - unregister all chan resources */
			for (j = 0; j <= i; j++)
				xdma_free_chan_resources(xdev->chan[j]);
	}

	xdev->channel_count = dma_config->channel_count;
	pdev->dev.release = xdma_release;
	/* Add the DMA device to the global list */
	mutex_lock(&dma_list_mutex);
	list_add_tail(&xdev->node, &dma_device_list);
	mutex_unlock(&dma_list_mutex);

	platform_set_drvdata(pdev, xdev);
static int xdma_remove(struct platform_device *pdev)
{
	int i;
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	/* Remove the DMA device from the global list */
	mutex_lock(&dma_list_mutex);
	list_del(&xdev->node);
	mutex_unlock(&dma_list_mutex);

	for (i = 0; i < XDMA_MAX_CHANS_PER_DEVICE; i++) {
		if (xdev->chan[i])
			xdma_free_chan_resources(xdev->chan[i]);
	}

	return 0;
}
static struct platform_driver xdma_driver = {
	.probe = xdma_probe,
	.remove = xdma_remove,
	.driver = {
		.name = "xilinx-axidma",
	},
};

module_platform_driver(xdma_driver);

MODULE_DESCRIPTION("Xilinx DMA driver");
MODULE_LICENSE("GPL");