/*
 * Xilinx Accelerator driver support.
 *
 * Copyright (C) 2010 Xilinx Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/* ----------------------------------- Host OS */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/dma-buf.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/dmaengine.h>
#include <linux/completion.h>
#include <linux/wait.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/errno.h>	/* error codes */
#include <linux/dma-mapping.h>	/* dma */
#include <linux/of.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/uio_driver.h>

#include "xlnk-ioctl.h"
#include "xlnk-config.h"
#include "xlnk-sysdef.h"
#include "xlnk.h"

#ifdef CONFIG_XILINX_DMA_APF
#include "xilinx-dma-apf.h"
#endif

#ifdef CONFIG_XILINX_MCDMA
static void xdma_if_device_release(struct device *op)
{
}
#endif

#define DRIVER_NAME  "xlnk"
#define DRIVER_VERSION  "0.2"

static struct platform_device *xlnk_pdev;
static struct device *xlnk_dev;

static struct cdev xlnk_cdev;

static struct class *xlnk_class;

static s32 driver_major;

static char *driver_name = DRIVER_NAME;

static void *xlnk_dev_buf;
static ssize_t xlnk_dev_size;
static int xlnk_dev_vmas;

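/*
 * Buffer pool bookkeeping: a buffer id is a small positive index into
 * the parallel arrays below (kernel virtual address, allocation anchor,
 * physical address, length, cacheability).  Slot 0 is reserved for the
 * built-in device buffer, and xlnk_buf_lock serializes id allocation
 * and release.
 */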
#define XLNK_BUF_POOL_SIZE 4096
static unsigned int xlnk_bufpool_size = XLNK_BUF_POOL_SIZE;
static void *xlnk_bufpool[XLNK_BUF_POOL_SIZE];
static void *xlnk_bufpool_alloc_point[XLNK_BUF_POOL_SIZE];
static xlnk_intptr_type xlnk_userbuf[XLNK_BUF_POOL_SIZE];
static dma_addr_t xlnk_phyaddr[XLNK_BUF_POOL_SIZE];
static size_t xlnk_buflen[XLNK_BUF_POOL_SIZE];
static unsigned int xlnk_bufcacheable[XLNK_BUF_POOL_SIZE];
static spinlock_t xlnk_buf_lock;

/* only used with standard DMA mode */
static struct page **xlnk_page_store;
static int xlnk_page_store_size;

static int xlnk_open(struct inode *ip, struct file *filp);	/* Open */
static int xlnk_release(struct inode *ip, struct file *filp);	/* Release */
static long xlnk_ioctl(struct file *filp, unsigned int code,
		       unsigned long args);
static ssize_t xlnk_read(struct file *filp, char __user *buf,
			 size_t count, loff_t *offp);
static ssize_t xlnk_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *offp);
static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma);
static void xlnk_vma_open(struct vm_area_struct *vma);
static void xlnk_vma_close(struct vm_area_struct *vma);

static int xlnk_init_bufpool(void);

LIST_HEAD(xlnk_dmabuf_list);

static int xlnk_shutdown(unsigned long buf);
static int xlnk_recover_resource(unsigned long buf);

static const struct file_operations xlnk_fops = {
	.open = xlnk_open,
	.release = xlnk_release,
	.read = xlnk_read,
	.write = xlnk_write,
	.unlocked_ioctl = xlnk_ioctl,
	.mmap = xlnk_mmap,
};

#define MAX_XLNK_DMAS 16

struct xlnk_device_pack {
	char name[64];
	struct platform_device pdev;
	struct resource res[8];
	struct uio_info *io_ptr;

#ifdef CONFIG_XILINX_DMA_APF
	struct xdma_channel_config dma_chan_cfg[4];	/* for xidane dma only */
	struct xdma_device_config dma_dev_cfg;		/* for xidane dma only */
#endif

#ifdef CONFIG_XILINX_MCDMA
	struct xdma_device_info mcdma_dev_cfg;		/* for mcdma only */
#endif
};

static spinlock_t xlnk_devpack_lock;
static struct xlnk_device_pack *xlnk_devpacks[MAX_XLNK_DMAS];

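/*
 * Helpers for the devpack registry: a fixed table of platform devices
 * created on behalf of userspace by the *REGISTER ioctls, keyed by the
 * physical base address in res[0] so that a repeat registration hands
 * back the existing entry instead of creating a duplicate.
 */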
static void xlnk_devpacks_init(void)
{
	unsigned int i;

	spin_lock_init(&xlnk_devpack_lock);
	for (i = 0; i < MAX_XLNK_DMAS; i++)
		xlnk_devpacks[i] = NULL;
}

static void xlnk_devpacks_delete(struct xlnk_device_pack *devpack)
{
	unsigned int i;

	for (i = 0; i < MAX_XLNK_DMAS; i++) {
		if (xlnk_devpacks[i] == devpack)
			xlnk_devpacks[i] = NULL;
	}
}

static void xlnk_devpacks_add(struct xlnk_device_pack *devpack)
{
	unsigned int i;

	spin_lock_irq(&xlnk_devpack_lock);
	for (i = 0; i < MAX_XLNK_DMAS; i++) {
		if (xlnk_devpacks[i] == NULL) {
			xlnk_devpacks[i] = devpack;
			break;
		}
	}
	spin_unlock_irq(&xlnk_devpack_lock);
}

static struct xlnk_device_pack *xlnk_devpacks_find(xlnk_intptr_type base)
{
	unsigned int i;

	for (i = 0; i < MAX_XLNK_DMAS; i++) {
		if (xlnk_devpacks[i] &&
		    xlnk_devpacks[i]->res[0].start == base)
			return xlnk_devpacks[i];
	}
	return NULL;
}

static void xlnk_devpacks_free(xlnk_intptr_type base)
{
	struct xlnk_device_pack *devpack;

	devpack = xlnk_devpacks_find(base);
	if (!devpack)
		return;
	if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
		if (devpack->io_ptr) {
			uio_unregister_device(devpack->io_ptr);
			kfree(devpack->io_ptr);
		}
		if (strcmp(devpack->pdev.name, "xilinx-axidma") != 0)
			platform_device_unregister(&devpack->pdev);
	} else {
		platform_device_unregister(&devpack->pdev);
	}
	xlnk_devpacks_delete(devpack);
	kfree(devpack);
}

static void xlnk_devpacks_free_all(void)
{
	struct xlnk_device_pack *devpack;
	unsigned int i;

	for (i = 0; i < MAX_XLNK_DMAS; i++) {
		devpack = xlnk_devpacks[i];
		if (!devpack)
			continue;
		if (devpack->io_ptr) {
			uio_unregister_device(devpack->io_ptr);
			kfree(devpack->io_ptr);
		} else {
			platform_device_unregister(&devpack->pdev);
		}
		xlnk_devpacks_delete(devpack);
		kfree(devpack);
	}
}

static void xlnk_load_config_from_dt(struct platform_device *pdev)
{
	const char *dma_name = NULL;
	struct xlnk_config_block block;

	xlnk_get_config(&block);

	if (of_property_read_string(xlnk_dev->of_node,
				    "config-dma-type",
				    &dma_name) == 0) {
		if (strcmp(dma_name, "manual") == 0) {
			block.valid_mask[xlnk_config_valid_dma_type] = 1;
			block.dma_type = xlnk_config_dma_manual;
		} else if (strcmp(dma_name, "standard") == 0) {
			block.valid_mask[xlnk_config_valid_dma_type] = 1;
			block.dma_type = xlnk_config_dma_standard;
		} else {
			pr_err("%s: Unrecognized DMA type %s\n",
			       __func__, dma_name);
		}
	}
	xlnk_set_config(&block);
}

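/*
 * A minimal device-tree sketch (illustrative only, using the compatible
 * string from xlnk_match below and the config-dma-type property parsed
 * above; the value may also be "standard"):
 *
 *	xlnk {
 *		compatible = "xlnx,xlnk-1.0";
 *		config-dma-type = "manual";
 *	};
 */
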
static int xlnk_probe(struct platform_device *pdev)
{
	int err;
	dev_t dev = 0;

	/* use 2.6 device model */
	xlnk_page_store_size = 1024;
	xlnk_page_store = vmalloc(sizeof(struct page *) * xlnk_page_store_size);
	if (!xlnk_page_store) {
		pr_err("failed to allocate memory for page store\n");
		return -ENOMEM;
	}
	err = alloc_chrdev_region(&dev, 0, 1, driver_name);
	if (err) {
		dev_err(&pdev->dev, "%s: Can't get major %d\n",
			__func__, driver_major);
		return err;
	}
	cdev_init(&xlnk_cdev, &xlnk_fops);
	xlnk_cdev.owner = THIS_MODULE;
	err = cdev_add(&xlnk_cdev, dev, 1);
	if (err) {
		dev_err(&pdev->dev, "%s: Failed to add XLNK device\n",
			__func__);
		goto err3;
	}
	/* udev support */
	xlnk_class = class_create(THIS_MODULE, "xlnk");
	if (IS_ERR(xlnk_class)) {
		dev_err(xlnk_dev, "%s: Error creating xlnk class\n", __func__);
		err = PTR_ERR(xlnk_class);
		goto err3;
	}
	driver_major = MAJOR(dev);
	dev_info(&pdev->dev, "Major %d\n", driver_major);
	device_create(xlnk_class, NULL, MKDEV(driver_major, 0),
		      NULL, "xlnk");
	xlnk_init_bufpool();
	dev_info(&pdev->dev, "%s driver loaded\n", DRIVER_NAME);
	xlnk_pdev = pdev;
	xlnk_dev = &pdev->dev;
	xlnk_load_config_from_dt(pdev);
	if (xlnk_pdev)
		dev_info(&pdev->dev, "xlnk_pdev is not null\n");
	else
		dev_info(&pdev->dev, "xlnk_pdev is null\n");
	xlnk_devpacks_init();
	return 0;

err3:
	cdev_del(&xlnk_cdev);
	unregister_chrdev_region(dev, 1);
	return err;
}

static int xlnk_buf_findnull(void)
{
	int i;

	for (i = 1; i < xlnk_bufpool_size; i++) {
		if (!xlnk_bufpool[i])
			return i;
	}
	return 0;
}

static int xlnk_buf_find_by_phys_addr(xlnk_intptr_type addr)
{
	int i;

	for (i = 1; i < xlnk_bufpool_size; i++) {
		if (xlnk_bufpool[i] &&
		    xlnk_phyaddr[i] <= addr &&
		    xlnk_phyaddr[i] + xlnk_buflen[i] > addr)
			return i;
	}
	return 0;
}

/*
 * allocate and return an id
 * id must be a positive number
 */
static int xlnk_allocbuf(unsigned int len, unsigned int cacheable)
{
	int id;
	void *kaddr;
	dma_addr_t phys_addr_anchor;
	unsigned int page_dst;

	if (cacheable)
		kaddr = dma_alloc_noncoherent(xlnk_dev,
					      len,
					      &phys_addr_anchor,
					      GFP_KERNEL | GFP_DMA);
	else
		kaddr = dma_alloc_coherent(xlnk_dev,
					   len,
					   &phys_addr_anchor,
					   GFP_KERNEL | GFP_DMA);
	if (!kaddr)
		return -ENOMEM;

	spin_lock(&xlnk_buf_lock);
	id = xlnk_buf_findnull();
	if (id > 0 && id < XLNK_BUF_POOL_SIZE) {
		xlnk_bufpool_alloc_point[id] = kaddr;
		xlnk_bufpool[id] = kaddr;
		xlnk_buflen[id] = len;
		xlnk_bufcacheable[id] = cacheable;
		xlnk_phyaddr[id] = phys_addr_anchor;
	}
	spin_unlock(&xlnk_buf_lock);

	if (id <= 0 || id >= XLNK_BUF_POOL_SIZE) {
		pr_err("No id could be found in range\n");
		return -ENOMEM;
	}

	return id;
}

static int xlnk_init_bufpool(void)
{
	unsigned int i;

	spin_lock_init(&xlnk_buf_lock);
	xlnk_dev_buf = kmalloc(8192, GFP_KERNEL | GFP_DMA);
	if (!xlnk_dev_buf) {
		dev_err(xlnk_dev, "%s: malloc failed\n", __func__);
		return -ENOMEM;
	}
	*((char *)xlnk_dev_buf) = '\0';

	xlnk_bufpool[0] = xlnk_dev_buf;
	for (i = 1; i < xlnk_bufpool_size; i++)
		xlnk_bufpool[i] = NULL;

	return 0;
}

#define XLNK_SUSPEND NULL
#define XLNK_RESUME NULL

static int xlnk_remove(struct platform_device *pdev)
{
	dev_t devno;

	kfree(xlnk_dev_buf);
	xlnk_dev_buf = NULL;

	devno = MKDEV(driver_major, 0);
	cdev_del(&xlnk_cdev);
	unregister_chrdev_region(devno, 1);

	/* remove the device from sysfs */
	device_destroy(xlnk_class, MKDEV(driver_major, 0));
	class_destroy(xlnk_class);

	xlnk_devpacks_free_all();

	return 0;
}

static const struct of_device_id xlnk_match[] = {
	{ .compatible = "xlnx,xlnk-1.0", },
	{ }
};
MODULE_DEVICE_TABLE(of, xlnk_match);

static struct platform_driver xlnk_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = xlnk_match,
	},
	.probe = xlnk_probe,
	.remove = xlnk_remove,
	.suspend = XLNK_SUSPEND,
	.resume = XLNK_RESUME,
};

static u64 dma_mask = 0xFFFFFFFFUL;

/*
 * This function is called when an application opens a handle to the
 * bridge driver.
 */
static int xlnk_open(struct inode *ip, struct file *filp)
{
	if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
		xlnk_dev_size = 0;

	return 0;
}

static ssize_t xlnk_read(struct file *filp, char __user *buf,
			 size_t count, loff_t *offp)
{
	ssize_t retval = 0;

	/* todo: a semaphore is needed for this critical section */

	if (*offp >= xlnk_dev_size)
		goto out;

	if (*offp + count > xlnk_dev_size)
		count = xlnk_dev_size - *offp;

	if (copy_to_user(buf, xlnk_dev_buf + *offp, count)) {
		retval = -EFAULT;
		goto out;
	}
	*offp += count;
	retval = count;

out:
	return retval;
}

static ssize_t xlnk_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *offp)
{
	ssize_t retval = 0;

	/* todo: a semaphore is needed for this critical section */

	if (copy_from_user(xlnk_dev_buf + *offp, buf, count)) {
		retval = -EFAULT;
		goto out;
	}
	*offp += count;
	retval = count;

	if (xlnk_dev_size < *offp)
		xlnk_dev_size = *offp;

out:
	return retval;
}

/*
 * This function is called when an application closes a handle to the
 * bridge driver.
 */
static int xlnk_release(struct inode *ip, struct file *filp)
{
	return 0;
}

static int xlnk_devregister(char *name, unsigned int id,
			    xlnk_intptr_type base, unsigned int size,
			    unsigned int *irqs,
			    xlnk_intptr_type *handle)
{
	unsigned int nres, nirq, i;
	unsigned int *irqptr;
	struct xlnk_device_pack *devpack;
	int status;

	devpack = xlnk_devpacks_find(base);
	if (devpack) {
		*handle = (xlnk_intptr_type)devpack;
		return 0;
	}

	nirq = 0;
	irqptr = irqs;
	while (*irqptr) {
		nirq++;
		irqptr++;
	}
	if (nirq > 7)
		return -ENOMEM;
	nres = nirq + 1;

	devpack = kzalloc(sizeof(struct xlnk_device_pack),
			  GFP_KERNEL);
	if (!devpack)
		return -ENOMEM;
	devpack->io_ptr = NULL;
	strcpy(devpack->name, name);
	devpack->pdev.name = devpack->name;
	devpack->pdev.id = id;
	devpack->pdev.dev.dma_mask = &dma_mask;
	devpack->pdev.dev.coherent_dma_mask = dma_mask;

	devpack->res[0].start = base;
	devpack->res[0].end = base + size - 1;
	devpack->res[0].flags = IORESOURCE_MEM;

	for (i = 0; i < nirq; i++) {
		devpack->res[i + 1].start = irqs[i];
		devpack->res[i + 1].end = irqs[i];
		devpack->res[i + 1].flags = IORESOURCE_IRQ;
	}

	devpack->pdev.resource = devpack->res;
	devpack->pdev.num_resources = nres;

	status = platform_device_register(&devpack->pdev);
	if (status) {
		kfree(devpack);
		*handle = 0;
	} else {
		xlnk_devpacks_add(devpack);
		*handle = (xlnk_intptr_type)devpack;
	}

	return status;
}

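/*
 * Register an AXI DMA instance described entirely by ioctl arguments.
 * In manual mode this spawns an "xilinx-axidma" platform device backed
 * by the APF driver; in standard mode only a dummy UIO device is
 * created, since transfers then go through dmaengine channels that the
 * device tree already describes.
 */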
static int xlnk_dmaregister(char *name, unsigned int id,
			    xlnk_intptr_type base, unsigned int size,
			    unsigned int chan_num,
			    unsigned int chan0_dir,
			    unsigned int chan0_irq,
			    unsigned int chan0_poll_mode,
			    unsigned int chan0_include_dre,
			    unsigned int chan0_data_width,
			    unsigned int chan1_dir,
			    unsigned int chan1_irq,
			    unsigned int chan1_poll_mode,
			    unsigned int chan1_include_dre,
			    unsigned int chan1_data_width,
			    xlnk_intptr_type *handle)
{
	int status = 0;

#ifdef CONFIG_XILINX_DMA_APF
	struct xlnk_device_pack *devpack;

	if (chan_num < 1 || chan_num > 2) {
		pr_err("%s: Expected either 1 or 2 channels, got %d\n",
		       __func__, chan_num);
		return -EINVAL;
	}

	devpack = xlnk_devpacks_find(base);
	if (devpack) {
		*handle = (xlnk_intptr_type)devpack;
		return 0;
	}
	devpack = kzalloc(sizeof(struct xlnk_device_pack),
			  GFP_KERNEL);
	if (!devpack)
		return -ENOMEM;
	strcpy(devpack->name, name);
	devpack->pdev.name = "xilinx-axidma";
	if (xlnk_config_dma_type(xlnk_config_dma_standard) &&
	    chan0_data_width == 0 && chan1_data_width == 0) {
		devpack->io_ptr = kzalloc(sizeof(*devpack->io_ptr),
					  GFP_KERNEL);
		if (!devpack->io_ptr) {
			kfree(devpack);
			return -ENOMEM;
		}
		devpack->io_ptr->name = devpack->name;
		devpack->io_ptr->version = "0.0.1";
		devpack->io_ptr->irq = -1;
		if (uio_register_device(xlnk_dev, devpack->io_ptr)) {
			pr_err("UIO dummy failed to install\n");
			kfree(devpack->io_ptr);
			kfree(devpack);
			return -EFAULT;
		}
	} else {
		devpack->io_ptr = NULL;
	}

	devpack->pdev.id = id;

	devpack->dma_chan_cfg[0].include_dre = chan0_include_dre;
	devpack->dma_chan_cfg[0].datawidth = chan0_data_width;
	devpack->dma_chan_cfg[0].irq = chan0_irq;
	devpack->dma_chan_cfg[0].poll_mode = chan0_poll_mode;
	devpack->dma_chan_cfg[0].type =
		(chan0_dir == XLNK_DMA_FROM_DEVICE) ?
			"axi-dma-s2mm-channel" :
			"axi-dma-mm2s-channel";

	if (chan_num > 1) {
		devpack->dma_chan_cfg[1].include_dre = chan1_include_dre;
		devpack->dma_chan_cfg[1].datawidth = chan1_data_width;
		devpack->dma_chan_cfg[1].irq = chan1_irq;
		devpack->dma_chan_cfg[1].poll_mode = chan1_poll_mode;
		devpack->dma_chan_cfg[1].type =
			(chan1_dir == XLNK_DMA_FROM_DEVICE) ?
				"axi-dma-s2mm-channel" :
				"axi-dma-mm2s-channel";
	}

	devpack->dma_dev_cfg.name = devpack->name;
	devpack->dma_dev_cfg.type = "axi-dma";
	devpack->dma_dev_cfg.include_sg = 1;
	devpack->dma_dev_cfg.sg_include_stscntrl_strm = 1;
	devpack->dma_dev_cfg.channel_count = chan_num;
	devpack->dma_dev_cfg.channel_config = &devpack->dma_chan_cfg[0];

	devpack->pdev.dev.platform_data = &devpack->dma_dev_cfg;
	devpack->pdev.dev.dma_mask = &dma_mask;
	devpack->pdev.dev.coherent_dma_mask = dma_mask;

	devpack->res[0].start = base;
	devpack->res[0].end = base + size - 1;
	devpack->res[0].flags = IORESOURCE_MEM;

	devpack->pdev.resource = devpack->res;
	devpack->pdev.num_resources = 1;
	if (xlnk_config_dma_type(xlnk_config_dma_manual))
		status = platform_device_register(&devpack->pdev);
	if (status) {
		kfree(devpack);
		*handle = 0;
	} else {
		xlnk_devpacks_add(devpack);
		*handle = (xlnk_intptr_type)devpack;
	}
#endif

	return status;
}

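/*
 * Multichannel DMA registration follows the same pattern but is only
 * supported by the manual (xdma) backend; res[0] carries the register
 * window and res[1] spans the MM2S..S2MM interrupt range.
 */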
static int xlnk_mcdmaregister(char *name, unsigned int id,
			      xlnk_intptr_type base, unsigned int size,
			      unsigned int mm2s_chan_num,
			      unsigned int mm2s_chan_irq,
			      unsigned int s2mm_chan_num,
			      unsigned int s2mm_chan_irq,
			      xlnk_intptr_type *handle)
{
	int status = -1;

#ifdef CONFIG_XILINX_MCDMA
	struct xlnk_device_pack *devpack;

	if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
		pr_err("Standard driver not yet supporting multichannel\n");
		return -EFAULT;
	}

	if (strcmp(name, "xdma"))
		return -EINVAL;

	devpack = xlnk_devpacks_find(base);
	if (devpack) {
		*handle = (xlnk_intptr_type)devpack;
		return 0;
	}

	devpack = kzalloc(sizeof(struct xlnk_device_pack),
			  GFP_KERNEL);
	if (!devpack)
		return -ENOMEM;

	strcpy(devpack->name, name);
	devpack->pdev.name = devpack->name;
	devpack->pdev.id = id;

	devpack->mcdma_dev_cfg.tx_chans = mm2s_chan_num;
	devpack->mcdma_dev_cfg.rx_chans = s2mm_chan_num;
	devpack->mcdma_dev_cfg.legacy_mode = XDMA_MCHAN_MODE;
	devpack->mcdma_dev_cfg.device_id = id;

	devpack->pdev.dev.platform_data = &devpack->mcdma_dev_cfg;
	devpack->pdev.dev.dma_mask = &dma_mask;
	devpack->pdev.dev.coherent_dma_mask = dma_mask;
	devpack->pdev.dev.release = xdma_if_device_release;

	devpack->res[0].start = base;
	devpack->res[0].end = base + size - 1;
	devpack->res[0].flags = IORESOURCE_MEM;

	devpack->res[1].start = mm2s_chan_irq;
	devpack->res[1].end = s2mm_chan_irq;
	devpack->res[1].flags = IORESOURCE_IRQ;

	devpack->pdev.resource = devpack->res;
	devpack->pdev.num_resources = 2;

	status = platform_device_register(&devpack->pdev);
	if (status) {
		kfree(devpack);
		*handle = 0;
	} else {
		xlnk_devpacks_add(devpack);
		*handle = (xlnk_intptr_type)devpack;
	}
#endif

	return status;
}

static int xlnk_allocbuf_ioctl(struct file *filp, unsigned int code,
			       unsigned long args)
{
	union xlnk_args temp_args;
	int status;
	int id;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -ENOMEM;

	id = xlnk_allocbuf(temp_args.allocbuf.len,
			   temp_args.allocbuf.cacheable);
	if (id <= 0)
		return -ENOMEM;

	temp_args.allocbuf.id = id;
	temp_args.allocbuf.phyaddr = (xlnk_intptr_type)(xlnk_phyaddr[id]);
	status = copy_to_user((void __user *)args, &temp_args,
			      sizeof(union xlnk_args));

	return status;
}

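/*
 * A minimal userspace sketch (not part of this driver) of driving the
 * allocator ioctl; the union xlnk_args layout comes from xlnk-ioctl.h
 * and the field names follow the usage above:
 *
 *	union xlnk_args a = { 0 };
 *	int fd = open("/dev/xlnk", O_RDWR);
 *
 *	a.allocbuf.len = 4096;
 *	a.allocbuf.cacheable = 0;
 *	if (ioctl(fd, XLNK_IOCALLOCBUF, &a) == 0)
 *		printf("id %d phys 0x%lx\n", (int)a.allocbuf.id,
 *		       (unsigned long)a.allocbuf.phyaddr);
 */
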
static int xlnk_freebuf(int id)
{
	void *alloc_point;
	dma_addr_t p_addr;
	size_t buf_len;
	int cacheable;

	if (id <= 0 || id >= xlnk_bufpool_size)
		return -ENOMEM;

	if (!xlnk_bufpool[id])
		return -ENOMEM;

	spin_lock(&xlnk_buf_lock);
	alloc_point = xlnk_bufpool_alloc_point[id];
	p_addr = xlnk_phyaddr[id];
	buf_len = xlnk_buflen[id];
	xlnk_bufpool[id] = NULL;
	xlnk_phyaddr[id] = 0;
	xlnk_buflen[id] = 0;
	cacheable = xlnk_bufcacheable[id];
	xlnk_bufcacheable[id] = 0;
	spin_unlock(&xlnk_buf_lock);

	if (cacheable)
		dma_free_noncoherent(xlnk_dev,
				     buf_len,
				     alloc_point,
				     p_addr);
	else
		dma_free_coherent(xlnk_dev,
				  buf_len,
				  alloc_point,
				  p_addr);

	return 0;
}

static void xlnk_free_all_buf(void)
{
	int i;

	for (i = 1; i < xlnk_bufpool_size; i++)
		xlnk_freebuf(i);
}

static int xlnk_freebuf_ioctl(struct file *filp, unsigned int code,
			      unsigned long args)
{
	union xlnk_args temp_args;
	int status;
	int id;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -ENOMEM;

	id = temp_args.freebuf.id;
	return xlnk_freebuf(id);
}

static int xlnk_adddmabuf_ioctl(struct file *filp, unsigned int code,
				unsigned long args)
{
	union xlnk_args temp_args;
	struct xlnk_dmabuf_reg *db;
	int status;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -ENOMEM;

	db = kzalloc(sizeof(struct xlnk_dmabuf_reg), GFP_KERNEL);
	if (!db)
		return -ENOMEM;

	db->dmabuf_fd = temp_args.dmabuf.dmabuf_fd;
	db->user_vaddr = temp_args.dmabuf.user_addr;
	db->dbuf = dma_buf_get(db->dmabuf_fd);

	INIT_LIST_HEAD(&db->list);
	list_add_tail(&db->list, &xlnk_dmabuf_list);

	return 0;
}

static int xlnk_cleardmabuf_ioctl(struct file *filp, unsigned int code,
				  unsigned long args)
{
	union xlnk_args temp_args;
	struct xlnk_dmabuf_reg *dp, *dp_temp;
	int status;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -ENOMEM;

	list_for_each_entry_safe(dp, dp_temp, &xlnk_dmabuf_list, list) {
		if (dp->user_vaddr == temp_args.dmabuf.user_addr) {
			dma_buf_put(dp->dbuf);
			list_del(&dp->list);
			kfree(dp);
			return 0;
		}
	}

	return -ENOENT;
}

static int xlnk_dmarequest_ioctl(struct file *filp, unsigned int code,
				 unsigned long args)
{
#ifdef CONFIG_XILINX_DMA_APF
	union xlnk_args temp_args;
	int status;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -ENOMEM;

	if (!temp_args.dmarequest.name[0])
		return 0;

	if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
		struct dma_chan *chan;

		if (!xlnk_dev->of_node) {
			pr_err("xlnk %s: No device tree info.\n", __func__);
			return -EFAULT;
		}
		chan = dma_request_slave_channel(xlnk_dev,
						 temp_args.dmarequest.name);
		if (!chan) {
			pr_err("Unable to get channel named %s\n",
			       temp_args.dmarequest.name);
			return -EFAULT;
		}
		temp_args.dmarequest.dmachan = (xlnk_intptr_type)chan;
	} else {
		struct xdma_chan *chan =
			xdma_request_channel(temp_args.dmarequest.name);

		if (!chan)
			return -ENOMEM;
		temp_args.dmarequest.dmachan = (xlnk_intptr_type)chan;
		temp_args.dmarequest.bd_space_phys_addr = chan->bd_phys_addr;
		temp_args.dmarequest.bd_space_size = chan->bd_chain_size;
	}

	if (copy_to_user((void __user *)args, &temp_args,
			 sizeof(union xlnk_args)))
		return -EFAULT;

	return 0;
#else
	return -1;
#endif
}

static void xlnk_complete_dma_callback(void *args)
{
	complete(args);
}

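/*
 * Submit one transfer on a previously requested channel.  In standard
 * mode the buffer is described to the dmaengine slave as a
 * scatter-gather list: a single entry for physically contiguous pool
 * buffers, or one entry per pinned user page otherwise.  In manual
 * mode the work is delegated to xdma_submit() in the APF driver.
 */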
static int xlnk_dmasubmit_ioctl(struct file *filp, unsigned int code,
				unsigned long args)
{
#ifdef CONFIG_XILINX_DMA_APF
	union xlnk_args temp_args;
	struct xdma_head *dmahead;
	struct xlnk_dmabuf_reg *dp, *cp;
	int status;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -ENOMEM;

	if (!temp_args.dmasubmit.dmachan)
		return -ENODEV;

	cp = NULL;

	list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
		if (dp->user_vaddr == temp_args.dmasubmit.buf) {
			cp = dp;
			break;
		}
	}

	if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
		struct xlnk_dma_transfer_handle *t =
			vmalloc(sizeof(struct xlnk_dma_transfer_handle));

		if (!t) {
			pr_err("Could not allocate dma transfer handle\n");
			return -ENOMEM;
		}
		t->transfer_direction = temp_args.dmasubmit.dmadir;
		t->user_addr = (xlnk_intptr_type)temp_args.dmasubmit.buf;
		t->transfer_length = temp_args.dmasubmit.len;
		t->flags = temp_args.dmasubmit.flag;
		t->channel = (struct dma_chan *)(temp_args.dmasubmit.dmachan);
		if (t->flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
			int id = xlnk_buf_find_by_phys_addr(t->user_addr);

			if (!id) {
				pr_err("invalid ID, failing\n");
				vfree(t);
				return -EFAULT;
			}
			t->kern_addr = xlnk_bufpool[id];
			t->sg_effective_length = 1;
			t->sg_list_size = 1;
			t->sg_list = kmalloc(sizeof(*t->sg_list)
					     * (t->sg_list_size),
					     GFP_KERNEL | GFP_DMA);
			if (!t->sg_list) {
				vfree(t);
				return -ENOMEM;
			}
			sg_init_table(t->sg_list, t->sg_list_size);
			t->dma_addr = dma_map_single(t->channel->device->dev,
						     t->kern_addr,
						     t->transfer_length,
						     t->transfer_direction);
			if (dma_mapping_error(t->channel->device->dev,
					      t->dma_addr)) {
				pr_err("DMA mapping error\n");
				kfree(t->sg_list);
				vfree(t);
				return -EFAULT;
			}
			sg_dma_address(t->sg_list) = t->dma_addr;
			sg_dma_len(t->sg_list) = t->transfer_length;
		} else {
			unsigned long it;
			int p_it;
			int locked_page_count;
			unsigned long first_page = t->user_addr / PAGE_SIZE;
			unsigned long last_page =
				(t->user_addr + (t->transfer_length - 1))
				/ PAGE_SIZE;

			t->kern_addr = NULL;
			t->sg_list_size = last_page - first_page + 1;
			t->sg_list = kmalloc(sizeof(*t->sg_list)
					     * (t->sg_list_size),
					     GFP_KERNEL | GFP_DMA);
			if (!t->sg_list) {
				vfree(t);
				return -ENOMEM;
			}
			if (xlnk_page_store_size <= t->sg_list_size) {
				struct page **tmp =
					vmalloc(sizeof(struct page *)
						* 2 * t->sg_list_size);

				if (!tmp) {
					kfree(t->sg_list);
					vfree(t);
					return -ENOMEM;
				}
				vfree(xlnk_page_store);
				xlnk_page_store = tmp;
				xlnk_page_store_size = 2 * t->sg_list_size;
			}
			down_read(&current->mm->mmap_sem);
			locked_page_count =
				get_user_pages(first_page * PAGE_SIZE,
					       t->sg_list_size, 1, 1,
					       xlnk_page_store, NULL);
			up_read(&current->mm->mmap_sem);
			if (locked_page_count != t->sg_list_size) {
				int i;

				pr_err("could not get user pages\n");
				for (i = 0; i < locked_page_count; i++)
					put_page(xlnk_page_store[i]);
				kfree(t->sg_list);
				vfree(t);
				return -EFAULT;
			}
			it = t->user_addr;
			p_it = 0;
			sg_init_table(t->sg_list, t->sg_list_size);
			while (it < t->user_addr + t->transfer_length) {
				unsigned long page_addr =
					(it / PAGE_SIZE) * PAGE_SIZE;
				unsigned long offset = it - page_addr;
				unsigned long page_barrier =
					page_addr + PAGE_SIZE;
				unsigned long segment_end =
					(page_barrier < t->user_addr +
					 t->transfer_length) ?
						page_barrier :
						(t->user_addr +
						 t->transfer_length);
				unsigned long segment_size = segment_end - it;

				sg_set_page(t->sg_list + p_it,
					    xlnk_page_store[p_it],
					    (unsigned int)segment_size,
					    (unsigned int)offset);
				p_it++;
				it = segment_end;
			}
			t->sg_effective_length =
				dma_map_sg(t->channel->device->dev,
					   t->sg_list,
					   t->sg_list_size,
					   t->transfer_direction);
			if (t->sg_effective_length == 0) {
				int i;

				pr_err("could not map user pages\n");
				for (i = 0; i < locked_page_count; i++)
					put_page(xlnk_page_store[i]);
				kfree(t->sg_list);
				vfree(t);
				return -EFAULT;
			}
		}
		t->async_desc =
			t->channel->device->device_prep_slave_sg(
				t->channel, t->sg_list,
				t->sg_effective_length,
				t->transfer_direction,
				DMA_CTRL_ACK | DMA_PREP_INTERRUPT,
				temp_args.dmasubmit.appwords_i);
		if (!t->async_desc) {
			pr_err("Async desc is null, aborting\n");
			return -EFAULT;
		}
		init_completion(&t->completion_handle);
		t->async_desc->callback = &xlnk_complete_dma_callback;
		t->async_desc->callback_param = &t->completion_handle;
		t->dma_cookie = t->async_desc->tx_submit(t->async_desc);
		dma_async_issue_pending(t->channel);
		if (dma_submit_error(t->dma_cookie)) {
			pr_err("Huge problem submitting DMA action\n");
			return -EFAULT;
		}
		temp_args.dmasubmit.dmahandle = (xlnk_intptr_type)t;
	} else {
		void *kaddr = NULL;
		int buf_id;

		spin_lock(&xlnk_buf_lock);
		buf_id =
			xlnk_buf_find_by_phys_addr(temp_args.dmasubmit.buf);
		if (buf_id) {
			xlnk_intptr_type addr_delta =
				temp_args.dmasubmit.buf -
				xlnk_phyaddr[buf_id];

			kaddr = (u8 *)(xlnk_bufpool[buf_id]) + addr_delta;
		}
		spin_unlock(&xlnk_buf_lock);

		status = xdma_submit((struct xdma_chan *)
				     (temp_args.dmasubmit.dmachan),
				     temp_args.dmasubmit.buf,
				     kaddr,
				     temp_args.dmasubmit.len,
				     temp_args.dmasubmit.nappwords_i,
				     temp_args.dmasubmit.appwords_i,
				     temp_args.dmasubmit.nappwords_o,
				     temp_args.dmasubmit.flag,
				     &dmahead,
				     cp);
		if (status)
			return -ENOMEM;

		temp_args.dmasubmit.dmahandle = (xlnk_intptr_type)dmahead;
		temp_args.dmasubmit.last_bd_index =
			(xlnk_intptr_type)dmahead->last_bd_index;
	}
	if (copy_to_user((void __user *)args, &temp_args,
			 sizeof(union xlnk_args)))
		return -EFAULT;

	return 0;
#else
	return -1;
#endif
}

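/*
 * Block until the transfer identified by the DMASUBMIT handle
 * completes; in standard mode this also unmaps the buffer, releases
 * any pinned user pages, and frees the transfer handle.
 */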
static int xlnk_dmawait_ioctl(struct file *filp, unsigned int code,
			      unsigned long args)
{
	int status = -1;

#ifdef CONFIG_XILINX_DMA_APF
	union xlnk_args temp_args;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -ENOMEM;

	if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
		int dma_result;
		struct xlnk_dma_transfer_handle *t =
			(struct xlnk_dma_transfer_handle *)
			temp_args.dmawait.dmahandle;

		wait_for_completion(&t->completion_handle);
		dma_result = dma_async_is_tx_complete(t->channel,
						      t->dma_cookie,
						      NULL, NULL);
		if (dma_result != DMA_COMPLETE) {
			pr_err("Dma transfer failed for unknown reason\n");
			return -1;
		}
		if (t->kern_addr) {
			dma_unmap_single(t->channel->device->dev,
					 t->dma_addr,
					 t->transfer_length,
					 t->transfer_direction);
		} else {
			int i;

			dma_unmap_sg(t->channel->device->dev,
				     t->sg_list,
				     t->sg_list_size,
				     t->transfer_direction);
			for (i = 0; i < t->sg_list_size; i++)
				put_page(sg_page(t->sg_list + i));
		}
		kfree(t->sg_list);
		vfree(t);
	} else {
		struct xdma_head *dmahead =
			(struct xdma_head *)temp_args.dmawait.dmahandle;

		status = xdma_wait(dmahead,
				   dmahead->userflag,
				   &temp_args.dmawait.flags);
		if (temp_args.dmawait.flags & XDMA_FLAGS_WAIT_COMPLETE) {
			if (temp_args.dmawait.nappwords) {
				memcpy(temp_args.dmawait.appwords,
				       dmahead->appwords_o,
				       dmahead->nappwords_o * sizeof(u32));
			}
			kfree(dmahead);
		}
	}
	if (copy_to_user((void __user *)args,
			 &temp_args,
			 sizeof(union xlnk_args)))
		return -EFAULT;
#endif

	return status;
}

static int xlnk_dmarelease_ioctl(struct file *filp, unsigned int code,
				 unsigned long args)
{
	int status = -1;

#ifdef CONFIG_XILINX_DMA_APF
	union xlnk_args temp_args;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -ENOMEM;

	if (xlnk_config_dma_type(xlnk_config_dma_standard))
		dma_release_channel((struct dma_chan *)
				    (temp_args.dmarelease.dmachan));
	else
		xdma_release_channel((struct xdma_chan *)
				     (temp_args.dmarelease.dmachan));
#endif

	return status;
}

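/*
 * The *REGISTER ioctls below are thin marshalling wrappers: each one
 * copies the argument union in from userspace and forwards the
 * unpacked fields to the matching xlnk_*register() helper above.
 */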
static int xlnk_devregister_ioctl(struct file *filp, unsigned int code,
				  unsigned long args)
{
	union xlnk_args temp_args;
	int status;
	xlnk_intptr_type handle;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -ENOMEM;

	status = xlnk_devregister(temp_args.devregister.name,
				  temp_args.devregister.id,
				  temp_args.devregister.base,
				  temp_args.devregister.size,
				  temp_args.devregister.irqs,
				  &handle);

	return status;
}

static int xlnk_dmaregister_ioctl(struct file *filp, unsigned int code,
				  unsigned long args)
{
	union xlnk_args temp_args;
	int status;
	xlnk_intptr_type handle;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -ENOMEM;

	status = xlnk_dmaregister(temp_args.dmaregister.name,
				  temp_args.dmaregister.id,
				  temp_args.dmaregister.base,
				  temp_args.dmaregister.size,
				  temp_args.dmaregister.chan_num,
				  temp_args.dmaregister.chan0_dir,
				  temp_args.dmaregister.chan0_irq,
				  temp_args.dmaregister.chan0_poll_mode,
				  temp_args.dmaregister.chan0_include_dre,
				  temp_args.dmaregister.chan0_data_width,
				  temp_args.dmaregister.chan1_dir,
				  temp_args.dmaregister.chan1_irq,
				  temp_args.dmaregister.chan1_poll_mode,
				  temp_args.dmaregister.chan1_include_dre,
				  temp_args.dmaregister.chan1_data_width,
				  &handle);

	return status;
}

static int xlnk_mcdmaregister_ioctl(struct file *filp, unsigned int code,
				    unsigned long args)
{
	union xlnk_args temp_args;
	int status;
	xlnk_intptr_type handle;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -ENOMEM;

	status = xlnk_mcdmaregister(temp_args.mcdmaregister.name,
				    temp_args.mcdmaregister.id,
				    temp_args.mcdmaregister.base,
				    temp_args.mcdmaregister.size,
				    temp_args.mcdmaregister.mm2s_chan_num,
				    temp_args.mcdmaregister.mm2s_chan_irq,
				    temp_args.mcdmaregister.s2mm_chan_num,
				    temp_args.mcdmaregister.s2mm_chan_irq,
				    &handle);

	return status;
}

static int xlnk_devunregister_ioctl(struct file *filp, unsigned int code,
				    unsigned long args)
{
	union xlnk_args temp_args;
	int status;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -ENOMEM;

	xlnk_devpacks_free(temp_args.devunregister.base);

	return 0;
}

static int xlnk_cachecontrol_ioctl(struct file *filp, unsigned int code,
				   unsigned long args)
{
	union xlnk_args temp_args;
	int status, size;
	void *paddr, *kaddr;
	int buf_id;

	if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
		pr_err("Manual cache management is forbidden in standard dma types\n");
		return -EPERM;
	}

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status) {
		dev_err(xlnk_dev, "Error in copy_from_user. status = %d\n",
			status);
		return -ENOMEM;
	}

	if (!(temp_args.cachecontrol.action == 0 ||
	      temp_args.cachecontrol.action == 1)) {
		dev_err(xlnk_dev, "Illegal action specified to cachecontrol_ioctl: %d\n",
			temp_args.cachecontrol.action);
		return -EINVAL;
	}

	size = temp_args.cachecontrol.size;
	paddr = temp_args.cachecontrol.phys_addr;

	spin_lock(&xlnk_buf_lock);
	buf_id = xlnk_buf_find_by_phys_addr((xlnk_intptr_type)paddr);
	kaddr = xlnk_bufpool[buf_id];
	spin_unlock(&xlnk_buf_lock);

	if (buf_id == 0) {
		pr_err("Illegal cachecontrol on non-sds_alloc memory\n");
		return -EINVAL;
	}

#if XLNK_SYS_BIT_WIDTH == 32
	__cpuc_flush_dcache_area(kaddr, size);
	outer_flush_range(paddr, paddr + size);
	if (temp_args.cachecontrol.action == 1)
		outer_inv_range(paddr, paddr + size);
#else
	if (temp_args.cachecontrol.action == 1)
		__dma_map_area(kaddr, size, DMA_FROM_DEVICE);
	else
		__dma_map_area(kaddr, size, DMA_TO_DEVICE);
#endif
	return 0;
}

static int xlnk_config_ioctl(struct file *filp, unsigned long args)
{
	struct xlnk_config_block block;
	int status, setting = 0, i;

	xlnk_config_clear_block(&block);
	status = copy_from_user(&block, (void __user *)args,
				sizeof(struct xlnk_config_block));
	if (status) {
		pr_err("Error in copy_from_user. status = %d\n", status);
		return -ENOMEM;
	}
	for (i = 0; i < xlnk_config_valid_size; i++)
		if (block.valid_mask[i])
			setting = 1;
	if (setting) {
		status = xlnk_set_config(&block);
	} else {
		xlnk_get_config(&block);
		status = copy_to_user((void __user *)args, &block,
				      sizeof(struct xlnk_config_block));
	}
	return status;
}

/* This function provides IO interface to the bridge driver. */
static long xlnk_ioctl(struct file *filp, unsigned int code,
		       unsigned long args)
{
	long status = 0;

	/* some sanity check */
	if (_IOC_TYPE(code) != XLNK_IOC_MAGIC)
		return -ENOTTY;
	if (_IOC_NR(code) > XLNK_IOC_MAXNR)
		return -ENOTTY;

	switch (code) {
	case XLNK_IOCALLOCBUF:
		status = xlnk_allocbuf_ioctl(filp, code, args);
		break;
	case XLNK_IOCFREEBUF:
		status = xlnk_freebuf_ioctl(filp, code, args);
		break;
	case XLNK_IOCADDDMABUF:
		status = xlnk_adddmabuf_ioctl(filp, code, args);
		break;
	case XLNK_IOCCLEARDMABUF:
		status = xlnk_cleardmabuf_ioctl(filp, code, args);
		break;
	case XLNK_IOCDMAREQUEST:
		status = xlnk_dmarequest_ioctl(filp, code, args);
		break;
	case XLNK_IOCDMASUBMIT:
		status = xlnk_dmasubmit_ioctl(filp, code, args);
		break;
	case XLNK_IOCDMAWAIT:
		status = xlnk_dmawait_ioctl(filp, code, args);
		break;
	case XLNK_IOCDMARELEASE:
		status = xlnk_dmarelease_ioctl(filp, code, args);
		break;
	case XLNK_IOCDEVREGISTER:
		status = xlnk_devregister_ioctl(filp, code, args);
		break;
	case XLNK_IOCDMAREGISTER:
		status = xlnk_dmaregister_ioctl(filp, code, args);
		break;
	case XLNK_IOCMCDMAREGISTER:
		status = xlnk_mcdmaregister_ioctl(filp, code, args);
		break;
	case XLNK_IOCDEVUNREGISTER:
		status = xlnk_devunregister_ioctl(filp, code, args);
		break;
	case XLNK_IOCCACHECTRL:
		status = xlnk_cachecontrol_ioctl(filp, code, args);
		break;
	case XLNK_IOCSHUTDOWN:
		status = xlnk_shutdown(args);
		break;
	case XLNK_IOCRECRES:	/* recover resource */
		status = xlnk_recover_resource(args);
		break;
	case XLNK_IOCCONFIG:
		status = xlnk_config_ioctl(filp, args);
		break;
	default:
		status = -EINVAL;
	}

	return status;
}

static struct vm_operations_struct xlnk_vm_ops = {
	.open = xlnk_vma_open,
	.close = xlnk_vma_close,
};

/* This function maps kernel space memory to user space memory. */
static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int bufid;
	int status;

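	/*
	 * The mmap offset encodes the pool buffer id: userspace passes
	 * (id << 24) as the byte offset, the kernel pre-shifts it by
	 * PAGE_SHIFT into vm_pgoff, so shifting by (24 - PAGE_SHIFT)
	 * recovers the id.  Id 0 selects the built-in xlnk_dev_buf.
	 */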
	bufid = vma->vm_pgoff >> (24 - PAGE_SHIFT);

	if (bufid == 0) {
		status = remap_pfn_range(vma, vma->vm_start,
					 virt_to_phys(xlnk_dev_buf)
					 >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	} else if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
		unsigned long pfn;

		if (vma->vm_start != PAGE_ALIGN(vma->vm_start)) {
			pr_err("Cannot map on non-aligned addresses\n");
			return -1;
		}
		if (xlnk_bufcacheable[bufid] == 0)
			vma->vm_page_prot =
				pgprot_noncached(vma->vm_page_prot);
		pfn = virt_to_pfn(xlnk_bufpool[bufid]);
		status = remap_pfn_range(vma,
					 vma->vm_start,
					 pfn,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
		xlnk_userbuf[bufid] = vma->vm_start;
	} else {
		if (xlnk_bufcacheable[bufid] == 0)
			vma->vm_page_prot =
				pgprot_noncached(vma->vm_page_prot);
		status = remap_pfn_range(vma, vma->vm_start,
					 xlnk_phyaddr[bufid]
					 >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	}
	if (status) {
		pr_err("xlnk_mmap failed with code %d\n", EAGAIN);
		return -EAGAIN;
	}

	xlnk_vma_open(vma);
	vma->vm_ops = &xlnk_vm_ops;
	vma->vm_private_data = xlnk_bufpool[bufid];

	return 0;
}

static void xlnk_vma_open(struct vm_area_struct *vma)
{
	xlnk_dev_vmas++;
}

static void xlnk_vma_close(struct vm_area_struct *vma)
{
	xlnk_dev_vmas--;
}

static int xlnk_shutdown(unsigned long buf)
{
	return 0;
}

static int xlnk_recover_resource(unsigned long buf)
{
	xlnk_free_all_buf();
#ifdef CONFIG_XILINX_DMA_APF
	xdma_release_all_channels();
#endif
	return 0;
}

module_platform_driver(xlnk_driver);

MODULE_DESCRIPTION("Xilinx APF driver");
MODULE_LICENSE("GPL");