4 * Xilinx Accelerator driver support.
6 * Copyright (C) 2010 Xilinx Inc.
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 /* ----------------------------------- Host OS */
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/platform_device.h>
24 #include <linux/slab.h>
25 #include <linux/gfp.h>
27 #include <asm/cacheflush.h>
29 #include <linux/dma-buf.h>
31 #include <linux/string.h>
33 #include <linux/uaccess.h>
35 #include <linux/dmaengine.h>
36 #include <linux/completion.h>
37 #include <linux/wait.h>
39 #include <linux/device.h>
40 #include <linux/init.h>
41 #include <linux/cdev.h>
43 #include <linux/sched.h>
44 #include <linux/pagemap.h>
45 #include <linux/errno.h> /* error codes */
46 #include <linux/dma-mapping.h> /* dma */
48 #include <linux/list.h>
49 #include <linux/dma/xilinx_dma.h>
50 #include <linux/uio_driver.h>
51 #include <asm/cacheflush.h>
52 #include <linux/semaphore.h>
54 #include "xlnk-ioctl.h"
55 #include "xlnk-sysdef.h"
58 #ifdef CONFIG_XILINX_DMA_APF
59 #include "xilinx-dma-apf.h"
62 #define DRIVER_NAME "xlnk"
63 #define DRIVER_VERSION "0.2"
65 static struct platform_device *xlnk_pdev;
66 static struct device *xlnk_dev;
68 static struct cdev xlnk_cdev;
70 static struct class *xlnk_class;
72 static s32 driver_major;
74 static char *driver_name = DRIVER_NAME;
76 static void *xlnk_dev_buf;
77 static ssize_t xlnk_dev_size;
78 static int xlnk_dev_vmas;
80 #define XLNK_BUF_POOL_SIZE 4096
81 static unsigned int xlnk_bufpool_size = XLNK_BUF_POOL_SIZE;
82 static void *xlnk_bufpool[XLNK_BUF_POOL_SIZE];
83 static void *xlnk_bufpool_alloc_point[XLNK_BUF_POOL_SIZE];
84 static xlnk_intptr_type xlnk_userbuf[XLNK_BUF_POOL_SIZE];
85 static int xlnk_buf_process[XLNK_BUF_POOL_SIZE];
86 static dma_addr_t xlnk_phyaddr[XLNK_BUF_POOL_SIZE];
87 static size_t xlnk_buflen[XLNK_BUF_POOL_SIZE];
88 static unsigned int xlnk_bufcacheable[XLNK_BUF_POOL_SIZE];
89 static spinlock_t xlnk_buf_lock;
91 static int xlnk_open(struct inode *ip, struct file *filp);
92 static int xlnk_release(struct inode *ip, struct file *filp);
93 static long xlnk_ioctl(struct file *filp, unsigned int code,
95 static ssize_t xlnk_read(struct file *filp, char __user *buf,
96 size_t count, loff_t *offp);
97 static ssize_t xlnk_write(struct file *filp, const char __user *buf,
98 size_t count, loff_t *offp);
99 static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma);
100 static void xlnk_vma_open(struct vm_area_struct *vma);
101 static void xlnk_vma_close(struct vm_area_struct *vma);
103 static int xlnk_init_bufpool(void);
105 LIST_HEAD(xlnk_dmabuf_list);
107 static int xlnk_shutdown(unsigned long buf);
108 static int xlnk_recover_resource(unsigned long buf);
110 static const struct file_operations xlnk_fops = {
112 .release = xlnk_release,
115 .unlocked_ioctl = xlnk_ioctl,
119 #define MAX_XLNK_DMAS 128
121 struct xlnk_device_pack {
123 struct platform_device pdev;
124 struct resource res[8];
125 struct uio_info *io_ptr;
128 #ifdef CONFIG_XILINX_DMA_APF
129 struct xdma_channel_config dma_chan_cfg[4]; /* for xidane dma only */
130 struct xdma_device_config dma_dev_cfg; /* for xidane dma only */
134 static struct semaphore xlnk_devpack_sem;
135 static struct xlnk_device_pack *xlnk_devpacks[MAX_XLNK_DMAS];
136 static void xlnk_devpacks_init(void)
140 sema_init(&xlnk_devpack_sem, 1);
141 for (i = 0; i < MAX_XLNK_DMAS; i++)
142 xlnk_devpacks[i] = NULL;
145 static struct xlnk_device_pack *xlnk_devpacks_alloc(void)
149 for (i = 0; i < MAX_XLNK_DMAS; i++) {
150 if (!xlnk_devpacks[i]) {
151 struct xlnk_device_pack *ret;
153 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
155 xlnk_devpacks[i] = ret;
164 static void xlnk_devpacks_delete(struct xlnk_device_pack *devpack)
168 for (i = 0; i < MAX_XLNK_DMAS; i++)
169 if (xlnk_devpacks[i] == devpack)
170 xlnk_devpacks[i] = NULL;
174 static struct xlnk_device_pack *xlnk_devpacks_find(xlnk_intptr_type base)
178 for (i = 0; i < MAX_XLNK_DMAS; i++) {
179 if (xlnk_devpacks[i] &&
180 xlnk_devpacks[i]->res[0].start == base)
181 return xlnk_devpacks[i];
186 static void xlnk_devpacks_free(xlnk_intptr_type base)
188 struct xlnk_device_pack *devpack;
190 down(&xlnk_devpack_sem);
191 devpack = xlnk_devpacks_find(base);
193 up(&xlnk_devpack_sem);
198 up(&xlnk_devpack_sem);
201 platform_device_unregister(&devpack->pdev);
202 xlnk_devpacks_delete(devpack);
204 up(&xlnk_devpack_sem);
207 static void xlnk_devpacks_free_all(void)
209 struct xlnk_device_pack *devpack;
212 for (i = 0; i < MAX_XLNK_DMAS; i++) {
213 devpack = xlnk_devpacks[i];
215 if (devpack->io_ptr) {
216 uio_unregister_device(devpack->io_ptr);
217 kfree(devpack->io_ptr);
219 platform_device_unregister(&devpack->pdev);
221 xlnk_devpacks_delete(devpack);
227 static int xlnk_probe(struct platform_device *pdev)
236 /* use 2.6 device model */
237 err = alloc_chrdev_region(&dev, 0, 1, driver_name);
239 dev_err(&pdev->dev, "%s: Can't get major %d\n",
240 __func__, driver_major);
244 cdev_init(&xlnk_cdev, &xlnk_fops);
246 xlnk_cdev.owner = THIS_MODULE;
248 err = cdev_add(&xlnk_cdev, dev, 1);
251 dev_err(&pdev->dev, "%s: Failed to add XLNK device\n",
257 xlnk_class = class_create(THIS_MODULE, "xlnk");
258 if (IS_ERR(xlnk_class)) {
259 dev_err(xlnk_dev, "%s: Error creating xlnk class\n", __func__);
263 driver_major = MAJOR(dev);
265 dev_info(&pdev->dev, "Major %d\n", driver_major);
267 device_create(xlnk_class, NULL, MKDEV(driver_major, 0),
272 dev_info(&pdev->dev, "%s driver loaded\n", DRIVER_NAME);
275 xlnk_dev = &pdev->dev;
278 dev_info(&pdev->dev, "xlnk_pdev is not null\n");
280 dev_info(&pdev->dev, "xlnk_pdev is null\n");
282 xlnk_devpacks_init();
286 cdev_del(&xlnk_cdev);
287 unregister_chrdev_region(dev, 1);
292 static int xlnk_buf_findnull(void)
296 for (i = 1; i < xlnk_bufpool_size; i++) {
297 if (!xlnk_bufpool[i])
304 static int xlnk_buf_find_by_phys_addr(xlnk_intptr_type addr)
308 for (i = 1; i < xlnk_bufpool_size; i++) {
309 if (xlnk_bufpool[i] &&
310 xlnk_phyaddr[i] <= addr &&
311 xlnk_phyaddr[i] + xlnk_buflen[i] > addr)
318 static int xlnk_buf_find_by_user_addr(xlnk_intptr_type addr, int pid)
322 for (i = 1; i < xlnk_bufpool_size; i++) {
323 if (xlnk_bufpool[i] &&
324 xlnk_buf_process[i] == pid &&
325 xlnk_userbuf[i] <= addr &&
326 xlnk_userbuf[i] + xlnk_buflen[i] > addr)
334 * allocate and return an id
335 * id must be a positve number
337 static int xlnk_allocbuf(unsigned int len, unsigned int cacheable)
341 dma_addr_t phys_addr_anchor;
344 attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;
346 kaddr = dma_alloc_attrs(xlnk_dev,
349 GFP_KERNEL | GFP_DMA,
354 spin_lock(&xlnk_buf_lock);
355 id = xlnk_buf_findnull();
356 if (id > 0 && id < XLNK_BUF_POOL_SIZE) {
357 xlnk_bufpool_alloc_point[id] = kaddr;
358 xlnk_bufpool[id] = kaddr;
359 xlnk_buflen[id] = len;
360 xlnk_bufcacheable[id] = cacheable;
361 xlnk_phyaddr[id] = phys_addr_anchor;
363 spin_unlock(&xlnk_buf_lock);
365 if (id <= 0 || id >= XLNK_BUF_POOL_SIZE)
371 static int xlnk_init_bufpool(void)
375 spin_lock_init(&xlnk_buf_lock);
376 xlnk_dev_buf = kmalloc(8192, GFP_KERNEL | GFP_DMA);
377 *((char *)xlnk_dev_buf) = '\0';
380 dev_err(xlnk_dev, "%s: malloc failed\n", __func__);
384 xlnk_bufpool[0] = xlnk_dev_buf;
385 for (i = 1; i < xlnk_bufpool_size; i++)
386 xlnk_bufpool[i] = NULL;
391 #define XLNK_SUSPEND NULL
392 #define XLNK_RESUME NULL
394 static int xlnk_remove(struct platform_device *pdev)
401 devno = MKDEV(driver_major, 0);
402 cdev_del(&xlnk_cdev);
403 unregister_chrdev_region(devno, 1);
405 /* remove the device from sysfs */
406 device_destroy(xlnk_class, MKDEV(driver_major, 0));
407 class_destroy(xlnk_class);
410 xlnk_devpacks_free_all();
415 static const struct of_device_id xlnk_match[] = {
416 { .compatible = "xlnx,xlnk-1.0", },
419 MODULE_DEVICE_TABLE(of, xlnk_match);
421 static struct platform_driver xlnk_driver = {
424 .of_match_table = xlnk_match,
427 .remove = xlnk_remove,
428 .suspend = XLNK_SUSPEND,
429 .resume = XLNK_RESUME,
432 static u64 dma_mask = 0xFFFFFFFFFFFFFFFFull;
435 * This function is called when an application opens handle to the
438 static int xlnk_open(struct inode *ip, struct file *filp)
440 if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
446 static ssize_t xlnk_read(struct file *filp,
453 if (*offp >= xlnk_dev_size)
456 if (*offp + count > xlnk_dev_size)
457 count = xlnk_dev_size - *offp;
459 if (copy_to_user(buf, xlnk_dev_buf + *offp, count)) {
470 static ssize_t xlnk_write(struct file *filp, const char __user *buf,
471 size_t count, loff_t *offp)
475 if (copy_from_user(xlnk_dev_buf + *offp, buf, count)) {
482 if (xlnk_dev_size < *offp)
483 xlnk_dev_size = *offp;
490 * This function is called when an application closes handle to the bridge
493 static int xlnk_release(struct inode *ip, struct file *filp)
498 static int xlnk_devregister(char *name,
500 xlnk_intptr_type base,
503 xlnk_intptr_type *handle)
507 unsigned int *irqptr;
508 struct xlnk_device_pack *devpack;
512 down(&xlnk_devpack_sem);
513 devpack = xlnk_devpacks_find(base);
515 *handle = (xlnk_intptr_type)devpack;
528 up(&xlnk_devpack_sem);
534 devpack = xlnk_devpacks_alloc();
536 up(&xlnk_devpack_sem);
537 pr_err("Failed to allocate device %s\n", name);
540 devpack->io_ptr = NULL;
541 strcpy(devpack->name, name);
542 devpack->pdev.name = devpack->name;
544 devpack->pdev.dev.dma_mask = &dma_mask;
545 devpack->pdev.dev.coherent_dma_mask = dma_mask;
547 devpack->res[0].start = base;
548 devpack->res[0].end = base + size - 1;
549 devpack->res[0].flags = IORESOURCE_MEM;
551 for (i = 0; i < nirq; i++) {
552 devpack->res[i + 1].start = irqs[i];
553 devpack->res[i + 1].end = irqs[i];
554 devpack->res[i + 1].flags = IORESOURCE_IRQ;
557 devpack->pdev.resource = devpack->res;
558 devpack->pdev.num_resources = nres;
560 status = platform_device_register(&devpack->pdev);
562 xlnk_devpacks_delete(devpack);
565 *handle = (xlnk_intptr_type)devpack;
568 up(&xlnk_devpack_sem);
573 static int xlnk_dmaregister(char *name,
575 xlnk_intptr_type base,
577 unsigned int chan_num,
578 unsigned int chan0_dir,
579 unsigned int chan0_irq,
580 unsigned int chan0_poll_mode,
581 unsigned int chan0_include_dre,
582 unsigned int chan0_data_width,
583 unsigned int chan1_dir,
584 unsigned int chan1_irq,
585 unsigned int chan1_poll_mode,
586 unsigned int chan1_include_dre,
587 unsigned int chan1_data_width,
588 xlnk_intptr_type *handle)
592 #ifdef CONFIG_XILINX_DMA_APF
594 struct xlnk_device_pack *devpack;
596 if (chan_num < 1 || chan_num > 2) {
597 pr_err("%s: Expected either 1 or 2 channels, got %d\n",
602 down(&xlnk_devpack_sem);
603 devpack = xlnk_devpacks_find(base);
605 *handle = (xlnk_intptr_type)devpack;
609 devpack = xlnk_devpacks_alloc();
611 up(&xlnk_devpack_sem);
614 strcpy(devpack->name, name);
615 devpack->pdev.name = "xilinx-axidma";
617 devpack->io_ptr = NULL;
619 devpack->dma_chan_cfg[0].include_dre = chan0_include_dre;
620 devpack->dma_chan_cfg[0].datawidth = chan0_data_width;
621 devpack->dma_chan_cfg[0].irq = chan0_irq;
622 devpack->dma_chan_cfg[0].poll_mode = chan0_poll_mode;
623 devpack->dma_chan_cfg[0].type =
624 (chan0_dir == XLNK_DMA_FROM_DEVICE) ?
625 "axi-dma-s2mm-channel" :
626 "axi-dma-mm2s-channel";
629 devpack->dma_chan_cfg[1].include_dre =
631 devpack->dma_chan_cfg[1].datawidth = chan1_data_width;
632 devpack->dma_chan_cfg[1].irq = chan1_irq;
633 devpack->dma_chan_cfg[1].poll_mode = chan1_poll_mode;
634 devpack->dma_chan_cfg[1].type =
635 (chan1_dir == XLNK_DMA_FROM_DEVICE) ?
636 "axi-dma-s2mm-channel" :
637 "axi-dma-mm2s-channel";
640 devpack->dma_dev_cfg.name = devpack->name;
641 devpack->dma_dev_cfg.type = "axi-dma";
642 devpack->dma_dev_cfg.include_sg = 1;
643 devpack->dma_dev_cfg.sg_include_stscntrl_strm = 1;
644 devpack->dma_dev_cfg.channel_count = chan_num;
645 devpack->dma_dev_cfg.channel_config = &devpack->dma_chan_cfg[0];
647 devpack->pdev.dev.platform_data = &devpack->dma_dev_cfg;
649 devpack->pdev.dev.dma_mask = &dma_mask;
650 devpack->pdev.dev.coherent_dma_mask = dma_mask;
652 devpack->res[0].start = base;
653 devpack->res[0].end = base + size - 1;
654 devpack->res[0].flags = IORESOURCE_MEM;
656 devpack->pdev.resource = devpack->res;
657 devpack->pdev.num_resources = 1;
658 status = platform_device_register(&devpack->pdev);
660 xlnk_devpacks_delete(devpack);
663 *handle = (xlnk_intptr_type)devpack;
666 up(&xlnk_devpack_sem);
672 static int xlnk_allocbuf_ioctl(struct file *filp,
676 union xlnk_args temp_args;
680 status = copy_from_user(&temp_args, (void __user *)args,
681 sizeof(union xlnk_args));
686 id = xlnk_allocbuf(temp_args.allocbuf.len,
687 temp_args.allocbuf.cacheable);
692 temp_args.allocbuf.id = id;
693 temp_args.allocbuf.phyaddr = (xlnk_intptr_type)(xlnk_phyaddr[id]);
694 status = copy_to_user((void __user *)args,
696 sizeof(union xlnk_args));
701 static int xlnk_freebuf(int id)
709 if (id <= 0 || id >= xlnk_bufpool_size)
712 if (!xlnk_bufpool[id])
715 spin_lock(&xlnk_buf_lock);
716 alloc_point = xlnk_bufpool_alloc_point[id];
717 p_addr = xlnk_phyaddr[id];
718 buf_len = xlnk_buflen[id];
719 xlnk_bufpool[id] = NULL;
720 xlnk_phyaddr[id] = (dma_addr_t)NULL;
722 cacheable = xlnk_bufcacheable[id];
723 xlnk_bufcacheable[id] = 0;
724 spin_unlock(&xlnk_buf_lock);
726 attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;
728 dma_free_attrs(xlnk_dev,
737 static void xlnk_free_all_buf(void)
741 for (i = 1; i < xlnk_bufpool_size; i++)
745 static int xlnk_freebuf_ioctl(struct file *filp,
749 union xlnk_args temp_args;
753 status = copy_from_user(&temp_args, (void __user *)args,
754 sizeof(union xlnk_args));
759 id = temp_args.freebuf.id;
760 return xlnk_freebuf(id);
763 static int xlnk_adddmabuf_ioctl(struct file *filp,
767 union xlnk_args temp_args;
768 struct xlnk_dmabuf_reg *db;
771 status = copy_from_user(&temp_args, (void __user *)args,
772 sizeof(union xlnk_args));
777 spin_lock(&xlnk_buf_lock);
778 list_for_each_entry(db, &xlnk_dmabuf_list, list) {
779 if (db->user_vaddr == temp_args.dmasubmit.buf) {
780 pr_err("Attempting to register DMA-BUF for addr %llx that is already registered\n",
781 (unsigned long long)temp_args.dmabuf.user_addr);
782 spin_unlock(&xlnk_buf_lock);
786 spin_unlock(&xlnk_buf_lock);
788 db = kzalloc(sizeof(*db), GFP_KERNEL);
792 db->dmabuf_fd = temp_args.dmabuf.dmabuf_fd;
793 db->user_vaddr = temp_args.dmabuf.user_addr;
794 db->dbuf = dma_buf_get(db->dmabuf_fd);
795 db->dbuf_attach = dma_buf_attach(db->dbuf, xlnk_dev);
796 if (IS_ERR(db->dbuf_attach)) {
797 dma_buf_put(db->dbuf);
798 pr_err("Failed DMA-BUF attach\n");
802 db->dbuf_sg_table = dma_buf_map_attachment(db->dbuf_attach,
805 if (!db->dbuf_sg_table) {
806 pr_err("Failed DMA-BUF map_attachment\n");
807 dma_buf_detach(db->dbuf, db->dbuf_attach);
808 dma_buf_put(db->dbuf);
812 spin_lock(&xlnk_buf_lock);
813 INIT_LIST_HEAD(&db->list);
814 list_add_tail(&db->list, &xlnk_dmabuf_list);
815 spin_unlock(&xlnk_buf_lock);
820 static int xlnk_cleardmabuf_ioctl(struct file *filp,
824 union xlnk_args temp_args;
825 struct xlnk_dmabuf_reg *dp, *dp_temp;
828 status = copy_from_user(&temp_args, (void __user *)args,
829 sizeof(union xlnk_args));
834 spin_lock(&xlnk_buf_lock);
835 list_for_each_entry_safe(dp, dp_temp, &xlnk_dmabuf_list, list) {
836 if (dp->user_vaddr == temp_args.dmabuf.user_addr) {
837 dma_buf_unmap_attachment(dp->dbuf_attach,
840 dma_buf_detach(dp->dbuf, dp->dbuf_attach);
841 dma_buf_put(dp->dbuf);
843 spin_unlock(&xlnk_buf_lock);
848 spin_unlock(&xlnk_buf_lock);
849 pr_err("Attempting to unregister a DMA-BUF that was not registered at addr %llx\n",
850 (unsigned long long)temp_args.dmabuf.user_addr);
855 static int xlnk_dmarequest_ioctl(struct file *filp, unsigned int code,
858 #ifdef CONFIG_XILINX_DMA_APF
859 union xlnk_args temp_args;
861 struct xdma_chan *chan;
863 status = copy_from_user(&temp_args, (void __user *)args,
864 sizeof(union xlnk_args));
869 if (!temp_args.dmarequest.name[0])
872 down(&xlnk_devpack_sem);
873 chan = xdma_request_channel(temp_args.dmarequest.name);
874 up(&xlnk_devpack_sem);
877 temp_args.dmarequest.dmachan = (xlnk_intptr_type)chan;
878 temp_args.dmarequest.bd_space_phys_addr = chan->bd_phys_addr;
879 temp_args.dmarequest.bd_space_size = chan->bd_chain_size;
881 if (copy_to_user((void __user *)args,
883 sizeof(union xlnk_args)))
892 static int xlnk_dmasubmit_ioctl(struct file *filp, unsigned int code,
895 #ifdef CONFIG_XILINX_DMA_APF
896 union xlnk_args temp_args;
897 struct xdma_head *dmahead;
898 struct xlnk_dmabuf_reg *dp, *cp = NULL;
903 status = copy_from_user(&temp_args, (void __user *)args,
904 sizeof(union xlnk_args));
909 if (!temp_args.dmasubmit.dmachan)
912 spin_lock(&xlnk_buf_lock);
913 buf_id = xlnk_buf_find_by_phys_addr(temp_args.dmasubmit.buf);
915 xlnk_intptr_type addr_delta =
916 temp_args.dmasubmit.buf -
917 xlnk_phyaddr[buf_id];
918 kaddr = (u8 *)(xlnk_bufpool[buf_id]) + addr_delta;
920 list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
921 if (dp->user_vaddr == temp_args.dmasubmit.buf) {
927 spin_unlock(&xlnk_buf_lock);
929 status = xdma_submit((struct xdma_chan *)
930 (temp_args.dmasubmit.dmachan),
931 temp_args.dmasubmit.buf,
933 temp_args.dmasubmit.len,
934 temp_args.dmasubmit.nappwords_i,
935 temp_args.dmasubmit.appwords_i,
936 temp_args.dmasubmit.nappwords_o,
937 temp_args.dmasubmit.flag,
941 temp_args.dmasubmit.dmahandle = (xlnk_intptr_type)dmahead;
942 temp_args.dmasubmit.last_bd_index =
943 (xlnk_intptr_type)dmahead->last_bd_index;
946 if (copy_to_user((void __user *)args,
948 sizeof(union xlnk_args)))
956 static int xlnk_dmawait_ioctl(struct file *filp,
961 #ifdef CONFIG_XILINX_DMA_APF
962 union xlnk_args temp_args;
963 struct xdma_head *dmahead;
965 status = copy_from_user(&temp_args, (void __user *)args,
966 sizeof(union xlnk_args));
971 dmahead = (struct xdma_head *)temp_args.dmawait.dmahandle;
972 status = xdma_wait(dmahead,
974 &temp_args.dmawait.flags);
975 if (temp_args.dmawait.flags & XDMA_FLAGS_WAIT_COMPLETE) {
976 if (temp_args.dmawait.nappwords) {
977 memcpy(temp_args.dmawait.appwords,
979 dmahead->nappwords_o * sizeof(u32));
983 if (copy_to_user((void __user *)args,
985 sizeof(union xlnk_args)))
992 static int xlnk_dmarelease_ioctl(struct file *filp, unsigned int code,
996 #ifdef CONFIG_XILINX_DMA_APF
997 union xlnk_args temp_args;
999 status = copy_from_user(&temp_args, (void __user *)args,
1000 sizeof(union xlnk_args));
1004 down(&xlnk_devpack_sem);
1005 xdma_release_channel((struct xdma_chan *)
1006 (temp_args.dmarelease.dmachan));
1007 up(&xlnk_devpack_sem);
1013 static int xlnk_devregister_ioctl(struct file *filp, unsigned int code,
1016 union xlnk_args temp_args;
1018 xlnk_intptr_type handle;
1020 status = copy_from_user(&temp_args, (void __user *)args,
1021 sizeof(union xlnk_args));
1026 status = xlnk_devregister(temp_args.devregister.name,
1027 temp_args.devregister.id,
1028 temp_args.devregister.base,
1029 temp_args.devregister.size,
1030 temp_args.devregister.irqs,
1036 static int xlnk_dmaregister_ioctl(struct file *filp, unsigned int code,
1039 union xlnk_args temp_args;
1041 xlnk_intptr_type handle;
1043 status = copy_from_user(&temp_args, (void __user *)args,
1044 sizeof(union xlnk_args));
1049 status = xlnk_dmaregister(temp_args.dmaregister.name,
1050 temp_args.dmaregister.id,
1051 temp_args.dmaregister.base,
1052 temp_args.dmaregister.size,
1053 temp_args.dmaregister.chan_num,
1054 temp_args.dmaregister.chan0_dir,
1055 temp_args.dmaregister.chan0_irq,
1056 temp_args.dmaregister.chan0_poll_mode,
1057 temp_args.dmaregister.chan0_include_dre,
1058 temp_args.dmaregister.chan0_data_width,
1059 temp_args.dmaregister.chan1_dir,
1060 temp_args.dmaregister.chan1_irq,
1061 temp_args.dmaregister.chan1_poll_mode,
1062 temp_args.dmaregister.chan1_include_dre,
1063 temp_args.dmaregister.chan1_data_width,
1069 static int xlnk_devunregister_ioctl(struct file *filp,
1073 union xlnk_args temp_args;
1076 status = copy_from_user(&temp_args, (void __user *)args,
1077 sizeof(union xlnk_args));
1082 xlnk_devpacks_free(temp_args.devunregister.base);
1087 static int xlnk_cachecontrol_ioctl(struct file *filp, unsigned int code,
1090 union xlnk_args temp_args;
1093 xlnk_intptr_type paddr;
1096 status = copy_from_user(&temp_args,
1097 (void __user *)args,
1098 sizeof(union xlnk_args));
1101 dev_err(xlnk_dev, "Error in copy_from_user. status = %d\n",
1106 if (!(temp_args.cachecontrol.action == 0 ||
1107 temp_args.cachecontrol.action == 1)) {
1108 dev_err(xlnk_dev, "Illegal action specified to cachecontrol_ioctl: %d\n",
1109 temp_args.cachecontrol.action);
1113 size = temp_args.cachecontrol.size;
1114 paddr = temp_args.cachecontrol.phys_addr;
1116 spin_lock(&xlnk_buf_lock);
1117 buf_id = xlnk_buf_find_by_phys_addr(paddr);
1118 kaddr = xlnk_bufpool[buf_id];
1119 spin_unlock(&xlnk_buf_lock);
1122 pr_err("Illegal cachecontrol on non-sds_alloc memory");
1126 #if XLNK_SYS_BIT_WIDTH == 32
1127 __cpuc_flush_dcache_area(kaddr, size);
1128 outer_flush_range(paddr, paddr + size);
1129 if (temp_args.cachecontrol.action == 1)
1130 outer_inv_range(paddr, paddr + size);
1132 if (temp_args.cachecontrol.action == 1)
1133 __dma_map_area(kaddr, size, DMA_FROM_DEVICE);
1135 __dma_map_area(kaddr, size, DMA_TO_DEVICE);
1140 static int xlnk_memop_ioctl(struct file *filp, unsigned long arg_addr)
1142 union xlnk_args args;
1143 xlnk_intptr_type p_addr = 0;
1146 struct xlnk_dmabuf_reg *cp = NULL;
1148 enum dma_data_direction dmadir;
1149 xlnk_intptr_type page_id;
1150 unsigned int page_offset;
1151 struct scatterlist sg;
1152 unsigned long attrs = 0;
1154 status = copy_from_user(&args,
1155 (void __user *)arg_addr,
1156 sizeof(union xlnk_args));
1159 pr_err("Error in copy_from_user. status = %d\n", status);
1163 if (!(args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) &&
1164 !(args.memop.flags & XLNK_FLAG_MEM_RELEASE)) {
1165 pr_err("memop lacks acquire or release flag\n");
1169 if (args.memop.flags & XLNK_FLAG_MEM_ACQUIRE &&
1170 args.memop.flags & XLNK_FLAG_MEM_RELEASE) {
1171 pr_err("memop has both acquire and release defined\n");
1175 spin_lock(&xlnk_buf_lock);
1176 buf_id = xlnk_buf_find_by_user_addr(args.memop.virt_addr,
1179 cacheable = xlnk_bufcacheable[buf_id];
1180 p_addr = xlnk_phyaddr[buf_id] +
1181 (args.memop.virt_addr - xlnk_userbuf[buf_id]);
1183 struct xlnk_dmabuf_reg *dp;
1185 list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
1186 if (dp->user_vaddr == args.memop.virt_addr) {
1192 spin_unlock(&xlnk_buf_lock);
1194 if (buf_id <= 0 && !cp) {
1195 pr_err("Error, buffer not found\n");
1199 dmadir = (enum dma_data_direction)args.memop.dir;
1201 if (args.memop.flags & XLNK_FLAG_COHERENT || !cacheable)
1202 attrs |= DMA_ATTR_SKIP_CPU_SYNC;
1205 page_id = p_addr >> PAGE_SHIFT;
1206 page_offset = p_addr - (page_id << PAGE_SHIFT);
1207 sg_init_table(&sg, 1);
1209 pfn_to_page(page_id),
1212 sg_dma_len(&sg) = args.memop.size;
1215 if (args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) {
1217 status = get_dma_ops(xlnk_dev)->map_sg(xlnk_dev,
1223 pr_err("Failed to map address\n");
1226 args.memop.phys_addr = (xlnk_intptr_type)
1227 sg_dma_address(&sg);
1228 args.memop.token = (xlnk_intptr_type)
1229 sg_dma_address(&sg);
1230 status = copy_to_user((void __user *)arg_addr,
1232 sizeof(union xlnk_args));
1234 pr_err("Error in copy_to_user. status = %d\n",
1237 if (cp->dbuf_sg_table->nents != 1) {
1238 pr_err("Non-SG-DMA datamovers require physically contiguous DMABUFs. DMABUF is not physically contiguous\n");
1241 args.memop.phys_addr = (xlnk_intptr_type)
1242 sg_dma_address(cp->dbuf_sg_table->sgl);
1243 args.memop.token = 0;
1244 status = copy_to_user((void __user *)arg_addr,
1246 sizeof(union xlnk_args));
1248 pr_err("Error in copy_to_user. status = %d\n",
1253 sg_dma_address(&sg) = (dma_addr_t)args.memop.token;
1254 get_dma_ops(xlnk_dev)->unmap_sg(xlnk_dev,
1265 /* This function provides IO interface to the bridge driver. */
1266 static long xlnk_ioctl(struct file *filp,
1270 if (_IOC_TYPE(code) != XLNK_IOC_MAGIC)
1272 if (_IOC_NR(code) > XLNK_IOC_MAXNR)
1275 /* some sanity check */
1277 case XLNK_IOCALLOCBUF:
1278 return xlnk_allocbuf_ioctl(filp, code, args);
1279 case XLNK_IOCFREEBUF:
1280 return xlnk_freebuf_ioctl(filp, code, args);
1281 case XLNK_IOCADDDMABUF:
1282 return xlnk_adddmabuf_ioctl(filp, code, args);
1283 case XLNK_IOCCLEARDMABUF:
1284 return xlnk_cleardmabuf_ioctl(filp, code, args);
1285 case XLNK_IOCDMAREQUEST:
1286 return xlnk_dmarequest_ioctl(filp, code, args);
1287 case XLNK_IOCDMASUBMIT:
1288 return xlnk_dmasubmit_ioctl(filp, code, args);
1289 case XLNK_IOCDMAWAIT:
1290 return xlnk_dmawait_ioctl(filp, code, args);
1291 case XLNK_IOCDMARELEASE:
1292 return xlnk_dmarelease_ioctl(filp, code, args);
1293 case XLNK_IOCDEVREGISTER:
1294 return xlnk_devregister_ioctl(filp, code, args);
1295 case XLNK_IOCDMAREGISTER:
1296 return xlnk_dmaregister_ioctl(filp, code, args);
1297 case XLNK_IOCDEVUNREGISTER:
1298 return xlnk_devunregister_ioctl(filp, code, args);
1299 case XLNK_IOCCACHECTRL:
1300 return xlnk_cachecontrol_ioctl(filp, code, args);
1301 case XLNK_IOCSHUTDOWN:
1302 return xlnk_shutdown(args);
1303 case XLNK_IOCRECRES:
1304 return xlnk_recover_resource(args);
1306 return xlnk_memop_ioctl(filp, args);
1312 static const struct vm_operations_struct xlnk_vm_ops = {
1313 .open = xlnk_vma_open,
1314 .close = xlnk_vma_close,
1317 /* This function maps kernel space memory to user space memory. */
1318 static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma)
1323 bufid = vma->vm_pgoff >> (16 - PAGE_SHIFT);
1326 unsigned long paddr = virt_to_phys(xlnk_dev_buf);
1328 status = remap_pfn_range(vma,
1330 paddr >> PAGE_SHIFT,
1331 vma->vm_end - vma->vm_start,
1334 if (xlnk_bufcacheable[bufid] == 0)
1336 pgprot_noncached(vma->vm_page_prot);
1337 status = remap_pfn_range(vma, vma->vm_start,
1340 vma->vm_end - vma->vm_start,
1342 xlnk_userbuf[bufid] = vma->vm_start;
1343 xlnk_buf_process[bufid] = current->pid;
1346 pr_err("%s failed with code %d\n", __func__, status);
1351 vma->vm_ops = &xlnk_vm_ops;
1352 vma->vm_private_data = xlnk_bufpool[bufid];
1357 static void xlnk_vma_open(struct vm_area_struct *vma)
1362 static void xlnk_vma_close(struct vm_area_struct *vma)
1367 static int xlnk_shutdown(unsigned long buf)
1372 static int xlnk_recover_resource(unsigned long buf)
1374 xlnk_free_all_buf();
1375 #ifdef CONFIG_XILINX_DMA_APF
1376 xdma_release_all_channels();
1381 module_platform_driver(xlnk_driver);
1383 MODULE_DESCRIPTION("Xilinx APF driver");
1384 MODULE_LICENSE("GPL");