drivers/staging/apf/xlnk.c

/*
 * xlnk.c
 *
 * Xilinx Accelerator driver support.
 *
 * Copyright (C) 2010 Xilinx Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*  ----------------------------------- Host OS */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/dma-buf.h>

#include <linux/string.h>

#include <linux/uaccess.h>

#include <linux/dmaengine.h>
#include <linux/completion.h>
#include <linux/wait.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/cdev.h>

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/errno.h>        /* error codes */
#include <linux/dma-mapping.h>  /* dma */
#include <linux/of.h>
#include <linux/list.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/uio_driver.h>

#include "xlnk-ioctl.h"
#include "xlnk-config.h"
#include "xlnk-sysdef.h"
#include "xlnk.h"

#ifdef CONFIG_XILINX_DMA_APF
#include "xilinx-dma-apf.h"
#endif

#ifdef CONFIG_XILINX_MCDMA
#include "xdma-if.h"
#include "xdma.h"

static void xdma_if_device_release(struct device *op)
{
}

#endif

#define DRIVER_NAME  "xlnk"
#define DRIVER_VERSION  "0.2"

static struct platform_device *xlnk_pdev;
static struct device *xlnk_dev;

static struct cdev xlnk_cdev;

static struct class *xlnk_class;

static s32 driver_major;

static char *driver_name = DRIVER_NAME;

#define XLNK_DEV_BUF_SIZE       8192
static void *xlnk_dev_buf;
static ssize_t xlnk_dev_size;
static int xlnk_dev_vmas;

#define XLNK_BUF_POOL_SIZE      4096
static unsigned int xlnk_bufpool_size = XLNK_BUF_POOL_SIZE;
static void *xlnk_bufpool[XLNK_BUF_POOL_SIZE];
static void *xlnk_bufpool_alloc_point[XLNK_BUF_POOL_SIZE];
static xlnk_intptr_type xlnk_userbuf[XLNK_BUF_POOL_SIZE];
static dma_addr_t xlnk_phyaddr[XLNK_BUF_POOL_SIZE];
static size_t xlnk_buflen[XLNK_BUF_POOL_SIZE];
static unsigned int xlnk_bufcacheable[XLNK_BUF_POOL_SIZE];
static spinlock_t xlnk_buf_lock;
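/*
 * The parallel arrays above form a fixed-size buffer pool: one index (the
 * buffer id, 1..XLNK_BUF_POOL_SIZE-1) selects the kernel virtual address,
 * allocation anchor, physical address, length and cacheability of a single
 * allocation.  Id 0 is reserved for xlnk_dev_buf, so the lookup helpers
 * below can use a return value of 0 to mean "not found".  Entries are
 * protected by xlnk_buf_lock.
 */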

/* only used with standard DMA mode */
static struct page **xlnk_page_store;
static int xlnk_page_store_size;

static int xlnk_open(struct inode *ip, struct file *filp);  /* Open */
static int xlnk_release(struct inode *ip, struct file *filp);   /* Release */
static long xlnk_ioctl(struct file *filp, unsigned int code,
                                unsigned long args);
static ssize_t xlnk_read(struct file *filp, char __user *buf,
                          size_t count, loff_t *offp);
static ssize_t xlnk_write(struct file *filp, const char __user *buf,
                          size_t count, loff_t *offp);
static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma);
static void xlnk_vma_open(struct vm_area_struct *vma);
static void xlnk_vma_close(struct vm_area_struct *vma);

static int xlnk_init_bufpool(void);

LIST_HEAD(xlnk_dmabuf_list);

static int xlnk_shutdown(unsigned long buf);
static int xlnk_recover_resource(unsigned long buf);

static const struct file_operations xlnk_fops = {
        .open = xlnk_open,
        .release = xlnk_release,
        .read = xlnk_read,
        .write = xlnk_write,
        .unlocked_ioctl = xlnk_ioctl,
        .mmap = xlnk_mmap,
};
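/*
 * These file operations back the /dev/xlnk character device created in
 * xlnk_probe().  read()/write() only touch the small scratch buffer
 * (xlnk_dev_buf); the real interface is unlocked_ioctl for control and
 * mmap for mapping pool buffers into user space.
 */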

#define MAX_XLNK_DMAS 16

struct xlnk_device_pack {
        char name[64];
        struct platform_device pdev;
        struct resource res[8];
        struct uio_info *io_ptr;

#ifdef CONFIG_XILINX_DMA_APF
        struct xdma_channel_config dma_chan_cfg[4];  /* for xidane dma only */
        struct xdma_device_config dma_dev_cfg;     /* for xidane dma only */
#endif

#ifdef CONFIG_XILINX_MCDMA
        struct xdma_device_info mcdma_dev_cfg;   /* for mcdma only */
#endif

};
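/*
 * A device pack bundles everything needed to register one accelerator or
 * DMA engine at run time on behalf of user space: a platform_device, its
 * MEM/IRQ resources and the per-engine platform data.  Packs are tracked
 * in the xlnk_devpacks[] registry below, keyed by the base address of
 * res[0], so repeated registrations of the same device return the
 * existing handle.
 */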

static spinlock_t xlnk_devpack_lock;
static struct xlnk_device_pack *xlnk_devpacks[MAX_XLNK_DMAS];

static void xlnk_devpacks_init(void)
{
        unsigned int i;

        spin_lock_init(&xlnk_devpack_lock);
        for (i = 0; i < MAX_XLNK_DMAS; i++)
                xlnk_devpacks[i] = NULL;
}

static void xlnk_devpacks_delete(struct xlnk_device_pack *devpack)
{
        unsigned int i;

        for (i = 0; i < MAX_XLNK_DMAS; i++) {
                if (xlnk_devpacks[i] == devpack)
                        xlnk_devpacks[i] = NULL;
        }
}

static void xlnk_devpacks_add(struct xlnk_device_pack *devpack)
{
        unsigned int i;

        spin_lock_irq(&xlnk_devpack_lock);
        for (i = 0; i < MAX_XLNK_DMAS; i++) {
                if (xlnk_devpacks[i] == NULL) {
                        xlnk_devpacks[i] = devpack;
                        break;
                }
        }
        spin_unlock_irq(&xlnk_devpack_lock);
}

static struct xlnk_device_pack *xlnk_devpacks_find(xlnk_intptr_type base)
{
        unsigned int i;

        for (i = 0; i < MAX_XLNK_DMAS; i++) {
                if (xlnk_devpacks[i] &&
                    xlnk_devpacks[i]->res[0].start == base)
                        return xlnk_devpacks[i];
        }
        return NULL;
}

static void xlnk_devpacks_free(xlnk_intptr_type base)
{
        struct xlnk_device_pack *devpack;

        devpack = xlnk_devpacks_find(base);
        if (devpack) {
                if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
                        if (devpack->io_ptr)
                                uio_unregister_device(devpack->io_ptr);
                        if (strcmp(devpack->pdev.name, "xilinx-axidma") != 0)
                                platform_device_unregister(&devpack->pdev);
                } else {
                        platform_device_unregister(&devpack->pdev);
                }
                xlnk_devpacks_delete(devpack);
                kfree(devpack);
        }
}

static void xlnk_devpacks_free_all(void)
{
        struct xlnk_device_pack *devpack;
        unsigned int i;

        for (i = 0; i < MAX_XLNK_DMAS; i++) {
                devpack = xlnk_devpacks[i];
                if (devpack) {
                        if (devpack->io_ptr) {
                                uio_unregister_device(devpack->io_ptr);
                                kfree(devpack->io_ptr);
                        } else {
                                platform_device_unregister(&devpack->pdev);
                        }
                        xlnk_devpacks_delete(devpack);
                        kfree(devpack);
                }
        }
}

static void xlnk_load_config_from_dt(struct platform_device *pdev)
{
        const char *dma_name = NULL;
        struct xlnk_config_block block;

        xlnk_init_config();
        xlnk_get_config(&block);

        if (of_property_read_string(xlnk_dev->of_node,
                                    "config-dma-type",
                                    &dma_name) == 0) {
                if (strcmp(dma_name, "manual") == 0) {
                        block.valid_mask[xlnk_config_valid_dma_type] = 1;
                        block.dma_type = xlnk_config_dma_manual;
                } else if (strcmp(dma_name, "standard") == 0) {
                        block.valid_mask[xlnk_config_valid_dma_type] = 1;
                        block.dma_type = xlnk_config_dma_standard;
                } else {
                        pr_err("%s: Unrecognized DMA type %s\n",
                               __func__, dma_name);
                }
        }
        xlnk_set_config(&block);
}

static int xlnk_probe(struct platform_device *pdev)
{
        int err;
        dev_t dev = 0;

        xlnk_dev_buf = NULL;
        xlnk_dev_size = 0;
        xlnk_dev_vmas = 0;

        xlnk_page_store_size = 1024;
        xlnk_page_store = vmalloc(sizeof(struct page *) * xlnk_page_store_size);
        if (!xlnk_page_store) {
                pr_err("failed to allocate memory for page store\n");
                err = -ENOMEM;
                goto err1;
        }
        err = alloc_chrdev_region(&dev, 0, 1, driver_name);
        if (err) {
                dev_err(&pdev->dev, "%s: Can't get major %d\n",
                        __func__, driver_major);
                goto err1;
        }

        cdev_init(&xlnk_cdev, &xlnk_fops);

        xlnk_cdev.owner = THIS_MODULE;

        err = cdev_add(&xlnk_cdev, dev, 1);

        if (err) {
                dev_err(&pdev->dev, "%s: Failed to add XLNK device\n",
                        __func__);
                goto err3;
        }

        /* udev support */
        xlnk_class = class_create(THIS_MODULE, "xlnk");
        if (IS_ERR(xlnk_class)) {
                err = PTR_ERR(xlnk_class);
                dev_err(&pdev->dev, "%s: Error creating xlnk class\n",
                        __func__);
                goto err3;
        }

        driver_major = MAJOR(dev);

        dev_info(&pdev->dev, "Major %d\n", driver_major);

        device_create(xlnk_class, NULL, MKDEV(driver_major, 0),
                      NULL, "xlnk");

        xlnk_init_bufpool();

        dev_info(&pdev->dev, "%s driver loaded\n", DRIVER_NAME);

        xlnk_pdev = pdev;
        xlnk_dev = &pdev->dev;

        xlnk_load_config_from_dt(pdev);

        if (xlnk_pdev)
                dev_info(&pdev->dev, "xlnk_pdev is not null\n");
        else
                dev_info(&pdev->dev, "xlnk_pdev is null\n");

        xlnk_devpacks_init();

        return 0;

err3:
        cdev_del(&xlnk_cdev);
        unregister_chrdev_region(dev, 1);
err1:
        vfree(xlnk_page_store);
        return err;
}

static int xlnk_buf_findnull(void)
{
        int i;

        for (i = 1; i < xlnk_bufpool_size; i++) {
                if (!xlnk_bufpool[i])
                        return i;
        }

        return 0;
}

static int xlnk_buf_find_by_phys_addr(xlnk_intptr_type addr)
{
        int i;

        for (i = 1; i < xlnk_bufpool_size; i++) {
                if (xlnk_bufpool[i] &&
                    xlnk_phyaddr[i] <= addr &&
                    xlnk_phyaddr[i] + xlnk_buflen[i] > addr)
                        return i;
        }

        return 0;
}
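
/*
 * Both lookup helpers above return a buffer id, or 0 when nothing matches
 * (id 0 is never handed out).  Callers normally take xlnk_buf_lock while
 * using the returned id, since the pool entry can otherwise be freed
 * underneath them.
 */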

/**
 * xlnk_allocbuf() - allocate a DMA buffer and hand out a pool id
 * @len: length of the buffer in bytes
 * @cacheable: nonzero to request a cacheable (noncoherent) mapping
 *
 * Returns a positive buffer id on success, negative errno on failure.
 */
static int xlnk_allocbuf(unsigned int len, unsigned int cacheable)
{
        int id;
        void *kaddr;
        dma_addr_t phys_addr_anchor;

        if (cacheable)
                kaddr = dma_alloc_noncoherent(xlnk_dev,
                                              len,
                                              &phys_addr_anchor,
                                              GFP_KERNEL |
                                              GFP_DMA |
                                              __GFP_REPEAT);
        else
                kaddr = dma_alloc_coherent(xlnk_dev,
                                           len,
                                           &phys_addr_anchor,
                                           GFP_KERNEL |
                                           GFP_DMA |
                                           __GFP_REPEAT);
        if (!kaddr)
                return -ENOMEM;

        spin_lock(&xlnk_buf_lock);
        id = xlnk_buf_findnull();
        if (id > 0 && id < XLNK_BUF_POOL_SIZE) {
                xlnk_bufpool_alloc_point[id] = kaddr;
                xlnk_bufpool[id] = kaddr;
                xlnk_buflen[id] = len;
                xlnk_bufcacheable[id] = cacheable;
                xlnk_phyaddr[id] = phys_addr_anchor;
        }
        spin_unlock(&xlnk_buf_lock);

        if (id <= 0 || id >= XLNK_BUF_POOL_SIZE) {
                pr_err("No id could be found in range\n");
                if (cacheable)
                        dma_free_noncoherent(xlnk_dev, len,
                                             kaddr, phys_addr_anchor);
                else
                        dma_free_coherent(xlnk_dev, len,
                                          kaddr, phys_addr_anchor);
                return -ENOMEM;
        }

        return id;
}
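
/*
 * Illustrative user-space sequence for the allocator above (a sketch, not
 * taken from this file; the real argument layout and ioctl encodings live
 * in xlnk.h and xlnk-ioctl.h):
 *
 *     union xlnk_args a = { .allocbuf = { .len = 4096, .cacheable = 0 } };
 *     int fd = open("/dev/xlnk", O_RDWR);
 *     ioctl(fd, XLNK_IOCALLOCBUF, &a);   // fills a.allocbuf.id/.phyaddr
 *     void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                    fd, (off_t)a.allocbuf.id << 24);
 *
 * The mmap offset encodes the buffer id in bits 24 and up, matching the
 * decoding in xlnk_mmap() below.
 */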

static int xlnk_init_bufpool(void)
{
        unsigned int i;

        spin_lock_init(&xlnk_buf_lock);
        xlnk_dev_buf = kmalloc(XLNK_DEV_BUF_SIZE, GFP_KERNEL | GFP_DMA);
        if (!xlnk_dev_buf) {
                dev_err(xlnk_dev, "%s: malloc failed\n", __func__);
                return -ENOMEM;
        }
        *((char *)xlnk_dev_buf) = '\0';

        xlnk_bufpool[0] = xlnk_dev_buf;
        for (i = 1; i < xlnk_bufpool_size; i++)
                xlnk_bufpool[i] = NULL;

        return 0;
}

#define XLNK_SUSPEND NULL
#define XLNK_RESUME NULL

static int xlnk_remove(struct platform_device *pdev)
{
        dev_t devno;

        kfree(xlnk_dev_buf);
        xlnk_dev_buf = NULL;

        devno = MKDEV(driver_major, 0);
        cdev_del(&xlnk_cdev);
        unregister_chrdev_region(devno, 1);
        if (xlnk_class) {
                /* remove the device from sysfs */
                device_destroy(xlnk_class, MKDEV(driver_major, 0));
                class_destroy(xlnk_class);
        }

        xlnk_devpacks_free_all();

        vfree(xlnk_page_store);
        xlnk_page_store = NULL;

        return 0;
}

static const struct of_device_id xlnk_match[] = {
        { .compatible = "xlnx,xlnk-1.0", },
        {}
};
MODULE_DEVICE_TABLE(of, xlnk_match);

static struct platform_driver xlnk_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = xlnk_match,
        },
        .probe = xlnk_probe,
        .remove = xlnk_remove,
        .suspend = XLNK_SUSPEND,
        .resume = XLNK_RESUME,
};

static u64 dma_mask = 0xFFFFFFFFUL;

/*
 * This function is called when an application opens a handle to the
 * bridge driver.
 */
static int xlnk_open(struct inode *ip, struct file *filp)
{
        int status = 0;

        if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
                xlnk_dev_size = 0;

        return status;
}

static ssize_t xlnk_read(struct file *filp, char __user *buf,
                         size_t count, loff_t *offp)
{
        ssize_t retval = 0;

        /* TODO: reads are not yet serialized (needs a semaphore) */

        if (*offp >= xlnk_dev_size)
                goto out;

        if (*offp + count > xlnk_dev_size)
                count = xlnk_dev_size - *offp;

        if (copy_to_user(buf, xlnk_dev_buf + *offp, count)) {
                retval = -EFAULT;
                goto out;
        }
        *offp += count;
        retval = count;

 out:
        return retval;
}

static ssize_t xlnk_write(struct file *filp, const char __user *buf,
                          size_t count, loff_t *offp)
{
        ssize_t retval = 0;

        /* TODO: writes are not yet serialized (needs a semaphore) */

        if (*offp >= XLNK_DEV_BUF_SIZE)
                return -ENOSPC;
        if (*offp + count > XLNK_DEV_BUF_SIZE)
                count = XLNK_DEV_BUF_SIZE - *offp;

        if (copy_from_user(xlnk_dev_buf + *offp, buf, count)) {
                retval = -EFAULT;
                goto out;
        }
        *offp += count;
        retval = count;

        if (xlnk_dev_size < *offp)
                xlnk_dev_size = *offp;

 out:
        return retval;
}

/*
 * This function is called when an application closes a handle to the
 * bridge driver.
 */
static int xlnk_release(struct inode *ip, struct file *filp)
{
        return 0;
}

static int xlnk_devregister(char *name, unsigned int id,
                            xlnk_intptr_type base, unsigned int size,
                            unsigned int *irqs,
                            xlnk_intptr_type *handle)
{
        unsigned int nres;
        unsigned int nirq;
        unsigned int *irqptr;
        struct xlnk_device_pack *devpack;
        unsigned int i;
        int status;

        devpack = xlnk_devpacks_find(base);
        if (devpack) {
                *handle = (xlnk_intptr_type)devpack;
                return 0;
        }
        nirq = 0;
        irqptr = irqs;

        while (*irqptr) {
                nirq++;
                irqptr++;
        }

        if (nirq > 7)
                return -ENOMEM;

        nres = nirq + 1;

        devpack = kzalloc(sizeof(*devpack), GFP_KERNEL);
        if (!devpack)
                return -ENOMEM;
        devpack->io_ptr = NULL;
        strlcpy(devpack->name, name, sizeof(devpack->name));
        devpack->pdev.name = devpack->name;

        devpack->pdev.id = id;

        devpack->pdev.dev.dma_mask = &dma_mask;
        devpack->pdev.dev.coherent_dma_mask = dma_mask;

        devpack->res[0].start = base;
        devpack->res[0].end = base + size - 1;
        devpack->res[0].flags = IORESOURCE_MEM;

        for (i = 0; i < nirq; i++) {
                devpack->res[i + 1].start = irqs[i];
                devpack->res[i + 1].end = irqs[i];
                devpack->res[i + 1].flags = IORESOURCE_IRQ;
        }

        devpack->pdev.resource = devpack->res;
        devpack->pdev.num_resources = nres;

        status = platform_device_register(&devpack->pdev);
        if (status) {
                kfree(devpack);
                *handle = 0;
        } else {
                xlnk_devpacks_add(devpack);
                *handle = (xlnk_intptr_type)devpack;
        }
        return status;
}

static int xlnk_dmaregister(char *name, unsigned int id,
                            xlnk_intptr_type base, unsigned int size,
                            unsigned int chan_num,
                            unsigned int chan0_dir,
                            unsigned int chan0_irq,
                            unsigned int chan0_poll_mode,
                            unsigned int chan0_include_dre,
                            unsigned int chan0_data_width,
                            unsigned int chan1_dir,
                            unsigned int chan1_irq,
                            unsigned int chan1_poll_mode,
                            unsigned int chan1_include_dre,
                            unsigned int chan1_data_width,
                            xlnk_intptr_type *handle)
{
        int status = 0;

#ifdef CONFIG_XILINX_DMA_APF

        struct xlnk_device_pack *devpack;

        if (chan_num < 1 || chan_num > 2) {
                pr_err("%s: Expected either 1 or 2 channels, got %d\n",
                       __func__, chan_num);
                return -EINVAL;
        }

        devpack = xlnk_devpacks_find(base);
        if (devpack) {
                *handle = (xlnk_intptr_type)devpack;
                return 0;
        }

        devpack = kzalloc(sizeof(*devpack), GFP_KERNEL);
        if (!devpack)
                return -ENOMEM;
        strlcpy(devpack->name, name, sizeof(devpack->name));
        devpack->pdev.name = "xilinx-axidma";
        if (xlnk_config_dma_type(xlnk_config_dma_standard) &&
            chan0_data_width == 0 && chan1_data_width == 0) {
                devpack->io_ptr = kzalloc(sizeof(*devpack->io_ptr),
                                          GFP_KERNEL);
                if (!devpack->io_ptr) {
                        kfree(devpack);
                        return -ENOMEM;
                }
                devpack->io_ptr->name = devpack->name;
                devpack->io_ptr->version = "0.0.1";
                devpack->io_ptr->irq = -1;
                if (uio_register_device(xlnk_dev, devpack->io_ptr)) {
                        pr_err("UIO dummy failed to install\n");
                        kfree(devpack->io_ptr);
                        kfree(devpack);
                        return -EFAULT;
                }
        } else {
                devpack->io_ptr = NULL;
        }

        devpack->pdev.id = id;

        devpack->dma_chan_cfg[0].include_dre = chan0_include_dre;
        devpack->dma_chan_cfg[0].datawidth   = chan0_data_width;
        devpack->dma_chan_cfg[0].irq = chan0_irq;
        devpack->dma_chan_cfg[0].poll_mode   = chan0_poll_mode;
        devpack->dma_chan_cfg[0].type =
                (chan0_dir == XLNK_DMA_FROM_DEVICE) ?
                                        "axi-dma-s2mm-channel" :
                                        "axi-dma-mm2s-channel";

        if (chan_num > 1) {
                devpack->dma_chan_cfg[1].include_dre = chan1_include_dre;
                devpack->dma_chan_cfg[1].datawidth   = chan1_data_width;
                devpack->dma_chan_cfg[1].irq = chan1_irq;
                devpack->dma_chan_cfg[1].poll_mode   = chan1_poll_mode;
                devpack->dma_chan_cfg[1].type =
                        (chan1_dir == XLNK_DMA_FROM_DEVICE) ?
                                                "axi-dma-s2mm-channel" :
                                                "axi-dma-mm2s-channel";
        }

        devpack->dma_dev_cfg.name = devpack->name;
        devpack->dma_dev_cfg.type = "axi-dma";
        devpack->dma_dev_cfg.include_sg = 1;
        devpack->dma_dev_cfg.sg_include_stscntrl_strm = 1;
        devpack->dma_dev_cfg.channel_count = chan_num;
        devpack->dma_dev_cfg.channel_config = &devpack->dma_chan_cfg[0];

        devpack->pdev.dev.platform_data = &devpack->dma_dev_cfg;

        devpack->pdev.dev.dma_mask = &dma_mask;
        devpack->pdev.dev.coherent_dma_mask = dma_mask;

        devpack->res[0].start = base;
        devpack->res[0].end = base + size - 1;
        devpack->res[0].flags = IORESOURCE_MEM;

        devpack->pdev.resource = devpack->res;
        devpack->pdev.num_resources = 1;
        if (xlnk_config_dma_type(xlnk_config_dma_manual))
                status = platform_device_register(&devpack->pdev);
        if (status) {
                kfree(devpack);
                *handle = 0;
        } else {
                xlnk_devpacks_add(devpack);
                *handle = (xlnk_intptr_type)devpack;
        }

#endif
        return status;
}
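
/*
 * Note the two registration modes above: with the "manual" DMA
 * configuration a private xilinx-axidma platform device is registered and
 * driven through the APF xdma_* calls, while in "standard" mode the
 * engine is expected to come from the device tree and only a dummy UIO
 * node is created so user space has something to enumerate.
 */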

static int xlnk_mcdmaregister(char *name, unsigned int id,
                              xlnk_intptr_type base, unsigned int size,
                              unsigned int mm2s_chan_num,
                              unsigned int mm2s_chan_irq,
                              unsigned int s2mm_chan_num,
                              unsigned int s2mm_chan_irq,
                              xlnk_intptr_type *handle)
{
        int status = -1;

#ifdef CONFIG_XILINX_MCDMA
        struct xlnk_device_pack *devpack;

        if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
                pr_err("Standard driver not yet supporting multichannel\n");
                return -EFAULT;
        }

        if (strcmp(name, "xdma"))
                return -EINVAL;

        devpack = xlnk_devpacks_find(base);
        if (devpack) {
                *handle = (xlnk_intptr_type)devpack;
                return 0;
        }

        devpack = kzalloc(sizeof(*devpack), GFP_KERNEL);
        if (!devpack)
                return -ENOMEM;

        strlcpy(devpack->name, name, sizeof(devpack->name));
        devpack->pdev.name = devpack->name;
        devpack->pdev.id = id;

        devpack->mcdma_dev_cfg.tx_chans = mm2s_chan_num;
        devpack->mcdma_dev_cfg.rx_chans = s2mm_chan_num;
        devpack->mcdma_dev_cfg.legacy_mode = XDMA_MCHAN_MODE;
        devpack->mcdma_dev_cfg.device_id   = id;

        devpack->pdev.dev.platform_data  = &devpack->mcdma_dev_cfg;
        devpack->pdev.dev.dma_mask = &dma_mask;
        devpack->pdev.dev.coherent_dma_mask = dma_mask;
        devpack->pdev.dev.release = xdma_if_device_release;

        devpack->res[0].start = base;
        devpack->res[0].end   = base + size - 1;
        devpack->res[0].flags = IORESOURCE_MEM;

        devpack->res[1].start = mm2s_chan_irq;
        devpack->res[1].end   = s2mm_chan_irq;
        devpack->res[1].flags = IORESOURCE_IRQ;

        devpack->pdev.resource    = devpack->res;
        devpack->pdev.num_resources = 2;

        status = platform_device_register(&devpack->pdev);
        if (status) {
                kfree(devpack);
                *handle = 0;
        } else {
                xlnk_devpacks_add(devpack);
                *handle = (xlnk_intptr_type)devpack;
        }

#endif

        return status;
}

static int xlnk_allocbuf_ioctl(struct file *filp, unsigned int code,
                               unsigned long args)
{
        union xlnk_args temp_args;
        int status;
        xlnk_int_type id;

        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));

        if (status)
                return -EFAULT;

        id = xlnk_allocbuf(temp_args.allocbuf.len,
                           temp_args.allocbuf.cacheable);

        if (id <= 0)
                return -ENOMEM;

        temp_args.allocbuf.id = id;
        temp_args.allocbuf.phyaddr = (xlnk_intptr_type)(xlnk_phyaddr[id]);
        if (copy_to_user((void __user *)args, &temp_args,
                         sizeof(union xlnk_args)))
                return -EFAULT;

        return 0;
}

static int xlnk_freebuf(int id)
{
        void *alloc_point;
        dma_addr_t p_addr;
        size_t buf_len;
        int cacheable;

        if (id <= 0 || id >= xlnk_bufpool_size)
                return -EINVAL;

        spin_lock(&xlnk_buf_lock);
        if (!xlnk_bufpool[id]) {
                spin_unlock(&xlnk_buf_lock);
                return -EINVAL;
        }
        alloc_point = xlnk_bufpool_alloc_point[id];
        p_addr = xlnk_phyaddr[id];
        buf_len = xlnk_buflen[id];
        xlnk_bufpool[id] = NULL;
        xlnk_phyaddr[id] = (dma_addr_t)NULL;
        xlnk_buflen[id] = 0;
        cacheable = xlnk_bufcacheable[id];
        xlnk_bufcacheable[id] = 0;
        spin_unlock(&xlnk_buf_lock);

        if (cacheable)
                dma_free_noncoherent(xlnk_dev,
                                     buf_len,
                                     alloc_point,
                                     p_addr);
        else
                dma_free_coherent(xlnk_dev,
                                  buf_len,
                                  alloc_point,
                                  p_addr);

        return 0;
}

static void xlnk_free_all_buf(void)
{
        int i;

        for (i = 1; i < xlnk_bufpool_size; i++)
                xlnk_freebuf(i);
}

static int xlnk_freebuf_ioctl(struct file *filp, unsigned int code,
                              unsigned long args)
{
        union xlnk_args temp_args;
        int status;
        int id;

        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));

        if (status)
                return -EFAULT;

        id = temp_args.freebuf.id;
        return xlnk_freebuf(id);
}

static int xlnk_adddmabuf_ioctl(struct file *filp, unsigned int code,
                                unsigned long args)
{
        union xlnk_args temp_args;
        struct xlnk_dmabuf_reg *db;
        int status;

        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));

        if (status)
                return -EFAULT;

        db = kzalloc(sizeof(*db), GFP_KERNEL);
        if (!db)
                return -ENOMEM;

        db->dmabuf_fd = temp_args.dmabuf.dmabuf_fd;
        db->user_vaddr = temp_args.dmabuf.user_addr;
        db->dbuf = dma_buf_get(db->dmabuf_fd);
        if (IS_ERR_OR_NULL(db->dbuf)) {
                kfree(db);
                return -EINVAL;
        }

        INIT_LIST_HEAD(&db->list);
        list_add_tail(&db->list, &xlnk_dmabuf_list);

        return 0;
}

static int xlnk_cleardmabuf_ioctl(struct file *filp, unsigned int code,
                                  unsigned long args)
{
        union xlnk_args temp_args;
        struct xlnk_dmabuf_reg *dp, *dp_temp;
        int status;

        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));

        if (status)
                return -EFAULT;

        list_for_each_entry_safe(dp, dp_temp, &xlnk_dmabuf_list, list) {
                if (dp->user_vaddr == temp_args.dmabuf.user_addr) {
                        dma_buf_put(dp->dbuf);
                        list_del(&dp->list);
                        kfree(dp);
                        return 0;
                }
        }
        return -ENOENT;
}

static int xlnk_dmarequest_ioctl(struct file *filp, unsigned int code,
                                 unsigned long args)
{
#ifdef CONFIG_XILINX_DMA_APF
        union xlnk_args temp_args;
        int status;

        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));

        if (status)
                return -EFAULT;

        if (!temp_args.dmarequest.name[0])
                return 0;

        if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
                struct dma_chan *chan;

                if (!xlnk_dev->of_node) {
                        pr_err("xlnk %s: No device tree info.", __func__);
                        return -EFAULT;
                }
                chan = dma_request_slave_channel(xlnk_dev,
                                                 temp_args.dmarequest.name);
                if (!chan) {
                        pr_err("Unable to get channel named %s\n",
                               temp_args.dmarequest.name);
                        return -EFAULT;
                }
                temp_args.dmarequest.dmachan = (xlnk_intptr_type)chan;
        } else {
                struct xdma_chan *chan =
                        xdma_request_channel(temp_args.dmarequest.name);

                if (!chan)
                        return -ENOMEM;
                temp_args.dmarequest.dmachan = (xlnk_intptr_type)chan;
                temp_args.dmarequest.bd_space_phys_addr = chan->bd_phys_addr;
                temp_args.dmarequest.bd_space_size = chan->bd_chain_size;
        }

        if (copy_to_user((void __user *)args, &temp_args,
                         sizeof(union xlnk_args)))
                return -EFAULT;

        return 0;
#else
        return -ENOSYS;
#endif
}

static void xlnk_complete_dma_callback(void *args)
{
        complete(args);
}
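
/*
 * DMA submission takes one of two shapes.  With the standard dmaengine
 * configuration the buffer is turned into a scatter-gather list (either a
 * single entry for a physically contiguous pool buffer, or one entry per
 * pinned user page) and handed to device_prep_slave_sg().  In manual mode
 * the transfer is delegated wholesale to the APF xdma_submit() helper.
 */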

static int xlnk_dmasubmit_ioctl(struct file *filp, unsigned int code,
                                unsigned long args)
{
#ifdef CONFIG_XILINX_DMA_APF
        union xlnk_args temp_args;
        struct xdma_head *dmahead;
        struct xlnk_dmabuf_reg *dp, *cp;
        int status = -1;

        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));

        if (status)
                return -EFAULT;

        if (!temp_args.dmasubmit.dmachan)
                return -ENODEV;

        cp = NULL;

        list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
                if (dp->user_vaddr == temp_args.dmasubmit.buf) {
                        cp = dp;
                        break;
                }
        }

        if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
                struct xlnk_dma_transfer_handle *t =
                        vmalloc(sizeof(struct xlnk_dma_transfer_handle));

                if (!t) {
                        pr_err("Could not allocate dma transfer handle\n");
                        return -ENOMEM;
                }
                t->transfer_direction = temp_args.dmasubmit.dmadir;
                t->user_addr = (xlnk_intptr_type)temp_args.dmasubmit.buf;
                t->transfer_length = temp_args.dmasubmit.len;
                t->flags = temp_args.dmasubmit.flag;
                t->channel = (struct dma_chan *)(temp_args.dmasubmit.dmachan);
                if (t->flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
                        int id = xlnk_buf_find_by_phys_addr(t->user_addr);

                        if (id <= 0) {
                                pr_err("invalid ID, failing\n");
                                vfree(t);
                                return -EFAULT;
                        }
                        t->kern_addr = xlnk_bufpool[id];
                        t->sg_effective_length = 1;
                        t->sg_list_size = 1;
                        t->sg_list = kmalloc(sizeof(*t->sg_list)
                                             * (t->sg_list_size),
                                             GFP_KERNEL | GFP_DMA);
                        if (!t->sg_list) {
                                vfree(t);
                                return -ENOMEM;
                        }
                        sg_init_table(t->sg_list, t->sg_list_size);
                        t->dma_addr = dma_map_single(t->channel->device->dev,
                                                     t->kern_addr,
                                                     t->transfer_length,
                                                     t->transfer_direction);
                        if (dma_mapping_error(t->channel->device->dev,
                                              t->dma_addr)) {
                                pr_err("DMA mapping error\n");
                                kfree(t->sg_list);
                                vfree(t);
                                return -EFAULT;
                        }
                        sg_dma_address(t->sg_list) = t->dma_addr;
                        sg_dma_len(t->sg_list) = t->transfer_length;
                } else {
                        unsigned long it;
                        int locked_page_count;
                        int p_it;
                        unsigned long first_page = t->user_addr / PAGE_SIZE;
                        unsigned long last_page =
                                (t->user_addr + (t->transfer_length - 1))
                                / PAGE_SIZE;

                        t->kern_addr = NULL;
                        t->dma_addr = 0;
                        t->sg_list_size = last_page - first_page + 1;
                        t->sg_list = kmalloc(sizeof(*t->sg_list)
                                             * (t->sg_list_size),
                                             GFP_KERNEL | GFP_DMA);
                        if (!t->sg_list) {
                                vfree(t);
                                return -ENOMEM;
                        }
                        if (xlnk_page_store_size <= t->sg_list_size) {
                                struct page **tmp =
                                        vmalloc(sizeof(struct page *)
                                                * 2 * t->sg_list_size);

                                if (!tmp) {
                                        kfree(t->sg_list);
                                        vfree(t);
                                        return -ENOMEM;
                                }
                                vfree(xlnk_page_store);
                                xlnk_page_store = tmp;
                                xlnk_page_store_size = 2 * t->sg_list_size;
                        }
                        down_read(&current->mm->mmap_sem);
                        locked_page_count =
                                get_user_pages(first_page * PAGE_SIZE,
                                               t->sg_list_size, 1, 1,
                                               xlnk_page_store, NULL);
                        up_read(&current->mm->mmap_sem);
                        if (locked_page_count != t->sg_list_size) {
                                int i;

                                pr_err("could not get user pages");
                                for (i = 0; i < locked_page_count; i++)
                                        put_page(xlnk_page_store[i]);
                                kfree(t->sg_list);
                                vfree(t);
                                return -EFAULT;
                        }
                        it = t->user_addr;
                        p_it = 0;
                        sg_init_table(t->sg_list, t->sg_list_size);
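                        /*
                         * Walk the user buffer page by page: each loop
                         * iteration covers the span from the current
                         * position to the end of its page (or the end of
                         * the transfer), producing one sg entry per
                         * locked page with the right offset and length.
                         */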
                        while (it < t->user_addr + t->transfer_length) {
                                unsigned long page_addr =
                                        (it / PAGE_SIZE) * PAGE_SIZE;
                                unsigned long offset = it - page_addr;
                                unsigned long page_barrier =
                                        page_addr + PAGE_SIZE;
                                unsigned long segment_end =
                                        (page_barrier < t->user_addr +
                                        t->transfer_length) ?
                                        page_barrier :
                                        (t->user_addr + t->transfer_length);
                                unsigned long segment_size = segment_end - it;

                                it = segment_end;
                                sg_set_page(t->sg_list + p_it,
                                            xlnk_page_store[p_it],
                                            (unsigned int)segment_size,
                                            (unsigned int)offset);
                                p_it++;
                        }
                        t->sg_effective_length =
                                dma_map_sg(t->channel->device->dev,
                                           t->sg_list,
                                           t->sg_list_size,
                                           t->transfer_direction);
                        if (t->sg_effective_length == 0) {
                                int i;

                                pr_err("could not map user pages");
                                for (i = 0; i < locked_page_count; i++)
                                        put_page(xlnk_page_store[i]);
                                kfree(t->sg_list);
                                vfree(t);
                                return -EFAULT;
                        }
                }
                t->async_desc =
                        t->channel->device->device_prep_slave_sg(
                                t->channel, t->sg_list,
                                t->sg_effective_length,
                                t->transfer_direction,
                                DMA_CTRL_ACK | DMA_PREP_INTERRUPT,
                                temp_args.dmasubmit.appwords_i);
                if (!t->async_desc) {
                        pr_err("Async desc is null, aborting\n");
                        return -EFAULT;
                }
                init_completion(&t->completion_handle);
                t->async_desc->callback = &xlnk_complete_dma_callback;
                t->async_desc->callback_param = &t->completion_handle;
                t->dma_cookie = t->async_desc->tx_submit(t->async_desc);
                dma_async_issue_pending(t->channel);
                if (dma_submit_error(t->dma_cookie)) {
                        pr_err("Failed to submit DMA transaction\n");
                        return -EFAULT;
                }
                temp_args.dmasubmit.dmahandle = (xlnk_intptr_type)t;
        } else {
                int buf_id;
                void *kaddr = NULL;

                spin_lock(&xlnk_buf_lock);
                buf_id =
                        xlnk_buf_find_by_phys_addr(temp_args.dmasubmit.buf);
                if (buf_id) {
                        xlnk_intptr_type addr_delta =
                                temp_args.dmasubmit.buf -
                                xlnk_phyaddr[buf_id];
                        kaddr = (u8 *)(xlnk_bufpool[buf_id]) + addr_delta;
                }
                spin_unlock(&xlnk_buf_lock);

                status = xdma_submit((struct xdma_chan *)
                                     (temp_args.dmasubmit.dmachan),
                                     temp_args.dmasubmit.buf,
                                     kaddr,
                                     temp_args.dmasubmit.len,
                                     temp_args.dmasubmit.nappwords_i,
                                     temp_args.dmasubmit.appwords_i,
                                     temp_args.dmasubmit.nappwords_o,
                                     temp_args.dmasubmit.flag,
                                     &dmahead,
                                     cp);

                temp_args.dmasubmit.dmahandle = (xlnk_intptr_type)dmahead;
                temp_args.dmasubmit.last_bd_index =
                        (xlnk_intptr_type)dmahead->last_bd_index;
        }
        if (!status) {
                if (copy_to_user((void __user *)args, &temp_args,
                                 sizeof(union xlnk_args)))
                        return -EFAULT;
        }
        return status;
#else
        return -ENOSYS;
#endif
}

static int xlnk_dmawait_ioctl(struct file *filp, unsigned int code,
                              unsigned long args)
{
        int status = -1;

#ifdef CONFIG_XILINX_DMA_APF
        union xlnk_args temp_args;

        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));

        if (status)
                return -EFAULT;
        if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
                int dma_result;
                struct xlnk_dma_transfer_handle *t =
                        (struct xlnk_dma_transfer_handle *)
                        temp_args.dmawait.dmahandle;

                wait_for_completion(&t->completion_handle);
                dma_result = dma_async_is_tx_complete(t->channel,
                                                      t->dma_cookie,
                                                      NULL, NULL);
                if (dma_result != DMA_COMPLETE) {
                        pr_err("Dma transfer failed for unknown reason\n");
                        return -EIO;
                }
                if (t->dma_addr) {
                        dma_unmap_single(t->channel->device->dev,
                                         t->dma_addr,
                                         t->transfer_length,
                                         t->transfer_direction);
                } else {
                        int i;

                        dma_unmap_sg(t->channel->device->dev,
                                     t->sg_list,
                                     t->sg_list_size,
                                     t->transfer_direction);
                        for (i = 0; i < t->sg_list_size; i++)
                                put_page(sg_page(t->sg_list + i));
                }
                kfree(t->sg_list);
                vfree(t);
        } else {
                struct xdma_head *dmahead =
                        (struct xdma_head *)temp_args.dmawait.dmahandle;

                status = xdma_wait(dmahead,
                                   dmahead->userflag,
                                   &temp_args.dmawait.flags);
                if (temp_args.dmawait.flags & XDMA_FLAGS_WAIT_COMPLETE) {
                        if (temp_args.dmawait.nappwords) {
                                memcpy(temp_args.dmawait.appwords,
                                       dmahead->appwords_o,
                                       dmahead->nappwords_o * sizeof(u32));
                        }
                        kfree(dmahead);
                }
                if (copy_to_user((void __user *)args,
                                 &temp_args,
                                 sizeof(union xlnk_args)))
                        return -EFAULT;
        }
#endif

        return status;
}

static int xlnk_dmarelease_ioctl(struct file *filp, unsigned int code,
                                 unsigned long args)
{
        int status = -1;

#ifdef CONFIG_XILINX_DMA_APF
        union xlnk_args temp_args;

        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));

        if (status)
                return -EFAULT;

        if (xlnk_config_dma_type(xlnk_config_dma_standard))
                dma_release_channel((struct dma_chan *)
                                    (temp_args.dmarelease.dmachan));
        else
                xdma_release_channel((struct xdma_chan *)
                                     (temp_args.dmarelease.dmachan));
#endif

        return status;
}

static int xlnk_devregister_ioctl(struct file *filp, unsigned int code,
                                  unsigned long args)
{
        union xlnk_args temp_args;
        int status;
        xlnk_intptr_type handle;

        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));

        if (status)
                return -EFAULT;

        status = xlnk_devregister(temp_args.devregister.name,
                                  temp_args.devregister.id,
                                  temp_args.devregister.base,
                                  temp_args.devregister.size,
                                  temp_args.devregister.irqs,
                                  &handle);

        return status;
}

static int xlnk_dmaregister_ioctl(struct file *filp, unsigned int code,
                                  unsigned long args)
{
        union xlnk_args temp_args;
        int status;
        xlnk_intptr_type handle;

        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));

        if (status)
                return -EFAULT;

        status = xlnk_dmaregister(temp_args.dmaregister.name,
                                  temp_args.dmaregister.id,
                                  temp_args.dmaregister.base,
                                  temp_args.dmaregister.size,
                                  temp_args.dmaregister.chan_num,
                                  temp_args.dmaregister.chan0_dir,
                                  temp_args.dmaregister.chan0_irq,
                                  temp_args.dmaregister.chan0_poll_mode,
                                  temp_args.dmaregister.chan0_include_dre,
                                  temp_args.dmaregister.chan0_data_width,
                                  temp_args.dmaregister.chan1_dir,
                                  temp_args.dmaregister.chan1_irq,
                                  temp_args.dmaregister.chan1_poll_mode,
                                  temp_args.dmaregister.chan1_include_dre,
                                  temp_args.dmaregister.chan1_data_width,
                                  &handle);

        return status;
}

static int xlnk_mcdmaregister_ioctl(struct file *filp, unsigned int code,
                                    unsigned long args)
{
        union xlnk_args temp_args;
        int status;
        xlnk_intptr_type handle;

        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));

        if (status)
                return -EFAULT;

        status = xlnk_mcdmaregister(temp_args.mcdmaregister.name,
                                    temp_args.mcdmaregister.id,
                                    temp_args.mcdmaregister.base,
                                    temp_args.mcdmaregister.size,
                                    temp_args.mcdmaregister.mm2s_chan_num,
                                    temp_args.mcdmaregister.mm2s_chan_irq,
                                    temp_args.mcdmaregister.s2mm_chan_num,
                                    temp_args.mcdmaregister.s2mm_chan_irq,
                                    &handle);

        return status;
}

static int xlnk_devunregister_ioctl(struct file *filp, unsigned int code,
                                    unsigned long args)
{
        union xlnk_args temp_args;
        int status;

        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));

        if (status)
                return -EFAULT;

        xlnk_devpacks_free(temp_args.devunregister.base);

        return 0;
}

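/*
 * Manual cache maintenance for pool buffers (manual DMA mode only):
 * action 0 cleans the range so the device observes CPU writes, action 1
 * invalidates it so the CPU observes device writes (on 32-bit ARM the
 * range is flushed first, then invalidated).
 */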
static int xlnk_cachecontrol_ioctl(struct file *filp, unsigned int code,
                                   unsigned long args)
{
        union xlnk_args temp_args;
        int status, size;
        void *paddr, *kaddr;
        int buf_id;

        if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
                pr_err("Manual cache management is forbidden in standard dma types");
                return -EINVAL;
        }

        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));

        if (status) {
                dev_err(xlnk_dev, "Error in copy_from_user. status = %d\n",
                        status);
                return -EFAULT;
        }

        if (!(temp_args.cachecontrol.action == 0 ||
              temp_args.cachecontrol.action == 1)) {
                dev_err(xlnk_dev, "Illegal action specified to cachecontrol_ioctl: %d\n",
                        temp_args.cachecontrol.action);
                return -EINVAL;
        }

        size = temp_args.cachecontrol.size;
        paddr = temp_args.cachecontrol.phys_addr;

        spin_lock(&xlnk_buf_lock);
        buf_id = xlnk_buf_find_by_phys_addr((xlnk_intptr_type)paddr);
        if (buf_id == 0) {
                spin_unlock(&xlnk_buf_lock);
                pr_err("Illegal cachecontrol on non-sds_alloc memory");
                return -EINVAL;
        }
        kaddr = xlnk_bufpool[buf_id];
        spin_unlock(&xlnk_buf_lock);

#if XLNK_SYS_BIT_WIDTH == 32
        __cpuc_flush_dcache_area(kaddr, size);
        outer_flush_range(paddr, paddr + size);
        if (temp_args.cachecontrol.action == 1)
                outer_inv_range(paddr, paddr + size);
#else
        if (temp_args.cachecontrol.action == 1)
                __dma_map_area(kaddr, size, DMA_FROM_DEVICE);
        else
                __dma_map_area(kaddr, size, DMA_TO_DEVICE);
#endif
        return 0;
}

static int xlnk_config_ioctl(struct file *filp, unsigned long args)
{
        struct xlnk_config_block block;
        int status, setting = 0, i;

        xlnk_config_clear_block(&block);
        status = copy_from_user(&block, (void __user *)args,
                                sizeof(struct xlnk_config_block));
        if (status) {
                pr_err("Error in copy_from_user. status = %d\n", status);
                return -EFAULT;
        }
        for (i = 0; i < xlnk_config_valid_size; i++)
                if (block.valid_mask[i])
                        setting = 1;
        if (setting) {
                status = xlnk_set_config(&block);
        } else {
                xlnk_get_config(&block);
                if (copy_to_user((void __user *)args, &block,
                                 sizeof(struct xlnk_config_block)))
                        return -EFAULT;
                status = 0;
        }
        return status;
}

/* This function provides IO interface to the bridge driver. */
static long xlnk_ioctl(struct file *filp, unsigned int code,
                       unsigned long args)
{
        int status = 0;

        /* sanity-check the ioctl code before dispatching */
        if (_IOC_TYPE(code) != XLNK_IOC_MAGIC)
                return -ENOTTY;
        if (_IOC_NR(code) > XLNK_IOC_MAXNR)
                return -ENOTTY;

        switch (code) {
        case XLNK_IOCALLOCBUF:
                status = xlnk_allocbuf_ioctl(filp, code, args);
                break;
        case XLNK_IOCFREEBUF:
                status = xlnk_freebuf_ioctl(filp, code, args);
                break;
        case XLNK_IOCADDDMABUF:
                status = xlnk_adddmabuf_ioctl(filp, code, args);
                break;
        case XLNK_IOCCLEARDMABUF:
                status = xlnk_cleardmabuf_ioctl(filp, code, args);
                break;
        case XLNK_IOCDMAREQUEST:
                status = xlnk_dmarequest_ioctl(filp, code, args);
                break;
        case XLNK_IOCDMASUBMIT:
                status = xlnk_dmasubmit_ioctl(filp, code, args);
                break;
        case XLNK_IOCDMAWAIT:
                status = xlnk_dmawait_ioctl(filp, code, args);
                break;
        case XLNK_IOCDMARELEASE:
                status = xlnk_dmarelease_ioctl(filp, code, args);
                break;
        case XLNK_IOCDEVREGISTER:
                status = xlnk_devregister_ioctl(filp, code, args);
                break;
        case XLNK_IOCDMAREGISTER:
                status = xlnk_dmaregister_ioctl(filp, code, args);
                break;
        case XLNK_IOCMCDMAREGISTER:
                status = xlnk_mcdmaregister_ioctl(filp, code, args);
                break;
        case XLNK_IOCDEVUNREGISTER:
                status = xlnk_devunregister_ioctl(filp, code, args);
                break;
        case XLNK_IOCCACHECTRL:
                status = xlnk_cachecontrol_ioctl(filp, code, args);
                break;
        case XLNK_IOCSHUTDOWN:
                status = xlnk_shutdown(args);
                break;
        case XLNK_IOCRECRES: /* recover resource */
                status = xlnk_recover_resource(args);
                break;
        case XLNK_IOCCONFIG:
                status = xlnk_config_ioctl(filp, args);
                break;
        default:
                status = -EINVAL;
        }

        return status;
}

static const struct vm_operations_struct xlnk_vm_ops = {
        .open = xlnk_vma_open,
        .close = xlnk_vma_close,
};

/* This function maps kernel-space memory to user-space memory. */
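/*
 * The buffer id is encoded in the mmap offset: user space passes
 * offset = id << 24, the kernel sees vm_pgoff = offset >> PAGE_SHIFT,
 * so id = vm_pgoff >> (24 - PAGE_SHIFT).  Id 0 maps the scratch buffer.
 */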
static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int bufid;
        int status;

        bufid = vma->vm_pgoff >> (24 - PAGE_SHIFT);

        if (bufid == 0) {
                status = remap_pfn_range(vma, vma->vm_start,
                                         virt_to_phys(xlnk_dev_buf)
                                         >> PAGE_SHIFT,
                                         vma->vm_end - vma->vm_start,
                                         vma->vm_page_prot);
        } else {
                if (xlnk_config_dma_type(xlnk_config_dma_standard)) {
                        unsigned long pfn;

                        if (vma->vm_start != PAGE_ALIGN(vma->vm_start)) {
                                pr_err("Cannot map on non-aligned addresses\n");
                                return -EINVAL;
                        }
                        if (xlnk_bufcacheable[bufid] == 0)
                                vma->vm_page_prot =
                                        pgprot_noncached(vma->vm_page_prot);
                        pfn = virt_to_pfn(xlnk_bufpool[bufid]);
                        status = remap_pfn_range(vma,
                                                 vma->vm_start,
                                                 pfn,
                                                 vma->vm_end - vma->vm_start,
                                                 vma->vm_page_prot);
                        xlnk_userbuf[bufid] = vma->vm_start;
                } else {
                        if (xlnk_bufcacheable[bufid] == 0)
                                vma->vm_page_prot =
                                        pgprot_noncached(vma->vm_page_prot);
                        status = remap_pfn_range(vma, vma->vm_start,
                                                 xlnk_phyaddr[bufid]
                                                 >> PAGE_SHIFT,
                                                 vma->vm_end - vma->vm_start,
                                                 vma->vm_page_prot);
                }
        }
        if (status) {
                pr_err("%s failed with code %d\n", __func__, status);
                return status;
        }

        xlnk_vma_open(vma);
        vma->vm_ops = &xlnk_vm_ops;
        vma->vm_private_data = xlnk_bufpool[bufid];

        return 0;
}

static void xlnk_vma_open(struct vm_area_struct *vma)
{
        xlnk_dev_vmas++;
}

static void xlnk_vma_close(struct vm_area_struct *vma)
{
        xlnk_dev_vmas--;
}

static int xlnk_shutdown(unsigned long buf)
{
        return 0;
}

static int xlnk_recover_resource(unsigned long buf)
{
        xlnk_free_all_buf();
#ifdef CONFIG_XILINX_DMA_APF
        xdma_release_all_channels();
#endif
        return 0;
}

module_platform_driver(xlnk_driver);

MODULE_DESCRIPTION("Xilinx APF driver");
MODULE_LICENSE("GPL");