]> rtime.felk.cvut.cz Git - zynq/linux.git/blob - drivers/staging/apf/xlnk.c
staging: apf: Correct kernel-doc structure for xlnk.c
[zynq/linux.git] / drivers / staging / apf / xlnk.c
1 /*
2  * xlnk.c
3  *
4  * Xilinx Accelerator driver support.
5  *
6  * Copyright (C) 2010 Xilinx Inc.
7  *
8  * This package is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  *
12  * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15  */
16
17 /*  ----------------------------------- Host OS */
18
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm.h>
23 #include <linux/fs.h>
24 #include <linux/slab.h>
25 #include <linux/gfp.h>
26 #include <linux/mm.h>
27 #include <asm/cacheflush.h>
28 #include <linux/io.h>
29 #include <linux/dma-buf.h>
30
31 #include <linux/string.h>
32
33 #include <linux/uaccess.h>
34
35 #include <linux/dmaengine.h>
36 #include <linux/completion.h>
37 #include <linux/wait.h>
38
39 #include <linux/device.h>
40 #include <linux/init.h>
41 #include <linux/cdev.h>
42
43 #include <linux/sched.h>
44 #include <linux/pagemap.h>
45 #include <linux/errno.h>        /* error codes */
46 #include <linux/dma-mapping.h>  /* dma */
47 #include <linux/of.h>
48 #include <linux/list.h>
49 #include <linux/dma/xilinx_dma.h>
50 #include <linux/uio_driver.h>
51 #include <asm/cacheflush.h>
52 #include <linux/semaphore.h>
53
54 #include "xlnk-ioctl.h"
55 #include "xlnk-sysdef.h"
56 #include "xlnk.h"
57
58 #ifdef CONFIG_XILINX_DMA_APF
59 #include "xilinx-dma-apf.h"
60 #endif
61
#define DRIVER_NAME  "xlnk"
#define DRIVER_VERSION  "0.2"

/* Singleton platform device/driver handles, captured in xlnk_probe(). */
static struct platform_device *xlnk_pdev;
static struct device *xlnk_dev;

/* Character device backing /dev/xlnk. */
static struct cdev xlnk_cdev;

static struct class *xlnk_class;

/* Major number allocated by alloc_chrdev_region() in xlnk_probe(). */
static s32 driver_major;

static char *driver_name = DRIVER_NAME;

/* Scratch buffer served by read()/write(); allocated (8192 bytes) in
 * xlnk_init_bufpool() and also installed as pool slot 0. */
static void *xlnk_dev_buf;
static ssize_t xlnk_dev_size;	/* bytes currently valid in xlnk_dev_buf */
static int xlnk_dev_vmas;

/*
 * DMA buffer pool: parallel arrays indexed by buffer id.  Id 0 is
 * reserved for the scratch buffer; valid ids are 1..XLNK_BUF_POOL_SIZE-1.
 * All of these are guarded by xlnk_buf_lock.
 */
#define XLNK_BUF_POOL_SIZE      4096
static unsigned int xlnk_bufpool_size = XLNK_BUF_POOL_SIZE;
static void *xlnk_bufpool[XLNK_BUF_POOL_SIZE];	/* kernel vaddr, NULL = free */
static void *xlnk_bufpool_alloc_point[XLNK_BUF_POOL_SIZE]; /* vaddr handed back to dma_free_attrs() */
static xlnk_intptr_type xlnk_userbuf[XLNK_BUF_POOL_SIZE]; /* user vaddr (see xlnk_buf_find_by_user_addr) */
static int xlnk_buf_process[XLNK_BUF_POOL_SIZE]; /* pid the user mapping belongs to */
static dma_addr_t xlnk_phyaddr[XLNK_BUF_POOL_SIZE]; /* DMA address of the buffer */
static size_t xlnk_buflen[XLNK_BUF_POOL_SIZE];	/* buffer length in bytes */
static unsigned int xlnk_bufcacheable[XLNK_BUF_POOL_SIZE]; /* nonzero = non-coherent alloc */
static spinlock_t xlnk_buf_lock;
90
/* file_operations handlers, defined later in this file. */
static int xlnk_open(struct inode *ip, struct file *filp);
static int xlnk_release(struct inode *ip, struct file *filp);
static long xlnk_ioctl(struct file *filp, unsigned int code,
                       unsigned long args);
static ssize_t xlnk_read(struct file *filp, char __user *buf,
                         size_t count, loff_t *offp);
static ssize_t xlnk_write(struct file *filp, const char __user *buf,
                          size_t count, loff_t *offp);
static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma);
static void xlnk_vma_open(struct vm_area_struct *vma);
static void xlnk_vma_close(struct vm_area_struct *vma);

static int xlnk_init_bufpool(void);

/* Registered DMA-BUF attachments (struct xlnk_dmabuf_reg nodes); list
 * membership is guarded by xlnk_buf_lock. */
LIST_HEAD(xlnk_dmabuf_list);

static int xlnk_shutdown(unsigned long buf);
static int xlnk_recover_resource(unsigned long buf);

/* /dev/xlnk entry points; the ioctl interface is the primary control path. */
static const struct file_operations xlnk_fops = {
        .open = xlnk_open,
        .release = xlnk_release,
        .read = xlnk_read,
        .write = xlnk_write,
        .unlocked_ioctl = xlnk_ioctl,
        .mmap = xlnk_mmap,
};
118
#define MAX_XLNK_DMAS 128

/*
 * A dynamically registered sub-device created on behalf of userspace by
 * the devregister/dmaregister ioctl paths: either a plain platform
 * device, a UIO-backed device (io_ptr != NULL), or an APF DMA
 * controller (dma_* members).
 */
struct xlnk_device_pack {
        char name[64];			/* registration name; pdev.name may alias it */
        struct platform_device pdev;
        struct resource res[8];		/* res[0] = MEM region, up to 7 IRQs after it */
        struct uio_info *io_ptr;	/* non-NULL only for UIO-backed packs */
        int refs;			/* registration reference count */

#ifdef CONFIG_XILINX_DMA_APF
        struct xdma_channel_config dma_chan_cfg[4];  /* for xidane dma only */
        struct xdma_device_config dma_dev_cfg;     /* for xidane dma only */
#endif
};

/* Registry of live device packs, keyed by res[0].start; all access is
 * meant to be serialized by xlnk_devpack_sem. */
static struct semaphore xlnk_devpack_sem;
static struct xlnk_device_pack *xlnk_devpacks[MAX_XLNK_DMAS];
136 static void xlnk_devpacks_init(void)
137 {
138         unsigned int i;
139
140         sema_init(&xlnk_devpack_sem, 1);
141         for (i = 0; i < MAX_XLNK_DMAS; i++)
142                 xlnk_devpacks[i] = NULL;
143 }
144
145 static struct xlnk_device_pack *xlnk_devpacks_alloc(void)
146 {
147         unsigned int i;
148
149         for (i = 0; i < MAX_XLNK_DMAS; i++) {
150                 if (!xlnk_devpacks[i]) {
151                         struct xlnk_device_pack *ret;
152
153                         ret = kzalloc(sizeof(*ret), GFP_KERNEL);
154                         ret->pdev.id = i;
155                         xlnk_devpacks[i] = ret;
156
157                         return ret;
158                 }
159         }
160
161         return NULL;
162 }
163
164 static void xlnk_devpacks_delete(struct xlnk_device_pack *devpack)
165 {
166         unsigned int i;
167
168         for (i = 0; i < MAX_XLNK_DMAS; i++)
169                 if (xlnk_devpacks[i] == devpack)
170                         xlnk_devpacks[i] = NULL;
171         kfree(devpack);
172 }
173
174 static struct xlnk_device_pack *xlnk_devpacks_find(xlnk_intptr_type base)
175 {
176         unsigned int i;
177
178         for (i = 0; i < MAX_XLNK_DMAS; i++) {
179                 if (xlnk_devpacks[i] &&
180                     xlnk_devpacks[i]->res[0].start == base)
181                         return xlnk_devpacks[i];
182         }
183         return NULL;
184 }
185
/*
 * Drop one reference on the device pack registered at @base; when the
 * last reference is gone, unregister its platform device and release
 * it.  Serialized by xlnk_devpack_sem.
 *
 * NOTE(review): xlnk_devpacks_delete() as written also kfree()s the
 * pack, so the kfree() below would be a double free on this path —
 * confirm which side should own the memory.  Also note refs is never
 * initialized to 1 at creation, so refs-- below can go negative.
 */
static void xlnk_devpacks_free(xlnk_intptr_type base)
{
        struct xlnk_device_pack *devpack;

        down(&xlnk_devpack_sem);
        devpack = xlnk_devpacks_find(base);
        if (!devpack) {
                /* nothing registered at this base address */
                up(&xlnk_devpack_sem);
                return;
        }
        devpack->refs--;
        if (devpack->refs) {
                /* still referenced by other registrations */
                up(&xlnk_devpack_sem);
                return;
        }
        platform_device_unregister(&devpack->pdev);
        xlnk_devpacks_delete(devpack);
        kfree(devpack);
        up(&xlnk_devpack_sem);
}
206
/*
 * Tear down every registered device pack: UIO-backed packs go through
 * the UIO core, everything else is unregistered as a platform device.
 * Called from xlnk_remove().
 *
 * NOTE(review): this walks the registry without taking
 * xlnk_devpack_sem, and the kfree() below duplicates the kfree()
 * inside xlnk_devpacks_delete() as currently written — both look like
 * latent bugs worth confirming.
 */
static void xlnk_devpacks_free_all(void)
{
        struct xlnk_device_pack *devpack;
        unsigned int i;

        for (i = 0; i < MAX_XLNK_DMAS; i++) {
                devpack = xlnk_devpacks[i];
                if (devpack) {
                        if (devpack->io_ptr) {
                                uio_unregister_device(devpack->io_ptr);
                                kfree(devpack->io_ptr);
                        } else {
                                platform_device_unregister(&devpack->pdev);
                        }
                        xlnk_devpacks_delete(devpack);
                        kfree(devpack);
                }
        }
}
226
227 static int xlnk_probe(struct platform_device *pdev)
228 {
229         int err;
230         dev_t dev = 0;
231
232         xlnk_dev_buf = NULL;
233         xlnk_dev_size = 0;
234         xlnk_dev_vmas = 0;
235
236         /* use 2.6 device model */
237         err = alloc_chrdev_region(&dev, 0, 1, driver_name);
238         if (err) {
239                 dev_err(&pdev->dev, "%s: Can't get major %d\n",
240                         __func__, driver_major);
241                 goto err1;
242         }
243
244         cdev_init(&xlnk_cdev, &xlnk_fops);
245
246         xlnk_cdev.owner = THIS_MODULE;
247
248         err = cdev_add(&xlnk_cdev, dev, 1);
249
250         if (err) {
251                 dev_err(&pdev->dev, "%s: Failed to add XLNK device\n",
252                         __func__);
253                 goto err3;
254         }
255
256         /* udev support */
257         xlnk_class = class_create(THIS_MODULE, "xlnk");
258         if (IS_ERR(xlnk_class)) {
259                 dev_err(xlnk_dev, "%s: Error creating xlnk class\n", __func__);
260                 goto err3;
261         }
262
263         driver_major = MAJOR(dev);
264
265         dev_info(&pdev->dev, "Major %d\n", driver_major);
266
267         device_create(xlnk_class, NULL, MKDEV(driver_major, 0),
268                       NULL, "xlnk");
269
270         xlnk_init_bufpool();
271
272         dev_info(&pdev->dev, "%s driver loaded\n", DRIVER_NAME);
273
274         xlnk_pdev = pdev;
275         xlnk_dev = &pdev->dev;
276
277         if (xlnk_pdev)
278                 dev_info(&pdev->dev, "xlnk_pdev is not null\n");
279         else
280                 dev_info(&pdev->dev, "xlnk_pdev is null\n");
281
282         xlnk_devpacks_init();
283
284         return 0;
285 err3:
286         cdev_del(&xlnk_cdev);
287         unregister_chrdev_region(dev, 1);
288 err1:
289         return err;
290 }
291
292 static int xlnk_buf_findnull(void)
293 {
294         int i;
295
296         for (i = 1; i < xlnk_bufpool_size; i++) {
297                 if (!xlnk_bufpool[i])
298                         return i;
299         }
300
301         return 0;
302 }
303
304 static int xlnk_buf_find_by_phys_addr(xlnk_intptr_type addr)
305 {
306         int i;
307
308         for (i = 1; i < xlnk_bufpool_size; i++) {
309                 if (xlnk_bufpool[i] &&
310                     xlnk_phyaddr[i] <= addr &&
311                     xlnk_phyaddr[i] + xlnk_buflen[i] > addr)
312                         return i;
313         }
314
315         return 0;
316 }
317
318 static int xlnk_buf_find_by_user_addr(xlnk_intptr_type addr, int pid)
319 {
320         int i;
321
322         for (i = 1; i < xlnk_bufpool_size; i++) {
323                 if (xlnk_bufpool[i] &&
324                     xlnk_buf_process[i] == pid &&
325                     xlnk_userbuf[i] <= addr &&
326                     xlnk_userbuf[i] + xlnk_buflen[i] > addr)
327                         return i;
328         }
329
330         return 0;
331 }
332
333 /*
334  * allocate and return an id
335  * id must be a positve number
336  */
337 static int xlnk_allocbuf(unsigned int len, unsigned int cacheable)
338 {
339         int id;
340         void *kaddr;
341         dma_addr_t phys_addr_anchor;
342         unsigned long attrs;
343
344         attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;
345
346         kaddr = dma_alloc_attrs(xlnk_dev,
347                                 len,
348                                 &phys_addr_anchor,
349                                 GFP_KERNEL | GFP_DMA,
350                                 attrs);
351         if (!kaddr)
352                 return -ENOMEM;
353
354         spin_lock(&xlnk_buf_lock);
355         id = xlnk_buf_findnull();
356         if (id > 0 && id < XLNK_BUF_POOL_SIZE) {
357                 xlnk_bufpool_alloc_point[id] = kaddr;
358                 xlnk_bufpool[id] = kaddr;
359                 xlnk_buflen[id] = len;
360                 xlnk_bufcacheable[id] = cacheable;
361                 xlnk_phyaddr[id] = phys_addr_anchor;
362         }
363         spin_unlock(&xlnk_buf_lock);
364
365         if (id <= 0 || id >= XLNK_BUF_POOL_SIZE)
366                 return -ENOMEM;
367
368         return id;
369 }
370
371 static int xlnk_init_bufpool(void)
372 {
373         unsigned int i;
374
375         spin_lock_init(&xlnk_buf_lock);
376         xlnk_dev_buf = kmalloc(8192, GFP_KERNEL | GFP_DMA);
377         *((char *)xlnk_dev_buf) = '\0';
378
379         if (!xlnk_dev_buf) {
380                 dev_err(xlnk_dev, "%s: malloc failed\n", __func__);
381                 return -ENOMEM;
382         }
383
384         xlnk_bufpool[0] = xlnk_dev_buf;
385         for (i = 1; i < xlnk_bufpool_size; i++)
386                 xlnk_bufpool[i] = NULL;
387
388         return 0;
389 }
390
/* No dedicated power-management handlers. */
#define XLNK_SUSPEND NULL
#define XLNK_RESUME NULL

/*
 * Platform remove: free the scratch buffer, tear down the char device
 * and sysfs node, and unregister all dynamically created sub-devices.
 *
 * NOTE(review): pool buffers from xlnk_allocbuf() are not released
 * here (xlnk_free_all_buf() is never called) — confirm whether that is
 * intentional.
 */
static int xlnk_remove(struct platform_device *pdev)
{
        dev_t devno;

        kfree(xlnk_dev_buf);
        xlnk_dev_buf = NULL;

        devno = MKDEV(driver_major, 0);
        cdev_del(&xlnk_cdev);
        unregister_chrdev_region(devno, 1);
        if (xlnk_class) {
                /* remove the device from sysfs */
                device_destroy(xlnk_class, MKDEV(driver_major, 0));
                class_destroy(xlnk_class);
        }

        xlnk_devpacks_free_all();

        return 0;
}
414
/* Device-tree binding: probe against "xlnx,xlnk-1.0" nodes. */
static const struct of_device_id xlnk_match[] = {
        { .compatible = "xlnx,xlnk-1.0", },
        {}
};
MODULE_DEVICE_TABLE(of, xlnk_match);
420
static struct platform_driver xlnk_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = xlnk_match,
        },
        .probe = xlnk_probe,
        .remove = xlnk_remove,
        .suspend = XLNK_SUSPEND,	/* NULL — no PM support */
        .resume = XLNK_RESUME,		/* NULL — no PM support */
};

/* Full 64-bit DMA mask shared by every dynamically registered
 * sub-device (see xlnk_devregister()/xlnk_dmaregister()). */
static u64 dma_mask = 0xFFFFFFFFFFFFFFFFull;
433
434 /*
435  * This function is called when an application opens handle to the
436  * bridge driver.
437  */
438 static int xlnk_open(struct inode *ip, struct file *filp)
439 {
440         if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
441                 xlnk_dev_size = 0;
442
443         return 0;
444 }
445
446 static ssize_t xlnk_read(struct file *filp,
447                          char __user *buf,
448                          size_t count,
449                          loff_t *offp)
450 {
451         ssize_t retval = 0;
452
453         if (*offp >= xlnk_dev_size)
454                 goto out;
455
456         if (*offp + count > xlnk_dev_size)
457                 count = xlnk_dev_size - *offp;
458
459         if (copy_to_user(buf, xlnk_dev_buf + *offp, count)) {
460                 retval = -EFAULT;
461                 goto out;
462         }
463         *offp += count;
464         retval = count;
465
466  out:
467         return retval;
468 }
469
470 static ssize_t xlnk_write(struct file *filp, const char __user *buf,
471                           size_t count, loff_t *offp)
472 {
473         ssize_t retval = 0;
474
475         if (copy_from_user(xlnk_dev_buf + *offp, buf, count)) {
476                 retval = -EFAULT;
477                 goto out;
478         }
479         *offp += count;
480         retval = count;
481
482         if (xlnk_dev_size < *offp)
483                 xlnk_dev_size = *offp;
484
485  out:
486         return retval;
487 }
488
/*
 * This function is called when an application closes handle to the bridge
 * driver.  No per-handle state is tracked, so there is nothing to
 * release.
 */
static int xlnk_release(struct inode *ip, struct file *filp)
{
        return 0;
}
497
498 static int xlnk_devregister(char *name,
499                             unsigned int id,
500                             xlnk_intptr_type base,
501                             unsigned int size,
502                             unsigned int *irqs,
503                             xlnk_intptr_type *handle)
504 {
505         unsigned int nres;
506         unsigned int nirq;
507         unsigned int *irqptr;
508         struct xlnk_device_pack *devpack;
509         unsigned int i;
510         int status;
511
512         down(&xlnk_devpack_sem);
513         devpack = xlnk_devpacks_find(base);
514         if (devpack) {
515                 *handle = (xlnk_intptr_type)devpack;
516                 devpack->refs++;
517                 status = 0;
518         } else {
519                 nirq = 0;
520                 irqptr = irqs;
521
522                 while (*irqptr) {
523                         nirq++;
524                         irqptr++;
525                 }
526
527                 if (nirq > 7) {
528                         up(&xlnk_devpack_sem);
529                         return -ENOMEM;
530                 }
531
532                 nres = nirq + 1;
533
534                 devpack = xlnk_devpacks_alloc();
535                 if (!devpack) {
536                         up(&xlnk_devpack_sem);
537                         pr_err("Failed to allocate device %s\n", name);
538                         return -ENOMEM;
539                 }
540                 devpack->io_ptr = NULL;
541                 strcpy(devpack->name, name);
542                 devpack->pdev.name = devpack->name;
543
544                 devpack->pdev.dev.dma_mask = &dma_mask;
545                 devpack->pdev.dev.coherent_dma_mask = dma_mask;
546
547                 devpack->res[0].start = base;
548                 devpack->res[0].end = base + size - 1;
549                 devpack->res[0].flags = IORESOURCE_MEM;
550
551                 for (i = 0; i < nirq; i++) {
552                         devpack->res[i + 1].start = irqs[i];
553                         devpack->res[i + 1].end = irqs[i];
554                         devpack->res[i + 1].flags = IORESOURCE_IRQ;
555                 }
556
557                 devpack->pdev.resource = devpack->res;
558                 devpack->pdev.num_resources = nres;
559
560                 status = platform_device_register(&devpack->pdev);
561                 if (status) {
562                         xlnk_devpacks_delete(devpack);
563                         *handle = 0;
564                 } else {
565                         *handle = (xlnk_intptr_type)devpack;
566                 }
567         }
568         up(&xlnk_devpack_sem);
569
570         return status;
571 }
572
573 static int xlnk_dmaregister(char *name,
574                             unsigned int id,
575                             xlnk_intptr_type base,
576                             unsigned int size,
577                             unsigned int chan_num,
578                             unsigned int chan0_dir,
579                             unsigned int chan0_irq,
580                             unsigned int chan0_poll_mode,
581                             unsigned int chan0_include_dre,
582                             unsigned int chan0_data_width,
583                             unsigned int chan1_dir,
584                             unsigned int chan1_irq,
585                             unsigned int chan1_poll_mode,
586                             unsigned int chan1_include_dre,
587                             unsigned int chan1_data_width,
588                             xlnk_intptr_type *handle)
589 {
590         int status = 0;
591
592 #ifdef CONFIG_XILINX_DMA_APF
593
594         struct xlnk_device_pack *devpack;
595
596         if (chan_num < 1 || chan_num > 2) {
597                 pr_err("%s: Expected either 1 or 2 channels, got %d\n",
598                        __func__, chan_num);
599                 return -EINVAL;
600         }
601
602         down(&xlnk_devpack_sem);
603         devpack = xlnk_devpacks_find(base);
604         if (devpack) {
605                 *handle = (xlnk_intptr_type)devpack;
606                 devpack->refs++;
607                 status = 0;
608         } else {
609                 devpack = xlnk_devpacks_alloc();
610                 if (!devpack) {
611                         up(&xlnk_devpack_sem);
612                         return -ENOMEM;
613                 }
614                 strcpy(devpack->name, name);
615                 devpack->pdev.name = "xilinx-axidma";
616
617                 devpack->io_ptr = NULL;
618
619                 devpack->dma_chan_cfg[0].include_dre = chan0_include_dre;
620                 devpack->dma_chan_cfg[0].datawidth = chan0_data_width;
621                 devpack->dma_chan_cfg[0].irq = chan0_irq;
622                 devpack->dma_chan_cfg[0].poll_mode = chan0_poll_mode;
623                 devpack->dma_chan_cfg[0].type =
624                         (chan0_dir == XLNK_DMA_FROM_DEVICE) ?
625                                 "axi-dma-s2mm-channel" :
626                                 "axi-dma-mm2s-channel";
627
628                 if (chan_num > 1) {
629                         devpack->dma_chan_cfg[1].include_dre =
630                                 chan1_include_dre;
631                         devpack->dma_chan_cfg[1].datawidth = chan1_data_width;
632                         devpack->dma_chan_cfg[1].irq = chan1_irq;
633                         devpack->dma_chan_cfg[1].poll_mode = chan1_poll_mode;
634                         devpack->dma_chan_cfg[1].type =
635                                 (chan1_dir == XLNK_DMA_FROM_DEVICE) ?
636                                         "axi-dma-s2mm-channel" :
637                                         "axi-dma-mm2s-channel";
638                 }
639
640                 devpack->dma_dev_cfg.name = devpack->name;
641                 devpack->dma_dev_cfg.type = "axi-dma";
642                 devpack->dma_dev_cfg.include_sg = 1;
643                 devpack->dma_dev_cfg.sg_include_stscntrl_strm = 1;
644                 devpack->dma_dev_cfg.channel_count = chan_num;
645                 devpack->dma_dev_cfg.channel_config = &devpack->dma_chan_cfg[0];
646
647                 devpack->pdev.dev.platform_data = &devpack->dma_dev_cfg;
648
649                 devpack->pdev.dev.dma_mask = &dma_mask;
650                 devpack->pdev.dev.coherent_dma_mask = dma_mask;
651
652                 devpack->res[0].start = base;
653                 devpack->res[0].end = base + size - 1;
654                 devpack->res[0].flags = IORESOURCE_MEM;
655
656                 devpack->pdev.resource = devpack->res;
657                 devpack->pdev.num_resources = 1;
658                 status = platform_device_register(&devpack->pdev);
659                 if (status) {
660                         xlnk_devpacks_delete(devpack);
661                         *handle = 0;
662                 } else {
663                         *handle = (xlnk_intptr_type)devpack;
664                 }
665         }
666         up(&xlnk_devpack_sem);
667
668 #endif
669         return status;
670 }
671
672 static int xlnk_allocbuf_ioctl(struct file *filp,
673                                unsigned int code,
674                                unsigned long args)
675 {
676         union xlnk_args temp_args;
677         int status;
678         xlnk_int_type id;
679
680         status = copy_from_user(&temp_args, (void __user *)args,
681                                 sizeof(union xlnk_args));
682
683         if (status)
684                 return -ENOMEM;
685
686         id = xlnk_allocbuf(temp_args.allocbuf.len,
687                            temp_args.allocbuf.cacheable);
688
689         if (id <= 0)
690                 return -ENOMEM;
691
692         temp_args.allocbuf.id = id;
693         temp_args.allocbuf.phyaddr = (xlnk_intptr_type)(xlnk_phyaddr[id]);
694         status = copy_to_user((void __user *)args,
695                               &temp_args,
696                               sizeof(union xlnk_args));
697
698         return status;
699 }
700
701 static int xlnk_freebuf(int id)
702 {
703         void *alloc_point;
704         dma_addr_t p_addr;
705         size_t buf_len;
706         int cacheable;
707         unsigned long attrs;
708
709         if (id <= 0 || id >= xlnk_bufpool_size)
710                 return -ENOMEM;
711
712         if (!xlnk_bufpool[id])
713                 return -ENOMEM;
714
715         spin_lock(&xlnk_buf_lock);
716         alloc_point = xlnk_bufpool_alloc_point[id];
717         p_addr = xlnk_phyaddr[id];
718         buf_len = xlnk_buflen[id];
719         xlnk_bufpool[id] = NULL;
720         xlnk_phyaddr[id] = (dma_addr_t)NULL;
721         xlnk_buflen[id] = 0;
722         cacheable = xlnk_bufcacheable[id];
723         xlnk_bufcacheable[id] = 0;
724         spin_unlock(&xlnk_buf_lock);
725
726         attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;
727
728         dma_free_attrs(xlnk_dev,
729                        buf_len,
730                        alloc_point,
731                        p_addr,
732                        attrs);
733
734         return 0;
735 }
736
737 static void xlnk_free_all_buf(void)
738 {
739         int i;
740
741         for (i = 1; i < xlnk_bufpool_size; i++)
742                 xlnk_freebuf(i);
743 }
744
745 static int xlnk_freebuf_ioctl(struct file *filp,
746                               unsigned int code,
747                               unsigned long args)
748 {
749         union xlnk_args temp_args;
750         int status;
751         int id;
752
753         status = copy_from_user(&temp_args, (void __user *)args,
754                                 sizeof(union xlnk_args));
755
756         if (status)
757                 return -ENOMEM;
758
759         id = temp_args.freebuf.id;
760         return xlnk_freebuf(id);
761 }
762
763 static int xlnk_adddmabuf_ioctl(struct file *filp,
764                                 unsigned int code,
765                                 unsigned long args)
766 {
767         union xlnk_args temp_args;
768         struct xlnk_dmabuf_reg *db;
769         int status;
770
771         status = copy_from_user(&temp_args, (void __user *)args,
772                                 sizeof(union xlnk_args));
773
774         if (status)
775                 return -ENOMEM;
776
777         spin_lock(&xlnk_buf_lock);
778         list_for_each_entry(db, &xlnk_dmabuf_list, list) {
779                 if (db->user_vaddr == temp_args.dmasubmit.buf) {
780                         pr_err("Attempting to register DMA-BUF for addr %llx that is already registered\n",
781                                (unsigned long long)temp_args.dmabuf.user_addr);
782                         spin_unlock(&xlnk_buf_lock);
783                         return -EINVAL;
784                 }
785         }
786         spin_unlock(&xlnk_buf_lock);
787
788         db = kzalloc(sizeof(*db), GFP_KERNEL);
789         if (!db)
790                 return -ENOMEM;
791
792         db->dmabuf_fd = temp_args.dmabuf.dmabuf_fd;
793         db->user_vaddr = temp_args.dmabuf.user_addr;
794         db->dbuf = dma_buf_get(db->dmabuf_fd);
795         db->dbuf_attach = dma_buf_attach(db->dbuf, xlnk_dev);
796         if (IS_ERR(db->dbuf_attach)) {
797                 dma_buf_put(db->dbuf);
798                 pr_err("Failed DMA-BUF attach\n");
799                 return -EINVAL;
800         }
801
802         db->dbuf_sg_table = dma_buf_map_attachment(db->dbuf_attach,
803                                                    DMA_BIDIRECTIONAL);
804
805         if (!db->dbuf_sg_table) {
806                 pr_err("Failed DMA-BUF map_attachment\n");
807                 dma_buf_detach(db->dbuf, db->dbuf_attach);
808                 dma_buf_put(db->dbuf);
809                 return -EINVAL;
810         }
811
812         spin_lock(&xlnk_buf_lock);
813         INIT_LIST_HEAD(&db->list);
814         list_add_tail(&db->list, &xlnk_dmabuf_list);
815         spin_unlock(&xlnk_buf_lock);
816
817         return 0;
818 }
819
820 static int xlnk_cleardmabuf_ioctl(struct file *filp,
821                                   unsigned int code,
822                                   unsigned long args)
823 {
824         union xlnk_args temp_args;
825         struct xlnk_dmabuf_reg *dp, *dp_temp;
826         int status;
827
828         status = copy_from_user(&temp_args, (void __user *)args,
829                                 sizeof(union xlnk_args));
830
831         if (status)
832                 return -ENOMEM;
833
834         spin_lock(&xlnk_buf_lock);
835         list_for_each_entry_safe(dp, dp_temp, &xlnk_dmabuf_list, list) {
836                 if (dp->user_vaddr == temp_args.dmabuf.user_addr) {
837                         dma_buf_unmap_attachment(dp->dbuf_attach,
838                                                  dp->dbuf_sg_table,
839                                                  DMA_BIDIRECTIONAL);
840                         dma_buf_detach(dp->dbuf, dp->dbuf_attach);
841                         dma_buf_put(dp->dbuf);
842                         list_del(&dp->list);
843                         spin_unlock(&xlnk_buf_lock);
844                         kfree(dp);
845                         return 0;
846                 }
847         }
848         spin_unlock(&xlnk_buf_lock);
849         pr_err("Attempting to unregister a DMA-BUF that was not registered at addr %llx\n",
850                (unsigned long long)temp_args.dmabuf.user_addr);
851
852         return 1;
853 }
854
/*
 * DMA-channel request ioctl: look up a channel by name and report its
 * handle and buffer-descriptor space back to userspace.
 *
 * Return: 0 on success (or when no name is given), -EFAULT on bad user
 * pointers, -ENOMEM when the channel cannot be acquired; -1 when built
 * without CONFIG_XILINX_DMA_APF.
 */
static int xlnk_dmarequest_ioctl(struct file *filp, unsigned int code,
				 unsigned long args)
{
#ifdef CONFIG_XILINX_DMA_APF
	union xlnk_args temp_args;
	struct xdma_chan *chan;

	if (copy_from_user(&temp_args, (void __user *)args,
			   sizeof(union xlnk_args)))
		return -EFAULT;	/* was -ENOMEM: a fault, not OOM */

	if (!temp_args.dmarequest.name[0])
		return 0;

	down(&xlnk_devpack_sem);
	chan = xdma_request_channel(temp_args.dmarequest.name);
	up(&xlnk_devpack_sem);
	if (!chan)
		return -ENOMEM;
	temp_args.dmarequest.dmachan = (xlnk_intptr_type)chan;
	temp_args.dmarequest.bd_space_phys_addr = chan->bd_phys_addr;
	temp_args.dmarequest.bd_space_size = chan->bd_chain_size;

	if (copy_to_user((void __user *)args,
			 &temp_args,
			 sizeof(union xlnk_args)))
		return -EFAULT;

	return 0;
#else
	return -1;
#endif
}
891
892 static int xlnk_dmasubmit_ioctl(struct file *filp, unsigned int code,
893                                 unsigned long args)
894 {
895 #ifdef CONFIG_XILINX_DMA_APF
896         union xlnk_args temp_args;
897         struct xdma_head *dmahead;
898         struct xlnk_dmabuf_reg *dp, *cp = NULL;
899         int buf_id;
900         void *kaddr = NULL;
901         int status = -1;
902
903         status = copy_from_user(&temp_args, (void __user *)args,
904                                 sizeof(union xlnk_args));
905
906         if (status)
907                 return -ENOMEM;
908
909         if (!temp_args.dmasubmit.dmachan)
910                 return -ENODEV;
911
912         spin_lock(&xlnk_buf_lock);
913         buf_id = xlnk_buf_find_by_phys_addr(temp_args.dmasubmit.buf);
914         if (buf_id) {
915                 xlnk_intptr_type addr_delta =
916                         temp_args.dmasubmit.buf -
917                         xlnk_phyaddr[buf_id];
918                 kaddr = (u8 *)(xlnk_bufpool[buf_id]) + addr_delta;
919         } else {
920                 list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
921                         if (dp->user_vaddr == temp_args.dmasubmit.buf) {
922                                 cp = dp;
923                                 break;
924                         }
925                 }
926         }
927         spin_unlock(&xlnk_buf_lock);
928
929         status = xdma_submit((struct xdma_chan *)
930                                         (temp_args.dmasubmit.dmachan),
931                                         temp_args.dmasubmit.buf,
932                                         kaddr,
933                                         temp_args.dmasubmit.len,
934                                         temp_args.dmasubmit.nappwords_i,
935                                         temp_args.dmasubmit.appwords_i,
936                                         temp_args.dmasubmit.nappwords_o,
937                                         temp_args.dmasubmit.flag,
938                                         &dmahead,
939                                         cp);
940
941         temp_args.dmasubmit.dmahandle = (xlnk_intptr_type)dmahead;
942         temp_args.dmasubmit.last_bd_index =
943                 (xlnk_intptr_type)dmahead->last_bd_index;
944
945         if (!status) {
946                 if (copy_to_user((void __user *)args,
947                                  &temp_args,
948                                  sizeof(union xlnk_args)))
949                         return -EFAULT;
950         }
951         return status;
952 #endif
953         return -ENOMEM;
954 }
955
/*
 * xlnk_dmawait_ioctl - Wait for completion of a submitted DMA transfer.
 * @filp: open device file (unused)
 * @code: ioctl command code (unused)
 * @args: user pointer to a union xlnk_args holding dmawait parameters
 *
 * Copies the wait arguments from user space, waits on the DMA handle via
 * xdma_wait(), and copies the resulting flags (and any completion app
 * words) back to the caller.  The xdma_head allocated at submit time is
 * freed once XDMA_FLAGS_WAIT_COMPLETE is reported.
 *
 * Return: 0 on success, -EFAULT on a failed user-space copy, the status
 * from xdma_wait() otherwise, or -1 when CONFIG_XILINX_DMA_APF is off.
 */
static int xlnk_dmawait_ioctl(struct file *filp,
			      unsigned int code,
			      unsigned long args)
{
	int status = -1;
#ifdef CONFIG_XILINX_DMA_APF
	union xlnk_args temp_args;
	struct xdma_head *dmahead;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));

	if (status)
		return -EFAULT;	/* failed user copy is a fault, not OOM */

	/*
	 * NOTE(review): dmahandle is a raw user-supplied value dereferenced
	 * as a kernel struct xdma_head with no validation; a hostile caller
	 * can pass an arbitrary pointer here.  Confirm whether handles
	 * should instead be looked up in a kernel-owned table.
	 */
	dmahead = (struct xdma_head *)temp_args.dmawait.dmahandle;
	status = xdma_wait(dmahead,
			   dmahead->userflag,
			   &temp_args.dmawait.flags);
	if (temp_args.dmawait.flags & XDMA_FLAGS_WAIT_COMPLETE) {
		/* Transfer finished: hand back output app words, if any. */
		if (temp_args.dmawait.nappwords) {
			memcpy(temp_args.dmawait.appwords,
			       dmahead->appwords_o,
			       dmahead->nappwords_o * sizeof(u32));
		}
		kfree(dmahead);
	}
	if (copy_to_user((void __user *)args,
			 &temp_args,
			 sizeof(union xlnk_args)))
		return -EFAULT;
#endif

	return status;
}
991
/*
 * xlnk_dmarelease_ioctl - Release a previously requested DMA channel.
 * @filp: open device file (unused)
 * @code: ioctl command code (unused)
 * @args: user pointer to a union xlnk_args holding the channel handle
 *
 * Serializes against other device-pack operations with xlnk_devpack_sem
 * while handing the channel back via xdma_release_channel().
 *
 * Return: 0 on success, -EFAULT on a failed user-space copy, or -1 when
 * CONFIG_XILINX_DMA_APF is not enabled.
 */
static int xlnk_dmarelease_ioctl(struct file *filp, unsigned int code,
				 unsigned long args)
{
	int status = -1;
#ifdef CONFIG_XILINX_DMA_APF
	union xlnk_args temp_args;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));

	if (status)
		return -EFAULT;	/* failed user copy is a fault, not OOM */
	down(&xlnk_devpack_sem);
	xdma_release_channel((struct xdma_chan *)
			     (temp_args.dmarelease.dmachan));
	up(&xlnk_devpack_sem);
#endif

	return status;
}
1012
1013 static int xlnk_devregister_ioctl(struct file *filp, unsigned int code,
1014                                   unsigned long args)
1015 {
1016         union xlnk_args temp_args;
1017         int status;
1018         xlnk_intptr_type handle;
1019
1020         status = copy_from_user(&temp_args, (void __user *)args,
1021                                 sizeof(union xlnk_args));
1022
1023         if (status)
1024                 return -ENOMEM;
1025
1026         status = xlnk_devregister(temp_args.devregister.name,
1027                                   temp_args.devregister.id,
1028                                   temp_args.devregister.base,
1029                                   temp_args.devregister.size,
1030                                   temp_args.devregister.irqs,
1031                                   &handle);
1032
1033         return status;
1034 }
1035
1036 static int xlnk_dmaregister_ioctl(struct file *filp, unsigned int code,
1037                                   unsigned long args)
1038 {
1039         union xlnk_args temp_args;
1040         int status;
1041         xlnk_intptr_type handle;
1042
1043         status = copy_from_user(&temp_args, (void __user *)args,
1044                                 sizeof(union xlnk_args));
1045
1046         if (status)
1047                 return -ENOMEM;
1048
1049         status = xlnk_dmaregister(temp_args.dmaregister.name,
1050                                   temp_args.dmaregister.id,
1051                                   temp_args.dmaregister.base,
1052                                   temp_args.dmaregister.size,
1053                                   temp_args.dmaregister.chan_num,
1054                                   temp_args.dmaregister.chan0_dir,
1055                                   temp_args.dmaregister.chan0_irq,
1056                                   temp_args.dmaregister.chan0_poll_mode,
1057                                   temp_args.dmaregister.chan0_include_dre,
1058                                   temp_args.dmaregister.chan0_data_width,
1059                                   temp_args.dmaregister.chan1_dir,
1060                                   temp_args.dmaregister.chan1_irq,
1061                                   temp_args.dmaregister.chan1_poll_mode,
1062                                   temp_args.dmaregister.chan1_include_dre,
1063                                   temp_args.dmaregister.chan1_data_width,
1064                                   &handle);
1065
1066         return status;
1067 }
1068
1069 static int xlnk_devunregister_ioctl(struct file *filp,
1070                                     unsigned int code,
1071                                     unsigned long args)
1072 {
1073         union xlnk_args temp_args;
1074         int status;
1075
1076         status = copy_from_user(&temp_args, (void __user *)args,
1077                                 sizeof(union xlnk_args));
1078
1079         if (status)
1080                 return -ENOMEM;
1081
1082         xlnk_devpacks_free(temp_args.devunregister.base);
1083
1084         return 0;
1085 }
1086
1087 static int xlnk_cachecontrol_ioctl(struct file *filp, unsigned int code,
1088                                    unsigned long args)
1089 {
1090         union xlnk_args temp_args;
1091         int status, size;
1092         void *kaddr;
1093         xlnk_intptr_type paddr;
1094         int buf_id;
1095
1096         status = copy_from_user(&temp_args,
1097                                 (void __user *)args,
1098                                 sizeof(union xlnk_args));
1099
1100         if (status) {
1101                 dev_err(xlnk_dev, "Error in copy_from_user. status = %d\n",
1102                         status);
1103                 return -ENOMEM;
1104         }
1105
1106         if (!(temp_args.cachecontrol.action == 0 ||
1107               temp_args.cachecontrol.action == 1)) {
1108                 dev_err(xlnk_dev, "Illegal action specified to cachecontrol_ioctl: %d\n",
1109                         temp_args.cachecontrol.action);
1110                 return -EINVAL;
1111         }
1112
1113         size = temp_args.cachecontrol.size;
1114         paddr = temp_args.cachecontrol.phys_addr;
1115
1116         spin_lock(&xlnk_buf_lock);
1117         buf_id = xlnk_buf_find_by_phys_addr(paddr);
1118         kaddr = xlnk_bufpool[buf_id];
1119         spin_unlock(&xlnk_buf_lock);
1120
1121         if (buf_id == 0) {
1122                 pr_err("Illegal cachecontrol on non-sds_alloc memory");
1123                 return -EINVAL;
1124         }
1125
1126 #if XLNK_SYS_BIT_WIDTH == 32
1127         __cpuc_flush_dcache_area(kaddr, size);
1128         outer_flush_range(paddr, paddr + size);
1129         if (temp_args.cachecontrol.action == 1)
1130                 outer_inv_range(paddr, paddr + size);
1131 #else
1132         if (temp_args.cachecontrol.action == 1)
1133                 __dma_map_area(kaddr, size, DMA_FROM_DEVICE);
1134         else
1135                 __dma_map_area(kaddr, size, DMA_TO_DEVICE);
1136 #endif
1137         return 0;
1138 }
1139
1140 static int xlnk_memop_ioctl(struct file *filp, unsigned long arg_addr)
1141 {
1142         union xlnk_args args;
1143         xlnk_intptr_type p_addr = 0;
1144         int status = 0;
1145         int buf_id;
1146         struct xlnk_dmabuf_reg *cp = NULL;
1147         int cacheable = 1;
1148         enum dma_data_direction dmadir;
1149         xlnk_intptr_type page_id;
1150         unsigned int page_offset;
1151         struct scatterlist sg;
1152         unsigned long attrs = 0;
1153
1154         status = copy_from_user(&args,
1155                                 (void __user *)arg_addr,
1156                                 sizeof(union xlnk_args));
1157
1158         if (status) {
1159                 pr_err("Error in copy_from_user.  status = %d\n", status);
1160                 return status;
1161         }
1162
1163         if (!(args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) &&
1164             !(args.memop.flags & XLNK_FLAG_MEM_RELEASE)) {
1165                 pr_err("memop lacks acquire or release flag\n");
1166                 return -EINVAL;
1167         }
1168
1169         if (args.memop.flags & XLNK_FLAG_MEM_ACQUIRE &&
1170             args.memop.flags & XLNK_FLAG_MEM_RELEASE) {
1171                 pr_err("memop has both acquire and release defined\n");
1172                 return -EINVAL;
1173         }
1174
1175         spin_lock(&xlnk_buf_lock);
1176         buf_id = xlnk_buf_find_by_user_addr(args.memop.virt_addr,
1177                                             current->pid);
1178         if (buf_id > 0) {
1179                 cacheable = xlnk_bufcacheable[buf_id];
1180                 p_addr = xlnk_phyaddr[buf_id] +
1181                         (args.memop.virt_addr - xlnk_userbuf[buf_id]);
1182         } else {
1183                 struct xlnk_dmabuf_reg *dp;
1184
1185                 list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
1186                         if (dp->user_vaddr == args.memop.virt_addr) {
1187                                 cp = dp;
1188                                 break;
1189                         }
1190                 }
1191         }
1192         spin_unlock(&xlnk_buf_lock);
1193
1194         if (buf_id <= 0 && !cp) {
1195                 pr_err("Error, buffer not found\n");
1196                 return -EINVAL;
1197         }
1198
1199         dmadir = (enum dma_data_direction)args.memop.dir;
1200
1201         if (args.memop.flags & XLNK_FLAG_COHERENT || !cacheable)
1202                 attrs |= DMA_ATTR_SKIP_CPU_SYNC;
1203
1204         if (buf_id > 0) {
1205                 page_id = p_addr >> PAGE_SHIFT;
1206                 page_offset = p_addr - (page_id << PAGE_SHIFT);
1207                 sg_init_table(&sg, 1);
1208                 sg_set_page(&sg,
1209                             pfn_to_page(page_id),
1210                             args.memop.size,
1211                             page_offset);
1212                 sg_dma_len(&sg) = args.memop.size;
1213         }
1214
1215         if (args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) {
1216                 if (buf_id > 0) {
1217                         status = get_dma_ops(xlnk_dev)->map_sg(xlnk_dev,
1218                                                                &sg,
1219                                                                1,
1220                                                                dmadir,
1221                                                                attrs);
1222                         if (!status) {
1223                                 pr_err("Failed to map address\n");
1224                                 return -EINVAL;
1225                         }
1226                         args.memop.phys_addr = (xlnk_intptr_type)
1227                                 sg_dma_address(&sg);
1228                         args.memop.token = (xlnk_intptr_type)
1229                                 sg_dma_address(&sg);
1230                         status = copy_to_user((void __user *)arg_addr,
1231                                               &args,
1232                                               sizeof(union xlnk_args));
1233                         if (status)
1234                                 pr_err("Error in copy_to_user.  status = %d\n",
1235                                        status);
1236                 } else {
1237                         if (cp->dbuf_sg_table->nents != 1) {
1238                                 pr_err("Non-SG-DMA datamovers require physically contiguous DMABUFs.  DMABUF is not physically contiguous\n");
1239                                 return -EINVAL;
1240                         }
1241                         args.memop.phys_addr = (xlnk_intptr_type)
1242                                 sg_dma_address(cp->dbuf_sg_table->sgl);
1243                         args.memop.token = 0;
1244                         status = copy_to_user((void __user *)arg_addr,
1245                                               &args,
1246                                               sizeof(union xlnk_args));
1247                         if (status)
1248                                 pr_err("Error in copy_to_user.  status = %d\n",
1249                                        status);
1250                 }
1251         } else {
1252                 if (buf_id > 0) {
1253                         sg_dma_address(&sg) = (dma_addr_t)args.memop.token;
1254                         get_dma_ops(xlnk_dev)->unmap_sg(xlnk_dev,
1255                                                         &sg,
1256                                                         1,
1257                                                         dmadir,
1258                                                         attrs);
1259                 }
1260         }
1261
1262         return status;
1263 }
1264
1265 /* This function provides IO interface to the bridge driver. */
1266 static long xlnk_ioctl(struct file *filp,
1267                        unsigned int code,
1268                        unsigned long args)
1269 {
1270         if (_IOC_TYPE(code) != XLNK_IOC_MAGIC)
1271                 return -ENOTTY;
1272         if (_IOC_NR(code) > XLNK_IOC_MAXNR)
1273                 return -ENOTTY;
1274
1275         /* some sanity check */
1276         switch (code) {
1277         case XLNK_IOCALLOCBUF:
1278                 return xlnk_allocbuf_ioctl(filp, code, args);
1279         case XLNK_IOCFREEBUF:
1280                 return xlnk_freebuf_ioctl(filp, code, args);
1281         case XLNK_IOCADDDMABUF:
1282                 return xlnk_adddmabuf_ioctl(filp, code, args);
1283         case XLNK_IOCCLEARDMABUF:
1284                 return xlnk_cleardmabuf_ioctl(filp, code, args);
1285         case XLNK_IOCDMAREQUEST:
1286                 return xlnk_dmarequest_ioctl(filp, code, args);
1287         case XLNK_IOCDMASUBMIT:
1288                 return xlnk_dmasubmit_ioctl(filp, code, args);
1289         case XLNK_IOCDMAWAIT:
1290                 return xlnk_dmawait_ioctl(filp, code, args);
1291         case XLNK_IOCDMARELEASE:
1292                 return xlnk_dmarelease_ioctl(filp, code, args);
1293         case XLNK_IOCDEVREGISTER:
1294                 return xlnk_devregister_ioctl(filp, code, args);
1295         case XLNK_IOCDMAREGISTER:
1296                 return xlnk_dmaregister_ioctl(filp, code, args);
1297         case XLNK_IOCDEVUNREGISTER:
1298                 return xlnk_devunregister_ioctl(filp, code, args);
1299         case XLNK_IOCCACHECTRL:
1300                 return xlnk_cachecontrol_ioctl(filp, code, args);
1301         case XLNK_IOCSHUTDOWN:
1302                 return xlnk_shutdown(args);
1303         case XLNK_IOCRECRES:
1304                 return xlnk_recover_resource(args);
1305         case XLNK_IOCMEMOP:
1306                 return xlnk_memop_ioctl(filp, args);
1307         default:
1308                 return -EINVAL;
1309         }
1310 }
1311
/* VMA lifetime hooks: keep a count of live mappings of the device. */
static const struct vm_operations_struct xlnk_vm_ops = {
	.open = xlnk_vma_open,
	.close = xlnk_vma_close,
};
1316
/*
 * xlnk_mmap - Map a driver buffer (or the default device buffer) into
 * user space.
 *
 * The buffer id is encoded in the upper bits of the mmap offset
 * (vm_pgoff >> (16 - PAGE_SHIFT)); id 0 selects the static
 * xlnk_dev_buf, any other id selects a pool buffer by its recorded
 * physical address.  For pool buffers the mapping's start address and
 * owning pid are recorded so later ioctls can find the buffer by user
 * virtual address.
 *
 * NOTE(review): bufid is taken from a user-controlled offset and is not
 * range-checked against the pool size before indexing
 * xlnk_bufcacheable/xlnk_phyaddr -- confirm an out-of-range offset
 * cannot reach this point.  The mapping length (vm_end - vm_start) is
 * also not checked against the buffer's allocated size.
 */
static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int bufid;
	int status;

	/* Recover the buffer id encoded in the page offset. */
	bufid = vma->vm_pgoff >> (16 - PAGE_SHIFT);

	if (bufid == 0) {
		/* id 0: map the driver's static scratch buffer. */
		unsigned long paddr = virt_to_phys(xlnk_dev_buf);

		status = remap_pfn_range(vma,
					 vma->vm_start,
					 paddr >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	} else {
		/* Uncacheable buffers get a non-cached page protection. */
		if (xlnk_bufcacheable[bufid] == 0)
			vma->vm_page_prot =
				pgprot_noncached(vma->vm_page_prot);
		status = remap_pfn_range(vma, vma->vm_start,
					 xlnk_phyaddr[bufid]
					 >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
		/* Remember who mapped this buffer and where. */
		xlnk_userbuf[bufid] = vma->vm_start;
		xlnk_buf_process[bufid] = current->pid;
	}
	if (status) {
		pr_err("%s failed with code %d\n", __func__, status);
		return status;
	}

	xlnk_vma_open(vma);
	vma->vm_ops = &xlnk_vm_ops;
	vma->vm_private_data = xlnk_bufpool[bufid];

	return 0;
}
1356
1357 static void xlnk_vma_open(struct vm_area_struct *vma)
1358 {
1359         xlnk_dev_vmas++;
1360 }
1361
1362 static void xlnk_vma_close(struct vm_area_struct *vma)
1363 {
1364         xlnk_dev_vmas--;
1365 }
1366
/* Handler for XLNK_IOCSHUTDOWN: currently a no-op stub, always 0. */
static int xlnk_shutdown(unsigned long buf)
{
	return 0;
}
1371
/*
 * Handler for XLNK_IOCRECRES: reclaim all driver resources by freeing
 * every pool buffer and (when APF DMA is built in) releasing every DMA
 * channel.  Always returns 0.
 */
static int xlnk_recover_resource(unsigned long buf)
{
	xlnk_free_all_buf();
#ifdef CONFIG_XILINX_DMA_APF
	xdma_release_all_channels();
#endif
	return 0;
}
1380
/* Standard platform-driver registration; xlnk_driver is defined above. */
module_platform_driver(xlnk_driver);

MODULE_DESCRIPTION("Xilinx APF driver");
MODULE_LICENSE("GPL");