drivers/video/tegra/nvmap/nvmap_ioctl.c (sojka/nv-tegra/linux-3.10.git, blob 845b04a53b0a6bc0df15bbc7abd997dfa161226f)
1 /*
2  * drivers/video/tegra/nvmap/nvmap_ioctl.c
3  *
4  * User-space interface to nvmap
5  *
6  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #define pr_fmt(fmt)     "nvmap: %s() " fmt, __func__
24
25 #include <linux/dma-mapping.h>
26 #include <linux/export.h>
27 #include <linux/fs.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/uaccess.h>
31 #include <linux/nvmap.h>
32 #include <linux/vmalloc.h>
33
34 #include <asm/memory.h>
35
36 #include <trace/events/nvmap.h>
37
38 #include "nvmap_ioctl.h"
39 #include "nvmap_priv.h"
40
41 #include <linux/list.h>
42
43 static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
44                          int is_read, unsigned long h_offs,
45                          unsigned long sys_addr, unsigned long h_stride,
46                          unsigned long sys_stride, unsigned long elem_size,
47                          unsigned long count);
48
49 static struct nvmap_handle *fd_to_handle_id(int handle)
50 {
51         struct nvmap_handle *h;
52
53         h = nvmap_get_id_from_dmabuf_fd(NULL, handle);
54         if (!IS_ERR(h))
55                 return h;
56         return 0;
57 }
58
59 static struct nvmap_handle *unmarshal_user_handle(__u32 handle)
60 {
61         return fd_to_handle_id((int)handle);
62 }
63
64 struct nvmap_handle *unmarshal_user_id(u32 id)
65 {
66         return unmarshal_user_handle(id);
67 }
68
69 /*
70  * marshal_id/unmarshal_id are used by the get_id/handle_from_id paths.
71  * They were added to support using fds as handles.
72  */
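/*
 * On ARM64 a struct nvmap_handle pointer does not fit in the 32-bit id
 * field, so marshal_id() packs it by shifting right by two (handles are
 * at least 4-byte aligned) and unmarshal_id() restores the upper bits
 * from PAGE_OFFSET, assuming the handle lives in the kernel linear map.
 * On 32-bit ARM the pointer is passed through unchanged.
 */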
73 #ifdef CONFIG_ARM64
74 static __u32 marshal_id(struct nvmap_handle *handle)
75 {
76         return (__u32)((uintptr_t)handle >> 2);
77 }
78
79 static struct nvmap_handle *unmarshal_id(__u32 id)
80 {
81         uintptr_t h = ((id << 2) | PAGE_OFFSET);
82
83         return (struct nvmap_handle *)h;
84 }
85 #else
86 static __u32 marshal_id(struct nvmap_handle *handle)
87 {
88         return (uintptr_t)handle;
89 }
90
91 static struct nvmap_handle *unmarshal_id(__u32 id)
92 {
93         return (struct nvmap_handle *)id;
94 }
95 #endif
96
97 struct nvmap_handle *__nvmap_ref_to_id(struct nvmap_handle_ref *ref)
98 {
99         if (!virt_addr_valid(ref))
100                 return 0;
101         return ref->handle;
102 }
103
104 int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
105                       bool is32)
106 {
107 #ifdef CONFIG_COMPAT
108         struct nvmap_pin_handle_32 op32;
109         __u32 __user *output32 = NULL;
110 #endif
111         struct nvmap_pin_handle op;
112         struct nvmap_handle *h;
113         struct nvmap_handle *on_stack[16];
114         struct nvmap_handle **refs;
115         unsigned long __user *output;
116         unsigned int i;
117         int err = 0;
118
119 #ifdef CONFIG_COMPAT
120         if (is32) {
121                 if (copy_from_user(&op32, arg, sizeof(op32)))
122                         return -EFAULT;
123                 op.handles = (__u32 *)(uintptr_t)op32.handles;
124                 op.count = op32.count;
125         } else
126 #endif
127                 if (copy_from_user(&op, arg, sizeof(op)))
128                         return -EFAULT;
129
130         if (!op.count)
131                 return -EINVAL;
132
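        /*
         * ABI note: with count > 1, op.handles is a user pointer to an
         * array of u32 handles (fds) and op.addr points to the output
         * array; with count == 1, op.handles carries the handle value
         * itself and the result is written back into the ioctl struct.
         */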
133         if (op.count > 1) {
134                 size_t bytes = op.count * sizeof(*refs); /* kcalloc below will catch overflow. */
135
136                 if (op.count > ARRAY_SIZE(on_stack))
137                         refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
138                 else
139                         refs = on_stack;
140
141                 if (!refs)
142                         return -ENOMEM;
143
144                 if (!access_ok(VERIFY_READ, op.handles, bytes)) {
145                         err = -EFAULT;
146                         goto out;
147                 }
148
149                 for (i = 0; i < op.count; i++) {
150                         u32 handle;
151                         if (__get_user(handle, &op.handles[i])) {
152                                 err = -EFAULT;
153                                 goto out;
154                         }
155                         refs[i] = unmarshal_user_handle(handle);
156                         if (!refs[i]) {
157                                 err = -EINVAL;
158                                 goto out;
159                         }
160                 }
161         } else {
162                 refs = on_stack;
163
164                 /* Yes, we're storing a u32 in a pointer */
165                 on_stack[0] = unmarshal_user_handle((u32)(uintptr_t)op.handles);
166                 if (!on_stack[0]) {
167                         err = -EINVAL;
168                         goto out;
169                 }
170         }
171
172         trace_nvmap_ioctl_pinop(filp->private_data, is_pin, op.count, refs);
173         if (is_pin)
174                 err = nvmap_pin_ids(filp->private_data, op.count, refs);
175         else
176                 nvmap_unpin_ids(filp->private_data, op.count, refs);
177
178         /* skip the output stage on unpin */
179         if (err || !is_pin)
180                 goto out;
181
182         /* it is guaranteed that if nvmap_pin_ids returns 0, all of
183          * the handle_ref objects are valid, so dereferencing them
184          * directly here is safe */
185 #ifdef CONFIG_COMPAT
186         if (is32) {
187                 if (op.count > 1)
188                         output32 = (__u32 *)(uintptr_t)op.addr;
189                 else {
190                         struct nvmap_pin_handle_32 __user *tmp = arg;
191                         output32 = &tmp->addr;
192                 }
193
194                 if (!output32)
195                         goto out;
196         } else
197 #endif
198         {
199                 if (op.count > 1)
200                         output = op.addr;
201                 else {
202                         struct nvmap_pin_handle __user *tmp = arg;
203                         output = (unsigned long *)&tmp->addr;
204                 }
205
206                 if (!output)
207                         goto out;
208         }
209
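        /*
         * Report a device-visible address for each pinned handle: the
         * physical address of the first page for contiguous page
         * allocations, the DMA address from the attachment's sg_table
         * for non-contiguous allocations, or the carveout base otherwise.
         */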
210         for (i = 0; i < op.count && !err; i++) {
211                 unsigned long addr;
212
213                 h = refs[i];
214                 if (h->heap_pgalloc && h->pgalloc.contig)
215                         addr = page_to_phys(h->pgalloc.pages[0]);
216                 else if (h->heap_pgalloc)
217                         addr = sg_dma_address(
218                                 ((struct sg_table *)h->attachment->priv)->sgl);
219                 else
220                         addr = h->carveout->base;
221
222 #ifdef CONFIG_COMPAT
223                 if (is32)
224                         err = put_user((__u32)addr, &output32[i]);
225                 else
226 #endif
227                         err = put_user(addr, &output[i]);
228         }
229
230         if (err)
231                 nvmap_unpin_ids(filp->private_data, op.count, refs);
232
233 out:
234         if (refs != on_stack)
235                 kfree(refs);
236
237         return err;
238 }
239
240 int nvmap_ioctl_getid(struct file *filp, void __user *arg)
241 {
242         struct nvmap_client *client = filp->private_data;
243         struct nvmap_create_handle op;
244         struct nvmap_handle *h = NULL;
245
246         if (copy_from_user(&op, arg, sizeof(op)))
247                 return -EFAULT;
248
249         h = unmarshal_user_handle(op.handle);
250         if (!h)
251                 return -EINVAL;
252
253         h = nvmap_handle_get(h);
254
255         if (!h)
256                 return -EPERM;
257
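        /*
         * The returned id is the packed kernel pointer produced by
         * marshal_id(); marking the handle global when queried by its
         * owner presumably allows other clients to import it later via
         * NVMAP_IOC_FROM_ID.
         */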
258         op.id = marshal_id(h);
259         if (client == h->owner)
260                 h->global = true;
261
262         nvmap_handle_put(h);
263
264         return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
265 }
266
267 static int nvmap_share_release(struct inode *inode, struct file *file)
268 {
269         struct nvmap_handle *h = file->private_data;
270
271         nvmap_handle_put(h);
272         return 0;
273 }
274
275 static int nvmap_share_mmap(struct file *file, struct vm_area_struct *vma)
276 {
277         /* unsupported operation */
278         WARN(1, "mmap is not supported on an fd that shares an nvmap handle");
279         return -EPERM;
280 }
281
282 const struct file_operations nvmap_fd_fops = {
283         .owner          = THIS_MODULE,
284         .release        = nvmap_share_release,
285         .mmap           = nvmap_share_mmap,
286 };
287
288 int nvmap_ioctl_getfd(struct file *filp, void __user *arg)
289 {
290         struct nvmap_handle *handle;
291         struct nvmap_create_handle op;
292         struct nvmap_client *client = filp->private_data;
293
294         if (copy_from_user(&op, arg, sizeof(op)))
295                 return -EFAULT;
296
297         handle = unmarshal_user_handle(op.handle);
298         if (!handle)
299                 return -EINVAL;
300
301         op.fd = nvmap_get_dmabuf_fd(client, handle);
302         if (op.fd < 0)
303                 return op.fd;
304
305         if (copy_to_user(arg, &op, sizeof(op))) {
306                 sys_close(op.fd);
307                 return -EFAULT;
308         }
309         return 0;
310 }
311
312 int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
313 {
314         struct nvmap_alloc_handle op;
315         struct nvmap_client *client = filp->private_data;
316         struct nvmap_handle *handle;
317
318         if (copy_from_user(&op, arg, sizeof(op)))
319                 return -EFAULT;
320
321         handle = unmarshal_user_handle(op.handle);
322         if (!handle)
323                 return -EINVAL;
324
325         if (op.align & (op.align - 1))
326                 return -EINVAL;
327
328         /* user-space handles are aligned to page boundaries, to prevent
329          * data leakage. */
330         op.align = max_t(size_t, op.align, PAGE_SIZE);
331 #if defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
332         op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
333 #endif
334
335         return nvmap_alloc_handle(client, handle, op.heap_mask, op.align,
336                                   0, /* no kind */
337                                   op.flags & (~NVMAP_HANDLE_KIND_SPECIFIED));
338 }
339
340 int nvmap_ioctl_alloc_kind(struct file *filp, void __user *arg)
341 {
342         struct nvmap_alloc_kind_handle op;
343         struct nvmap_client *client = filp->private_data;
344         struct nvmap_handle *handle;
345
346         if (copy_from_user(&op, arg, sizeof(op)))
347                 return -EFAULT;
348
349         handle = unmarshal_user_handle(op.handle);
350         if (!handle)
351                 return -EINVAL;
352
353         if (op.align & (op.align - 1))
354                 return -EINVAL;
355
356         /* user-space handles are aligned to page boundaries, to prevent
357          * data leakage. */
358         op.align = max_t(size_t, op.align, PAGE_SIZE);
359 #if defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
360         op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
361 #endif
362
363         return nvmap_alloc_handle(client, handle,
364                                   op.heap_mask,
365                                   op.align,
366                                   op.kind,
367                                   op.flags);
368 }
369
370 int nvmap_create_fd(struct nvmap_handle *h)
371 {
372         int fd;
373
374         fd = __nvmap_dmabuf_fd(h->dmabuf, O_CLOEXEC);
375         BUG_ON(fd == 0);
376         if (fd < 0) {
377                 pr_err("Out of file descriptors\n");
378                 return fd;
379         }
380         /* __nvmap_dmabuf_fd() associates the fd with dma_buf->file.
381          * Closing the fd drops one reference on dmabuf->file, so take
382          * an extra reference on the dma_buf here to balance it.
383          */
384         get_dma_buf(h->dmabuf);
385         return fd;
386 }
387
388 int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
389 {
390         struct nvmap_create_handle op;
391         struct nvmap_handle_ref *ref = NULL;
392         struct nvmap_client *client = filp->private_data;
393         int err = 0;
394         int fd = 0;
395
396         if (copy_from_user(&op, arg, sizeof(op)))
397                 return -EFAULT;
398
399         if (!client)
400                 return -ENODEV;
401
402         if (cmd == NVMAP_IOC_CREATE) {
403                 ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
404                 if (!IS_ERR(ref))
405                         ref->handle->orig_size = op.size;
406         } else if (cmd == NVMAP_IOC_FROM_ID) {
407                 ref = nvmap_duplicate_handle(client, unmarshal_id(op.id), 0);
408         } else if (cmd == NVMAP_IOC_FROM_FD) {
409                 ref = nvmap_create_handle_from_fd(client, op.fd);
410         } else {
411                 return -EINVAL;
412         }
413
414         if (IS_ERR(ref))
415                 return PTR_ERR(ref);
416
417         fd = nvmap_create_fd(ref->handle);
418         if (fd < 0)
419                 err = fd;
420
421         op.handle = fd;
422
423         if (copy_to_user(arg, &op, sizeof(op))) {
424                 err = -EFAULT;
425                 nvmap_free_handle(client, __nvmap_ref_to_id(ref));
426         }
427
428         if (err && fd > 0)
429                 sys_close(fd);
430         return err;
431 }
432
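/*
 * Expected usage (a sketch, not taken from this file): user space first
 * mmap()s a range from the nvmap device, then binds a handle to that
 * exact range with the map-into-caller ioctl, roughly:
 *
 *	void *va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			nvmap_fd, 0);
 *	struct nvmap_map_caller op = {
 *		.handle = handle_fd, .offset = 0,
 *		.length = len, .addr = (uintptr_t)va,
 *	};
 *	ioctl(nvmap_fd, NVMAP_IOC_MMAP, &op);
 *
 * The NVMAP_IOC_MMAP name and the mmap-on-/dev/nvmap step are assumed
 * from the wider driver rather than defined in this file. The checks
 * below enforce that the VMA was created by this driver and matches the
 * requested offset/length exactly.
 */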
433 int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg, bool is32)
434 {
435         struct nvmap_client *client = filp->private_data;
436         struct nvmap_map_caller op;
437 #ifdef CONFIG_COMPAT
438         struct nvmap_map_caller_32 op32;
439 #endif
440         struct nvmap_vma_priv *priv;
441         struct vm_area_struct *vma;
442         struct nvmap_handle *h = NULL;
443         int err = 0;
444
445 #ifdef CONFIG_COMPAT
446         if (is32) {
447                 if (copy_from_user(&op32, arg, sizeof(op32)))
448                         return -EFAULT;
449                 op.handle = op32.handle;
450                 op.offset = op32.offset;
451                 op.length = op32.length;
452                 op.flags = op32.flags;
453                 op.addr = op32.addr;
454         } else
455 #endif
456                 if (copy_from_user(&op, arg, sizeof(op)))
457                         return -EFAULT;
458
459         h = unmarshal_user_handle(op.handle);
460
461         if (!h)
462                 return -EINVAL;
463
464         h = nvmap_handle_get(h);
465
466         if (!h)
467                 return -EPERM;
468
469         if (!h->alloc) {
470                 nvmap_handle_put(h);
471                 return -EFAULT;
472         }
473
474         trace_nvmap_map_into_caller_ptr(client, h, op.offset,
475                                         op.length, op.flags);
476         down_read(&current->mm->mmap_sem);
477
478         vma = find_vma(current->mm, op.addr);
479         if (!vma) {
480                 err = -ENOMEM;
481                 goto out;
482         }
483
484         if (op.offset & ~PAGE_MASK) {
485                 err = -EFAULT;
486                 goto out;
487         }
488
489         if (op.offset >= h->size || op.length > h->size - op.offset) {
490                 err = -EADDRNOTAVAIL;
491                 goto out;
492         }
493
494         /* the VMA must exactly match the requested mapping operation, and the
495          * VMA that is targeted must have been created by this driver
496          */
497         if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
498             (vma->vm_end-vma->vm_start != op.length)) {
499                 err = -EPERM;
500                 goto out;
501         }
502
503         /* verify that each mmap() system call creates a unique VMA */
504         if (vma->vm_private_data)
505                 goto out;
506
507         if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
508                 err = -EFAULT;
509                 goto out;
510         }
511
512         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
513         if (!priv)  {
514                 err = -ENOMEM;
515                 goto out;
516         }
517
518         vma->vm_flags |= (h->heap_pgalloc ? 0 : VM_PFNMAP);
519         priv->handle = h;
520         priv->offs = op.offset;
521         vma->vm_private_data = priv;
522         vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
523         nvmap_vma_open(vma);
524
525 out:
526         up_read(&current->mm->mmap_sem);
527
528         if (err)
529                 nvmap_handle_put(h);
530         return err;
531 }
532
533 int nvmap_ioctl_get_param(struct file *filp, void __user *arg, bool is32)
534 {
535 #ifdef CONFIG_COMPAT
536         struct nvmap_handle_param_32 __user *uarg32 = arg;
537 #endif
538         struct nvmap_handle_param __user *uarg = arg;
539         struct nvmap_handle_param op;
540         struct nvmap_client *client = filp->private_data;
541         struct nvmap_handle_ref *ref;
542         struct nvmap_handle *h;
543         u64 result;
544         int err = 0;
545
546 #ifdef CONFIG_COMPAT
547         /* Copying only the 32-bit struct is safe: the incoming value of result doesn't matter */
548         if (is32) {
549                 if (copy_from_user(&op, arg,
550                                 sizeof(struct nvmap_handle_param_32)))
551                         return -EFAULT;
552         } else
553 #endif
554                 if (copy_from_user(&op, arg, sizeof(op)))
555                         return -EFAULT;
556
557         h = unmarshal_user_handle(op.handle);
558         if (!h)
559                 return -EINVAL;
560
561         h = nvmap_handle_get(h);
562         if (!h)
563                 return -EINVAL;
564
565         nvmap_ref_lock(client);
566         ref = __nvmap_validate_locked(client, h);
567         if (IS_ERR_OR_NULL(ref)) {
568                 err = ref ? PTR_ERR(ref) : -EINVAL;
569                 goto ref_fail;
570         }
571
572         err = nvmap_get_handle_param(client, ref, op.param, &result);
573
574 #ifdef CONFIG_COMPAT
575         if (is32)
576                 err = put_user((__u32)result, &uarg32->result);
577         else
578 #endif
579                 err = put_user((unsigned long)result, &uarg->result);
580
581 ref_fail:
582         nvmap_ref_unlock(client);
583         nvmap_handle_put(h);
584         return err;
585 }
586
587 int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
588                           bool is32)
589 {
590         struct nvmap_client *client = filp->private_data;
591         struct nvmap_rw_handle __user *uarg = arg;
592         struct nvmap_rw_handle op;
593 #ifdef CONFIG_COMPAT
594         struct nvmap_rw_handle_32 __user *uarg32 = arg;
595         struct nvmap_rw_handle_32 op32;
596 #endif
597         struct nvmap_handle *h;
598         ssize_t copied;
599         int err = 0;
600
601 #ifdef CONFIG_COMPAT
602         if (is32) {
603                 if (copy_from_user(&op32, arg, sizeof(op32)))
604                         return -EFAULT;
605                 op.addr = op32.addr;
606                 op.handle = op32.handle;
607                 op.offset = op32.offset;
608                 op.elem_size = op32.elem_size;
609                 op.hmem_stride = op32.hmem_stride;
610                 op.user_stride = op32.user_stride;
611                 op.count = op32.count;
612         } else
613 #endif
614                 if (copy_from_user(&op, arg, sizeof(op)))
615                         return -EFAULT;
616
617         h = unmarshal_user_handle(op.handle);
618         if (!h || !op.addr || !op.count || !op.elem_size)
619                 return -EINVAL;
620
621         h = nvmap_handle_get(h);
622         if (!h)
623                 return -EPERM;
624
625         nvmap_kmaps_inc(h);
626         trace_nvmap_ioctl_rw_handle(client, h, is_read, op.offset,
627                                     op.addr, op.hmem_stride,
628                                     op.user_stride, op.elem_size, op.count);
629         copied = rw_handle(client, h, is_read, op.offset,
630                            (unsigned long)op.addr, op.hmem_stride,
631                            op.user_stride, op.elem_size, op.count);
632         nvmap_kmaps_dec(h);
633
634         if (copied < 0) {
635                 err = copied;
636                 copied = 0;
637         } else if (copied < (op.count * op.elem_size))
638                 err = -EINTR;
639
640 #ifdef CONFIG_COMPAT
641         if (is32)
642                 __put_user(copied, &uarg32->count);
643         else
644 #endif
645                 __put_user(copied, &uarg->count);
646
647         nvmap_handle_put(h);
648
649         return err;
650 }
651
652 static int __nvmap_cache_maint(struct nvmap_client *client,
653                                struct nvmap_cache_op *op)
654 {
655         struct vm_area_struct *vma;
656         struct nvmap_vma_priv *priv;
657         struct nvmap_handle *handle;
658         unsigned long start;
659         unsigned long end;
660         int err = 0;
661
662         handle = unmarshal_user_handle(op->handle);
663         if (!handle || !op->addr || op->op < NVMAP_CACHE_OP_WB ||
664             op->op > NVMAP_CACHE_OP_WB_INV)
665                 return -EINVAL;
666
667         down_read(&current->mm->mmap_sem);
668
669         vma = find_vma(current->active_mm, (unsigned long)op->addr);
670         if (!vma || !is_nvmap_vma(vma) ||
671             (ulong)op->addr < vma->vm_start ||
672             (ulong)op->addr >= vma->vm_end ||
673             op->len > vma->vm_end - (ulong)op->addr) {
674                 err = -EADDRNOTAVAIL;
675                 goto out;
676         }
677
678         priv = (struct nvmap_vma_priv *)vma->vm_private_data;
679
680         if (priv->handle != handle) {
681                 err = -EFAULT;
682                 goto out;
683         }
684
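        /*
         * Convert the user VA into an offset within the handle: distance
         * from the VMA start plus the VMA's mmap offset into the handle
         * (vm_pgoff, in pages).
         */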
685         start = (unsigned long)op->addr - vma->vm_start +
686                 (vma->vm_pgoff << PAGE_SHIFT);
687         end = start + op->len;
688
689         err = __nvmap_do_cache_maint(client, priv->handle, start, end, op->op,
690                                      false);
691 out:
692         up_read(&current->mm->mmap_sem);
693         return err;
694 }
695
696 int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg, bool is32)
697 {
698         struct nvmap_client *client = filp->private_data;
699         struct nvmap_cache_op op;
700 #ifdef CONFIG_COMPAT
701         struct nvmap_cache_op_32 op32;
702 #endif
703
704 #ifdef CONFIG_COMPAT
705         if (is32) {
706                 if (copy_from_user(&op32, arg, sizeof(op32)))
707                         return -EFAULT;
708                 op.addr = op32.addr;
709                 op.handle = op32.handle;
710                 op.len = op32.len;
711                 op.op = op32.op;
712         } else
713 #endif
714                 if (copy_from_user(&op, arg, sizeof(op)))
715                         return -EFAULT;
716
717         return __nvmap_cache_maint(client, &op);
718 }
719
720 int nvmap_ioctl_free(struct file *filp, unsigned long arg)
721 {
722         struct nvmap_client *client = filp->private_data;
723
724         if (!arg)
725                 return 0;
726
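        /* arg is the dma-buf fd serving as the user-space handle: drop
         * the client's reference, then close the fd itself. */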
727         nvmap_free_handle_user_id(client, arg);
728         return sys_close(arg);
729 }
730
731 static void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
732 {
733         if (op == NVMAP_CACHE_OP_WB_INV)
734                 dmac_flush_range(vaddr, vaddr + size);
735         else if (op == NVMAP_CACHE_OP_INV)
736                 dmac_map_area(vaddr, size, DMA_FROM_DEVICE);
737         else
738                 dmac_map_area(vaddr, size, DMA_TO_DEVICE);
739 }
740
741 static void outer_cache_maint(unsigned int op, phys_addr_t paddr, size_t size)
742 {
743         if (op == NVMAP_CACHE_OP_WB_INV)
744                 outer_flush_range(paddr, paddr + size);
745         else if (op == NVMAP_CACHE_OP_INV)
746                 outer_inv_range(paddr, paddr + size);
747         else
748                 outer_clean_range(paddr, paddr + size);
749 }
750
751 static void heap_page_cache_maint(
752         struct nvmap_handle *h, unsigned long start, unsigned long end,
753         unsigned int op, bool inner, bool outer, pte_t **pte,
754         unsigned long kaddr, pgprot_t prot, bool clean_only_dirty)
755 {
756         if (h->userflags & NVMAP_HANDLE_CACHE_SYNC) {
757                 /*
758                  * zap user VA->PA mappings so that any access to the pages
759                  * will fault and the pages can be marked dirty again
760                  */
761                 nvmap_handle_mkclean(h, start, end-start);
762                 nvmap_zap_handle(h, start, end - start);
763         }
764
765 #ifdef NVMAP_LAZY_VFREE
766         if (inner) {
767                 void *vaddr = NULL;
768
769                 if (!h->vaddr) {
770                         struct page **pages;
771                         /* no mutex is needed here: the kmap count was
772                          * already incremented in __nvmap_do_cache_maint,
773                          * which protects against migration.
774                          */
775                         nvmap_kmaps_inc_no_lock(h);
776                         pages = nvmap_pages(h->pgalloc.pages,
777                                             h->size >> PAGE_SHIFT);
778                         if (!pages)
779                                 goto per_page_cache_maint;
780                         vaddr = vm_map_ram(pages,
781                                         h->size >> PAGE_SHIFT, -1, prot);
782                         nvmap_altfree(pages,
783                                 (h->size >> PAGE_SHIFT) * sizeof(*pages));
784                 }
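                /*
                 * Publish our mapping only if no other thread installed
                 * one first; if the cmpxchg sees a non-zero old value,
                 * release our mapping and drop the extra kmap count.
                 */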
785                 if (vaddr && atomic_long_cmpxchg(&h->vaddr, 0, (long)vaddr)) {
786                         nvmap_kmaps_dec(h);
787                         vm_unmap_ram(vaddr, h->size >> PAGE_SHIFT);
788                 }
789                 if (h->vaddr) {
790                         /* Fast inner cache maintenance using single mapping */
791                         inner_cache_maint(op, h->vaddr + start, end - start);
792                         if (!outer)
793                                 return;
794                         /* Skip per-page inner maintenance in loop below */
795                         inner = false;
796                 }
797         }
798 per_page_cache_maint:
799         if (!h->vaddr)
800                 nvmap_kmaps_dec(h);
801 #endif
802
803         while (start < end) {
804                 struct page *page;
805                 phys_addr_t paddr;
806                 unsigned long next;
807                 unsigned long off;
808                 size_t size;
809
810                 page = nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
811                 next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
812                 off = start & ~PAGE_MASK;
813                 size = next - start;
814                 paddr = page_to_phys(page) + off;
815
816                 if (inner) {
817                         void *vaddr = (void *)kaddr + off;
818                         BUG_ON(!pte);
819                         BUG_ON(!kaddr);
820                         set_pte_at(&init_mm, kaddr, *pte,
821                                 pfn_pte(__phys_to_pfn(paddr), prot));
822                         nvmap_flush_tlb_kernel_page(kaddr);
823                         inner_cache_maint(op, vaddr, size);
824                 }
825
826                 if (outer)
827                         outer_cache_maint(op, paddr, size);
828                 start = next;
829         }
830 }
831
832 #if defined(CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS)
833 static bool fast_cache_maint_outer(unsigned long start,
834                 unsigned long end, unsigned int op)
835 {
836         bool result = false;
837         if (end - start >= cache_maint_outer_threshold) {
838                 if (op == NVMAP_CACHE_OP_WB_INV) {
839                         outer_flush_all();
840                         result = true;
841                 }
842                 if (op == NVMAP_CACHE_OP_WB) {
843                         outer_clean_all();
844                         result = true;
845                 }
846         }
847
848         return result;
849 }
850 #else
851 static inline bool fast_cache_maint_outer(unsigned long start,
852                 unsigned long end, unsigned int op)
853 {
854         return false;
855 }
856 #endif
857
858 #if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
859 static inline bool can_fast_cache_maint(struct nvmap_handle *h,
860         unsigned long start,
861         unsigned long end, unsigned int op)
862 {
863         if ((op == NVMAP_CACHE_OP_INV) ||
864                 ((end - start) < cache_maint_inner_threshold))
865                 return false;
866         return true;
867 }
868 #else
869 static inline bool can_fast_cache_maint(struct nvmap_handle *h,
870         unsigned long start,
871         unsigned long end, unsigned int op)
872 {
873         return false;
874 }
875 #endif
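/*
 * For large ranges it is cheaper to clean or flush the entire inner
 * cache (by set/way) than to walk the range line by line, hence the
 * cache_maint_inner_threshold check above. A plain invalidate never
 * takes this path, since invalidating the whole cache would discard
 * unrelated dirty lines.
 */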
876
877 static bool fast_cache_maint(struct nvmap_handle *h,
878         unsigned long start,
879         unsigned long end, unsigned int op,
880         bool clean_only_dirty)
881 {
882         if (!can_fast_cache_maint(h, start, end, op))
883                 return false;
884
885         if (h->userflags & NVMAP_HANDLE_CACHE_SYNC) {
886                 nvmap_handle_mkclean(h, 0, h->size);
887                 nvmap_zap_handle(h, 0, h->size);
888         }
889
890         if (op == NVMAP_CACHE_OP_WB_INV)
891                 inner_flush_cache_all();
892         else if (op == NVMAP_CACHE_OP_WB)
893                 inner_clean_cache_all();
894
895         /* outer maintenance */
896         if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
897                 if (!fast_cache_maint_outer(start, end, op))
898                 {
899                         if (h->heap_pgalloc) {
900                                 heap_page_cache_maint(h, start,
901                                         end, op, false, true, NULL, 0, 0,
902                                         clean_only_dirty);
903                         } else  {
904                                 phys_addr_t pstart;
905
906                                 pstart = start + h->carveout->base;
907                                 outer_cache_maint(op, pstart, end - start);
908                         }
909                 }
910         }
911         return true;
912 }
913
914 struct cache_maint_op {
915         phys_addr_t start;
916         phys_addr_t end;
917         unsigned int op;
918         struct nvmap_handle *h;
919         bool inner;
920         bool outer;
921         bool clean_only_dirty;
922 };
923
924 static int do_cache_maint(struct cache_maint_op *cache_work)
925 {
926         pgprot_t prot;
927         pte_t **pte = NULL;
928         unsigned long kaddr;
929         phys_addr_t pstart = cache_work->start;
930         phys_addr_t pend = cache_work->end;
931         phys_addr_t loop;
932         int err = 0;
933         struct nvmap_handle *h = cache_work->h;
934         struct nvmap_client *client;
935         unsigned int op = cache_work->op;
936
937         if (!h || !h->alloc)
938                 return -EFAULT;
939
940         client = h->owner;
941         if (can_fast_cache_maint(h, pstart, pend, op))
942                 nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
943         else
944                 nvmap_stats_inc(NS_CFLUSH_DONE, pend - pstart);
945         trace_nvmap_cache_maint(client, h, pstart, pend, op, pend - pstart);
946         trace_nvmap_cache_flush(pend - pstart,
947                 nvmap_stats_read(NS_ALLOC),
948                 nvmap_stats_read(NS_CFLUSH_RQ),
949                 nvmap_stats_read(NS_CFLUSH_DONE));
950
951         wmb();
952         if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
953             h->flags == NVMAP_HANDLE_WRITE_COMBINE || pstart == pend)
954                 goto out;
955
956         if (fast_cache_maint(h, pstart, pend, op, cache_work->clean_only_dirty))
957                 goto out;
958
959         prot = nvmap_pgprot(h, PG_PROT_KERNEL);
960         pte = nvmap_alloc_pte(h->dev, (void **)&kaddr);
961         if (IS_ERR(pte)) {
962                 err = PTR_ERR(pte);
963                 pte = NULL;
964                 goto out;
965         }
966
967         if (h->heap_pgalloc) {
968                 heap_page_cache_maint(h, pstart, pend, op, true,
969                         (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ?
970                                         false : true,
971                         pte, kaddr, prot,
972                         cache_work->clean_only_dirty);
973                 goto out;
974         }
975
976         if (pstart > h->size || pend > h->size) {
977                 pr_warn("cache maintenance outside handle\n");
978                 err = -EINVAL;
979                 goto out;
980         }
981
982         pstart += h->carveout->base;
983         pend += h->carveout->base;
984         loop = pstart;
985
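        /*
         * Walk the carveout range one page at a time, pointing the
         * single reserved kernel PTE (kaddr) at each physical page and
         * flushing the TLB before doing inner maintenance through that
         * temporary mapping.
         */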
986         while (loop < pend) {
987                 phys_addr_t next = (loop + PAGE_SIZE) & PAGE_MASK;
988                 void *base = (void *)kaddr + (loop & ~PAGE_MASK);
989                 next = min(next, pend);
990
991                 set_pte_at(&init_mm, kaddr, *pte,
992                            pfn_pte(__phys_to_pfn(loop), prot));
993                 nvmap_flush_tlb_kernel_page(kaddr);
994
995                 inner_cache_maint(op, base, next - loop);
996                 loop = next;
997         }
998
999         if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
1000                 outer_cache_maint(op, pstart, pend - pstart);
1001
1002 out:
1003         if (pte)
1004                 nvmap_free_pte(h->dev, pte);
1005         return err;
1006 }
1007
1008 int __nvmap_do_cache_maint(struct nvmap_client *client,
1009                         struct nvmap_handle *h,
1010                         unsigned long start, unsigned long end,
1011                         unsigned int op, bool clean_only_dirty)
1012 {
1013         int err;
1014         struct cache_maint_op cache_op;
1015
1016         h = nvmap_handle_get(h);
1017         if (!h)
1018                 return -EFAULT;
1019
1020         nvmap_kmaps_inc(h);
1021         if (op == NVMAP_CACHE_OP_INV)
1022                 op = NVMAP_CACHE_OP_WB_INV;
1023
1024         /* clean-only-dirty applies only to the write-back operation */
1025         if (op != NVMAP_CACHE_OP_WB)
1026                 clean_only_dirty = false;
1027
1028         cache_op.h = h;
1029         cache_op.start = start;
1030         cache_op.end = end;
1031         cache_op.op = op;
1032         cache_op.inner = h->flags == NVMAP_HANDLE_CACHEABLE ||
1033                          h->flags == NVMAP_HANDLE_INNER_CACHEABLE;
1034         cache_op.outer = h->flags == NVMAP_HANDLE_CACHEABLE;
1035         cache_op.clean_only_dirty = clean_only_dirty;
1036
1037         nvmap_stats_inc(NS_CFLUSH_RQ, end - start);
1038         err = do_cache_maint(&cache_op);
1039         nvmap_kmaps_dec(h);
1040         nvmap_handle_put(h);
1041         return err;
1042 }
1043
1044 static int rw_handle_page(struct nvmap_handle *h, int is_read,
1045                           unsigned long start, unsigned long rw_addr,
1046                           unsigned long bytes, unsigned long kaddr, pte_t *pte)
1047 {
1048         pgprot_t prot = nvmap_pgprot(h, PG_PROT_KERNEL);
1049         unsigned long end = start + bytes;
1050         int err = 0;
1051
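        /*
         * Copy one physical page (or less) per iteration: map it at the
         * reserved kernel address, copy to/from the user buffer, and for
         * heap pages hold a page reference across the copy.
         */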
1052         while (!err && start < end) {
1053                 struct page *page = NULL;
1054                 phys_addr_t phys;
1055                 size_t count;
1056                 void *src;
1057
1058                 if (!h->heap_pgalloc) {
1059                         phys = h->carveout->base + start;
1060                 } else {
1061                         page =
1062                            nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
1063                         BUG_ON(!page);
1064                         get_page(page);
1065                         phys = page_to_phys(page) + (start & ~PAGE_MASK);
1066                 }
1067
1068                 set_pte_at(&init_mm, kaddr, pte,
1069                            pfn_pte(__phys_to_pfn(phys), prot));
1070                 nvmap_flush_tlb_kernel_page(kaddr);
1071
1072                 src = (void *)kaddr + (phys & ~PAGE_MASK);
1073                 phys = PAGE_SIZE - (phys & ~PAGE_MASK);
1074                 count = min_t(size_t, end - start, phys);
1075
1076                 if (is_read)
1077                         err = copy_to_user((void *)rw_addr, src, count);
1078                 else
1079                         err = copy_from_user(src, (void *)rw_addr, count);
1080
1081                 if (err)
1082                         err = -EFAULT;
1083
1084                 rw_addr += count;
1085                 start += count;
1086
1087                 if (page)
1088                         put_page(page);
1089         }
1090
1091         return err;
1092 }
1093
1094 static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
1095                          int is_read, unsigned long h_offs,
1096                          unsigned long sys_addr, unsigned long h_stride,
1097                          unsigned long sys_stride, unsigned long elem_size,
1098                          unsigned long count)
1099 {
1100         ssize_t copied = 0;
1101         pte_t **pte;
1102         void *addr;
1103         int ret = 0;
1104
1105         if (!elem_size)
1106                 return -EINVAL;
1107
1108         if (!h->alloc)
1109                 return -EFAULT;
1110
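        /*
         * If both strides equal the element size the transfer is fully
         * contiguous, so fold it into a single large element to avoid
         * per-element cache maintenance and page mapping.
         */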
1111         if (elem_size == h_stride && elem_size == sys_stride) {
1112                 elem_size *= count;
1113                 h_stride = elem_size;
1114                 sys_stride = elem_size;
1115                 count = 1;
1116         }
1117
1118         pte = nvmap_alloc_pte(nvmap_dev, &addr);
1119         if (IS_ERR(pte))
1120                 return PTR_ERR(pte);
1121
1122         while (count--) {
1123                 if (h_offs + elem_size > h->size) {
1124                         nvmap_warn(client, "read/write outside of handle\n");
1125                         ret = -EFAULT;
1126                         break;
1127                 }
1128                 if (is_read)
1129                         __nvmap_do_cache_maint(client, h, h_offs,
1130                                 h_offs + elem_size, NVMAP_CACHE_OP_INV, false);
1131
1132                 ret = rw_handle_page(h, is_read, h_offs, sys_addr,
1133                                      elem_size, (unsigned long)addr, *pte);
1134
1135                 if (ret)
1136                         break;
1137
1138                 if (!is_read)
1139                         __nvmap_do_cache_maint(client, h, h_offs,
1140                                 h_offs + elem_size, NVMAP_CACHE_OP_WB_INV,
1141                                 false);
1142
1143                 copied += elem_size;
1144                 sys_addr += sys_stride;
1145                 h_offs += h_stride;
1146         }
1147
1148         nvmap_free_pte(nvmap_dev, pte);
1149         return ret ?: copied;
1150 }
1151
1152 int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg,
1153                                  bool is_reserve_ioctl)
1154 {
1155         struct nvmap_cache_op_list op;
1156         u32 *handle_ptr;
1157         u32 *offset_ptr;
1158         u32 *size_ptr;
1159         struct nvmap_handle **refs;
1160         int i, err = 0;
1161
1162         if (copy_from_user(&op, arg, sizeof(op)))
1163                 return -EFAULT;
1164
1165         if (!op.nr)
1166                 return -EINVAL;
1167
1168         if (!access_ok(VERIFY_READ, op.handles, op.nr * sizeof(u32)))
1169                 return -EFAULT;
1170
1171         if (!access_ok(VERIFY_READ, op.offsets, op.nr * sizeof(u32)))
1172                 return -EFAULT;
1173
1174         if (!access_ok(VERIFY_READ, op.sizes, op.nr * sizeof(u32)))
1175                 return -EFAULT;
1176
1177         if (!op.offsets || !op.sizes)
1178                 return -EINVAL;
1179
1180         refs = kcalloc(op.nr, sizeof(*refs), GFP_KERNEL);
1181
1182         if (!refs)
1183                 return -ENOMEM;
1184
1185         handle_ptr = (u32 *)(uintptr_t)op.handles;
1186         offset_ptr = (u32 *)(uintptr_t)op.offsets;
1187         size_ptr = (u32 *)(uintptr_t)op.sizes;
1188
1189         for (i = 0; i < op.nr; i++) {
1190                 u32 handle;
1191
1192                 if (copy_from_user(&handle, &handle_ptr[i], sizeof(handle))) {
1193                         err = -EFAULT;
1194                         goto free_mem;
1195                 }
1196
1197                 refs[i] = unmarshal_user_handle(handle);
1198                 if (!refs[i]) {
1199                         err = -EINVAL;
1200                         goto free_mem;
1201                 }
1202         }
1203
1204         if (is_reserve_ioctl)
1205                 err = nvmap_reserve_pages(refs, offset_ptr, size_ptr,
1206                                           op.nr, op.op);
1207         else
1208                 err = nvmap_do_cache_maint_list(refs, offset_ptr, size_ptr,
1209                                                 op.op, op.nr);
1210
1211 free_mem:
1212         kfree(refs);
1213         return err;
1214 }
1215