1 /*
2  * drivers/video/tegra/nvmap/nvmap_ioctl.c
3  *
4  * User-space interface to nvmap
5  *
6  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #define pr_fmt(fmt)     "nvmap: %s() " fmt, __func__
24
25 #include <linux/dma-mapping.h>
26 #include <linux/export.h>
27 #include <linux/fs.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/uaccess.h>
31 #include <linux/nvmap.h>
32 #include <linux/vmalloc.h>
33
34 #include <asm/memory.h>
35
36 #include <trace/events/nvmap.h>
37
38 #include "nvmap_ioctl.h"
39 #include "nvmap_priv.h"
40
41 #include <linux/list.h>
42
43 static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
44                          int is_read, unsigned long h_offs,
45                          unsigned long sys_addr, unsigned long h_stride,
46                          unsigned long sys_stride, unsigned long elem_size,
47                          unsigned long count);
48
49 /* NOTE: Callers of this utility function must invoke nvmap_handle_put after
50  * using the returned nvmap_handle.
51  */
52 struct nvmap_handle *unmarshal_user_handle(__u32 handle)
53 {
54         struct nvmap_handle *h;
55
56         h = nvmap_get_id_from_dmabuf_fd(NULL, (int)handle);
57         if (!IS_ERR(h))
58                 return h;
59         return NULL;
60 }
61
62 struct nvmap_handle *__nvmap_ref_to_id(struct nvmap_handle_ref *ref)
63 {
64         if (!virt_addr_valid(ref))
65                 return NULL;
66         return ref->handle;
67 }
68
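/*
 * Pin/unpin ioctl handler: unmarshals one or more handles from user space,
 * pins or unpins them, and, on pin, writes the resulting device addresses
 * back to the user-supplied buffer.
 */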
69 int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
70                       bool is32)
71 {
72 #ifdef CONFIG_COMPAT
73         struct nvmap_pin_handle_32 op32;
74         __u32 __user *output32 = NULL;
75 #endif
76         struct nvmap_pin_handle op;
77         struct nvmap_handle *h;
78         struct nvmap_handle *on_stack[16];
79         struct nvmap_handle **refs;
80         unsigned long __user *output;
81         int err = 0;
82         u32 i, n_unmarshal_handles = 0;
83
84 #ifdef CONFIG_COMPAT
85         if (is32) {
86                 if (copy_from_user(&op32, arg, sizeof(op32)))
87                         return -EFAULT;
88                 op.handles = (__u32 *)(uintptr_t)op32.handles;
89                 op.count = op32.count;
90         } else
91 #endif
92                 if (copy_from_user(&op, arg, sizeof(op)))
93                         return -EFAULT;
94
95         if (!op.count)
96                 return -EINVAL;
97
98         if (op.count > 1) {
99                 size_t bytes = op.count * sizeof(*refs); /* kcalloc below will catch overflow. */
100
101                 if (op.count > ARRAY_SIZE(on_stack))
102                         refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
103                 else
104                         refs = on_stack;
105
106                 if (!refs)
107                         return -ENOMEM;
108
109                 if (!access_ok(VERIFY_READ, op.handles, bytes)) {
110                         err = -EFAULT;
111                         goto out;
112                 }
113
114                 for (i = 0; i < op.count; i++) {
115                         u32 handle;
116                         if (__get_user(handle, &op.handles[i])) {
117                                 err = -EFAULT;
118                                 goto out;
119                         }
120                         refs[i] = unmarshal_user_handle(handle);
121                         if (!refs[i]) {
122                                 err = -EINVAL;
123                                 goto out;
124                         }
125                         n_unmarshal_handles++;
126                 }
127         } else {
128                 refs = on_stack;
129
130                 /* With a single handle, op.handles holds the u32 handle value itself, not a user pointer */
131                 on_stack[0] = unmarshal_user_handle((u32)(uintptr_t)op.handles);
132                 if (!on_stack[0]) {
133                         err = -EINVAL;
134                         goto out;
135                 }
136                 n_unmarshal_handles++;
137         }
138
139         trace_nvmap_ioctl_pinop(filp->private_data, is_pin, op.count, refs);
140         if (is_pin)
141                 err = nvmap_pin_ids(filp->private_data, op.count, refs);
142         else
143                 nvmap_unpin_ids(filp->private_data, op.count, refs);
144
145         /* skip the output stage on unpin */
146         if (err || !is_pin)
147                 goto out;
148
149         /* if nvmap_pin_ids() returns 0, all of the handle_ref objects are
150          * guaranteed to be valid, so dereferencing them directly here is
151          * safe */
152 #ifdef CONFIG_COMPAT
153         if (is32) {
154                 if (op.count > 1)
155                         output32 = (__u32 *)(uintptr_t)op.addr;
156                 else {
157                         struct nvmap_pin_handle_32 __user *tmp = arg;
158                         output32 = &tmp->addr;
159                 }
160
161                 if (!output32)
162                         goto out;
163         } else
164 #endif
165         {
166                 if (op.count > 1)
167                         output = op.addr;
168                 else {
169                         struct nvmap_pin_handle __user *tmp = arg;
170                         output = (unsigned long *)&tmp->addr;
171                 }
172
173                 if (!output)
174                         goto out;
175         }
176
177         for (i = 0; i < op.count && !err; i++) {
178                 unsigned long addr;
179
180                 h = refs[i];
181                 if (h->heap_pgalloc && h->pgalloc.contig)
182                         addr = page_to_phys(h->pgalloc.pages[0]);
183                 else if (h->heap_pgalloc)
184                         addr = sg_dma_address(
185                                 ((struct sg_table *)h->attachment->priv)->sgl);
186                 else
187                         addr = h->carveout->base;
188
189 #ifdef CONFIG_COMPAT
190                 if (is32)
191                         err = put_user((__u32)addr, &output32[i]);
192                 else
193 #endif
194                         err = put_user(addr, &output[i]);
195         }
196
197         if (err)
198                 nvmap_unpin_ids(filp->private_data, op.count, refs);
199
200 out:
201         for (i = 0; i < n_unmarshal_handles; i++)
202                 nvmap_handle_put(refs[i]);
203
204         if (refs != on_stack)
205                 kfree(refs);
206
207         return err;
208 }
209
210 static int nvmap_share_release(struct inode *inode, struct file *file)
211 {
212         struct nvmap_handle *h = file->private_data;
213
214         nvmap_handle_put(h);
215         return 0;
216 }
217
218 static int nvmap_share_mmap(struct file *file, struct vm_area_struct *vma)
219 {
220         /* unsupported operation */
221         WARN(1, "mmap is not supported on an fd that shares an nvmap handle\n");
222         return -EPERM;
223 }
224
225 const struct file_operations nvmap_fd_fops = {
226         .owner          = THIS_MODULE,
227         .release        = nvmap_share_release,
228         .mmap           = nvmap_share_mmap,
229 };
230
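/*
 * Get-fd ioctl handler: looks up the handle named by user space and returns
 * a new dma-buf file descriptor for it in op.fd.
 */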
231 int nvmap_ioctl_getfd(struct file *filp, void __user *arg)
232 {
233         struct nvmap_handle *handle;
234         struct nvmap_create_handle op;
235         struct nvmap_client *client = filp->private_data;
236
237         if (copy_from_user(&op, arg, sizeof(op)))
238                 return -EFAULT;
239
240         handle = unmarshal_user_handle(op.handle);
241         if (!handle)
242                 return -EINVAL;
243
244         op.fd = nvmap_get_dmabuf_fd(client, handle);
245         nvmap_handle_put(handle);
246         if (op.fd < 0)
247                 return op.fd;
248
249         if (copy_to_user(arg, &op, sizeof(op))) {
250                 sys_close(op.fd);
251                 return -EFAULT;
252         }
253         return 0;
254 }
255
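/*
 * Alloc ioctl handler: allocates backing memory for an existing handle.
 * The requested alignment must be a power of two and is raised to at least
 * PAGE_SIZE.
 */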
256 int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
257 {
258         struct nvmap_alloc_handle op;
259         struct nvmap_client *client = filp->private_data;
260         struct nvmap_handle *handle;
261         int err;
262
263         if (copy_from_user(&op, arg, sizeof(op)))
264                 return -EFAULT;
265
266         if (op.align & (op.align - 1))
267                 return -EINVAL;
268
269         handle = unmarshal_user_handle(op.handle);
270         if (!handle)
271                 return -EINVAL;
272
273         /* user-space handles are aligned to page boundaries, to prevent
274          * data leakage. */
275         op.align = max_t(size_t, op.align, PAGE_SIZE);
276 #if defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
277         op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
278 #endif
279
280         err = nvmap_alloc_handle(client, handle, op.heap_mask, op.align,
281                                   0, /* no kind */
282                                   op.flags & (~NVMAP_HANDLE_KIND_SPECIFIED));
283         nvmap_handle_put(handle);
284         return err;
285 }
286
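/*
 * Same as nvmap_ioctl_alloc(), except that the user-supplied memory kind
 * and flags are passed through to nvmap_alloc_handle().
 */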
287 int nvmap_ioctl_alloc_kind(struct file *filp, void __user *arg)
288 {
289         struct nvmap_alloc_kind_handle op;
290         struct nvmap_client *client = filp->private_data;
291         struct nvmap_handle *handle;
292         int err;
293
294         if (copy_from_user(&op, arg, sizeof(op)))
295                 return -EFAULT;
296
297         if (op.align & (op.align - 1))
298                 return -EINVAL;
299
300         handle = unmarshal_user_handle(op.handle);
301         if (!handle)
302                 return -EINVAL;
303
304         /* user-space handles are aligned to page boundaries, to prevent
305          * data leakage. */
306         op.align = max_t(size_t, op.align, PAGE_SIZE);
307 #if defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
308         op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
309 #endif
310
311         err = nvmap_alloc_handle(client, handle,
312                                   op.heap_mask,
313                                   op.align,
314                                   op.kind,
315                                   op.flags);
316         nvmap_handle_put(handle);
317         return err;
318 }
319
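/*
 * Allocate a new O_CLOEXEC file descriptor referring to the handle's
 * dma-buf.
 */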
320 int nvmap_create_fd(struct nvmap_handle *h)
321 {
322         int fd;
323
324         fd = __nvmap_dmabuf_fd(h->dmabuf, O_CLOEXEC);
325         BUG_ON(fd == 0);
326         if (fd < 0) {
327                 pr_err("Out of file descriptors\n");
328                 return fd;
329         }
330         /* __nvmap_dmabuf_fd() associates the fd with dma_buf->file.
331          * Closing the fd drops one reference on dmabuf->file, so take an
332          * extra reference on the dma_buf here to keep the count balanced.
333          */
334         get_dma_buf(h->dmabuf);
335         return fd;
336 }
337
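/*
 * Create ioctl handler: creates a fresh handle (NVMAP_IOC_CREATE) or wraps
 * an existing dma-buf fd (NVMAP_IOC_FROM_FD), then returns a new fd for the
 * handle to user space in op.handle.
 */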
338 int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
339 {
340         struct nvmap_create_handle op;
341         struct nvmap_handle_ref *ref = NULL;
342         struct nvmap_client *client = filp->private_data;
343         int err = 0;
344         int fd = 0;
345
346         if (copy_from_user(&op, arg, sizeof(op)))
347                 return -EFAULT;
348
349         if (!client)
350                 return -ENODEV;
351
352         if (cmd == NVMAP_IOC_CREATE) {
353                 ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
354                 if (!IS_ERR(ref))
355                         ref->handle->orig_size = op.size;
356         } else if (cmd == NVMAP_IOC_FROM_FD) {
357                 ref = nvmap_create_handle_from_fd(client, op.fd);
358         } else {
359                 return -EINVAL;
360         }
361
362         if (IS_ERR(ref))
363                 return PTR_ERR(ref);
364
365         fd = nvmap_create_fd(ref->handle);
366         if (fd < 0)
367                 err = fd;
368
369         op.handle = fd;
370
371         if (copy_to_user(arg, &op, sizeof(op))) {
372                 err = -EFAULT;
373                 nvmap_free_handle(client, __nvmap_ref_to_id(ref));
374         }
375
376         if (err && fd > 0)
377                 sys_close(fd);
378         return err;
379 }
380
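/*
 * Backs a caller-provided VMA with an nvmap handle. The VMA must have been
 * created by this driver, must exactly match the requested address and
 * length, and must not already be bound to a handle.
 */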
381 int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg, bool is32)
382 {
383         struct nvmap_client *client = filp->private_data;
384         struct nvmap_map_caller op;
385 #ifdef CONFIG_COMPAT
386         struct nvmap_map_caller_32 op32;
387 #endif
388         struct nvmap_vma_priv *priv;
389         struct vm_area_struct *vma;
390         struct nvmap_handle *h = NULL;
391         int err = 0;
392
393 #ifdef CONFIG_COMPAT
394         if (is32) {
395                 if (copy_from_user(&op32, arg, sizeof(op32)))
396                         return -EFAULT;
397                 op.handle = op32.handle;
398                 op.offset = op32.offset;
399                 op.length = op32.length;
400                 op.flags = op32.flags;
401                 op.addr = op32.addr;
402         } else
403 #endif
404                 if (copy_from_user(&op, arg, sizeof(op)))
405                         return -EFAULT;
406
407         h = unmarshal_user_handle(op.handle);
408         if (!h)
409                 return -EINVAL;
410
411         if (!h->alloc) {
412                 nvmap_handle_put(h);
413                 return -EFAULT;
414         }
415
416         trace_nvmap_map_into_caller_ptr(client, h, op.offset,
417                                         op.length, op.flags);
418         down_read(&current->mm->mmap_sem);
419
420         vma = find_vma(current->mm, op.addr);
421         if (!vma) {
422                 err = -ENOMEM;
423                 goto out;
424         }
425
426         if (op.offset & ~PAGE_MASK) {
427                 err = -EFAULT;
428                 goto out;
429         }
430
431         if (op.offset >= h->size || op.length > h->size - op.offset) {
432                 err = -EADDRNOTAVAIL;
433                 goto out;
434         }
435
436         /* the VMA must exactly match the requested mapping operation, and the
437          * VMA that is targeted must have been created by this driver
438          */
439         if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
440             (vma->vm_end-vma->vm_start != op.length)) {
441                 err = -EPERM;
442                 goto out;
443         }
444
445         /* verify that each mmap() system call creates a unique VMA */
446         if (vma->vm_private_data)
447                 goto out;
448
449         if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
450                 err = -EFAULT;
451                 goto out;
452         }
453
454         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
455         if (!priv)  {
456                 err = -ENOMEM;
457                 goto out;
458         }
459
460         vma->vm_flags |= (h->heap_pgalloc ? 0 : VM_PFNMAP);
461         priv->handle = h;
462         priv->offs = op.offset;
463         vma->vm_private_data = priv;
464         vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
465         nvmap_vma_open(vma);
466
467 out:
468         up_read(&current->mm->mmap_sem);
469
470         if (err)
471                 nvmap_handle_put(h);
472         return err;
473 }
474
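/*
 * Get-param ioctl handler: validates that the calling client holds a
 * reference to the handle and returns the requested handle parameter.
 */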
475 int nvmap_ioctl_get_param(struct file *filp, void __user *arg, bool is32)
476 {
477 #ifdef CONFIG_COMPAT
478         struct nvmap_handle_param_32 __user *uarg32 = arg;
479 #endif
480         struct nvmap_handle_param __user *uarg = arg;
481         struct nvmap_handle_param op;
482         struct nvmap_client *client = filp->private_data;
483         struct nvmap_handle_ref *ref;
484         struct nvmap_handle *h;
485         u64 result;
486         int err = 0;
487
488 #ifdef CONFIG_COMPAT
489         /* This is safe because the incoming value of result doesn't matter */
490         if (is32) {
491                 if (copy_from_user(&op, arg,
492                                 sizeof(struct nvmap_handle_param_32)))
493                         return -EFAULT;
494         } else
495 #endif
496                 if (copy_from_user(&op, arg, sizeof(op)))
497                         return -EFAULT;
498
499         h = unmarshal_user_handle(op.handle);
500         if (!h)
501                 return -EINVAL;
502
503         nvmap_ref_lock(client);
504         ref = __nvmap_validate_locked(client, h);
505         if (IS_ERR_OR_NULL(ref)) {
506                 err = ref ? PTR_ERR(ref) : -EINVAL;
507                 goto ref_fail;
508         }
509
510         err = nvmap_get_handle_param(client, ref, op.param, &result);
511         if (err)
                    goto ref_fail;

512 #ifdef CONFIG_COMPAT
513         if (is32)
514                 err = put_user((__u32)result, &uarg32->result);
515         else
516 #endif
517                 err = put_user((unsigned long)result, &uarg->result);
518
519 ref_fail:
520         nvmap_ref_unlock(client);
521         nvmap_handle_put(h);
522         return err;
523 }
524
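/*
 * Read/write ioctl handler: copies data between user memory and the handle
 * via rw_handle() and reports the number of bytes copied in op.count.
 */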
525 int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
526                           bool is32)
527 {
528         struct nvmap_client *client = filp->private_data;
529         struct nvmap_rw_handle __user *uarg = arg;
530         struct nvmap_rw_handle op;
531 #ifdef CONFIG_COMPAT
532         struct nvmap_rw_handle_32 __user *uarg32 = arg;
533         struct nvmap_rw_handle_32 op32;
534 #endif
535         struct nvmap_handle *h;
536         ssize_t copied;
537         int err = 0;
538
539 #ifdef CONFIG_COMPAT
540         if (is32) {
541                 if (copy_from_user(&op32, arg, sizeof(op32)))
542                         return -EFAULT;
543                 op.addr = op32.addr;
544                 op.handle = op32.handle;
545                 op.offset = op32.offset;
546                 op.elem_size = op32.elem_size;
547                 op.hmem_stride = op32.hmem_stride;
548                 op.user_stride = op32.user_stride;
549                 op.count = op32.count;
550         } else
551 #endif
552                 if (copy_from_user(&op, arg, sizeof(op)))
553                         return -EFAULT;
554
555         if (!op.addr || !op.count || !op.elem_size)
556                 return -EINVAL;
557
558         h = unmarshal_user_handle(op.handle);
559         if (!h)
560                 return -EINVAL;
561
562         nvmap_kmaps_inc(h);
563         trace_nvmap_ioctl_rw_handle(client, h, is_read, op.offset,
564                                     op.addr, op.hmem_stride,
565                                     op.user_stride, op.elem_size, op.count);
566         copied = rw_handle(client, h, is_read, op.offset,
567                            (unsigned long)op.addr, op.hmem_stride,
568                            op.user_stride, op.elem_size, op.count);
569         nvmap_kmaps_dec(h);
570
571         if (copied < 0) {
572                 err = copied;
573                 copied = 0;
574         } else if (copied < (op.count * op.elem_size))
575                 err = -EINTR;
576
577 #ifdef CONFIG_COMPAT
578         if (is32)
579                 __put_user(copied, &uarg32->count);
580         else
581 #endif
582                 __put_user(copied, &uarg->count);
583
584         nvmap_handle_put(h);
585
586         return err;
587 }
588
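/*
 * Cache maintenance on a user mapping of a handle: the address range must
 * lie within an nvmap VMA that is backed by the same handle.
 */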
589 static int __nvmap_cache_maint(struct nvmap_client *client,
590                                struct nvmap_cache_op *op)
591 {
592         struct vm_area_struct *vma;
593         struct nvmap_vma_priv *priv;
594         struct nvmap_handle *handle;
595         unsigned long start;
596         unsigned long end;
597         int err = 0;
598
599         if (!op->addr || op->op < NVMAP_CACHE_OP_WB ||
600             op->op > NVMAP_CACHE_OP_WB_INV)
601                 return -EINVAL;
602
603         handle = unmarshal_user_handle(op->handle);
604         if (!handle)
605                 return -EINVAL;
606
607         down_read(&current->mm->mmap_sem);
608
609         vma = find_vma(current->mm, (unsigned long)op->addr);
610         if (!vma || !is_nvmap_vma(vma) ||
611             (ulong)op->addr < vma->vm_start ||
612             (ulong)op->addr >= vma->vm_end ||
613             op->len > vma->vm_end - (ulong)op->addr) {
614                 err = -EADDRNOTAVAIL;
615                 goto out;
616         }
617
618         priv = (struct nvmap_vma_priv *)vma->vm_private_data;
619
620         if (!priv || priv->handle != handle) {
621                 err = -EFAULT;
622                 goto out;
623         }
624
625         start = (unsigned long)op->addr - vma->vm_start +
626                 (vma->vm_pgoff << PAGE_SHIFT);
627         end = start + op->len;
628
629         err = __nvmap_do_cache_maint(client, priv->handle, start, end, op->op,
630                                      false);
631 out:
632         up_read(&current->mm->mmap_sem);
633         nvmap_handle_put(handle);
634         return err;
635 }
636
637 int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg, bool is32)
638 {
639         struct nvmap_client *client = filp->private_data;
640         struct nvmap_cache_op op;
641 #ifdef CONFIG_COMPAT
642         struct nvmap_cache_op_32 op32;
643 #endif
644
645 #ifdef CONFIG_COMPAT
646         if (is32) {
647                 if (copy_from_user(&op32, arg, sizeof(op32)))
648                         return -EFAULT;
649                 op.addr = op32.addr;
650                 op.handle = op32.handle;
651                 op.len = op32.len;
652                 op.op = op32.op;
653         } else
654 #endif
655                 if (copy_from_user(&op, arg, sizeof(op)))
656                         return -EFAULT;
657
658         return __nvmap_cache_maint(client, &op);
659 }
660
661 int nvmap_ioctl_free(struct file *filp, unsigned long arg)
662 {
663         struct nvmap_client *client = filp->private_data;
664
665         if (!arg)
666                 return 0;
667
668         nvmap_free_handle_user_id(client, arg);
669         return sys_close(arg);
670 }
671
672 static void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
673 {
674         if (op == NVMAP_CACHE_OP_WB_INV)
675                 dmac_flush_range(vaddr, vaddr + size);
676         else if (op == NVMAP_CACHE_OP_INV)
677                 dmac_map_area(vaddr, size, DMA_FROM_DEVICE);
678         else
679                 dmac_map_area(vaddr, size, DMA_TO_DEVICE);
680 }
681
682 static void outer_cache_maint(unsigned int op, phys_addr_t paddr, size_t size)
683 {
684         if (op == NVMAP_CACHE_OP_WB_INV)
685                 outer_flush_range(paddr, paddr + size);
686         else if (op == NVMAP_CACHE_OP_INV)
687                 outer_inv_range(paddr, paddr + size);
688         else
689                 outer_clean_range(paddr, paddr + size);
690 }
691
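/*
 * Per-page cache maintenance for page-allocated handles. With
 * NVMAP_LAZY_VFREE, a single kernel mapping of the whole handle is used for
 * inner maintenance where possible; otherwise each page is temporarily
 * mapped through the supplied pte.
 */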
692 static void heap_page_cache_maint(
693         struct nvmap_handle *h, unsigned long start, unsigned long end,
694         unsigned int op, bool inner, bool outer, pte_t **pte,
695         unsigned long kaddr, pgprot_t prot, bool clean_only_dirty)
696 {
697         if (h->userflags & NVMAP_HANDLE_CACHE_SYNC) {
698                 /*
699                  * zap user VA->PA mappings so that any access to the pages
700                  * will result in a fault and can be marked dirty
701                  */
702                 nvmap_handle_mkclean(h, start, end-start);
703                 nvmap_zap_handle(h, start, end - start);
704         }
705
706 #ifdef NVMAP_LAZY_VFREE
707         if (inner) {
708                 void *vaddr = NULL;
709
710                 if (!h->vaddr) {
711                         struct page **pages;
712                         /* no extra locking is needed here: the kmap count
713                          * was already raised in __nvmap_do_cache_maint to
714                          * protect the handle from migration.
715                          */
716                         nvmap_kmaps_inc_no_lock(h);
717                         pages = nvmap_pages(h->pgalloc.pages,
718                                             h->size >> PAGE_SHIFT);
719                         if (!pages)
720                                 goto per_page_cache_maint;
721                         vaddr = vm_map_ram(pages,
722                                         h->size >> PAGE_SHIFT, -1, prot);
723                         nvmap_altfree(pages,
724                                 (h->size >> PAGE_SHIFT) * sizeof(*pages));
725                 }
726                 if (vaddr && atomic_long_cmpxchg(&h->vaddr, 0, (long)vaddr)) {
727                         nvmap_kmaps_dec(h);
728                         vm_unmap_ram(vaddr, h->size >> PAGE_SHIFT);
729                 }
730                 if (h->vaddr) {
731                         /* Fast inner cache maintenance using single mapping */
732                         inner_cache_maint(op, h->vaddr + start, end - start);
733                         if (!outer)
734                                 return;
735                         /* Skip per-page inner maintenance in loop below */
736                         inner = false;
737                 }
738         }
739 per_page_cache_maint:
740         if (!h->vaddr)
741                 nvmap_kmaps_dec(h);
742 #endif
743
744         while (start < end) {
745                 struct page *page;
746                 phys_addr_t paddr;
747                 unsigned long next;
748                 unsigned long off;
749                 size_t size;
750
751                 page = nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
752                 next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
753                 off = start & ~PAGE_MASK;
754                 size = next - start;
755                 paddr = page_to_phys(page) + off;
756
757                 if (inner) {
758                         void *vaddr = (void *)kaddr + off;
759                         BUG_ON(!pte);
760                         BUG_ON(!kaddr);
761                         set_pte_at(&init_mm, kaddr, *pte,
762                                 pfn_pte(__phys_to_pfn(paddr), prot));
763                         nvmap_flush_tlb_kernel_page(kaddr);
764                         inner_cache_maint(op, vaddr, size);
765                 }
766
767                 if (outer)
768                         outer_cache_maint(op, paddr, size);
769                 start = next;
770         }
771 }
772
773 #if defined(CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS)
774 static bool fast_cache_maint_outer(unsigned long start,
775                 unsigned long end, unsigned int op)
776 {
777         bool result = false;
778         if (end - start >= cache_maint_outer_threshold) {
779                 if (op == NVMAP_CACHE_OP_WB_INV) {
780                         outer_flush_all();
781                         result = true;
782                 }
783                 if (op == NVMAP_CACHE_OP_WB) {
784                         outer_clean_all();
785                         result = true;
786                 }
787         }
788
789         return result;
790 }
791 #else
792 static inline bool fast_cache_maint_outer(unsigned long start,
793                 unsigned long end, unsigned int op)
794 {
795         return false;
796 }
797 #endif
798
799 #if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
800 static inline bool can_fast_cache_maint(struct nvmap_handle *h,
801         unsigned long start,
802         unsigned long end, unsigned int op)
803 {
804         if ((op == NVMAP_CACHE_OP_INV) ||
805                 ((end - start) < cache_maint_inner_threshold))
806                 return false;
807         return true;
808 }
809 #else
810 static inline bool can_fast_cache_maint(struct nvmap_handle *h,
811         unsigned long start,
812         unsigned long end, unsigned int op)
813 {
814         return false;
815 }
816 #endif
817
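/*
 * For large enough ranges, flush or clean the entire inner cache (and, where
 * supported, the outer cache) instead of walking the range. Returns true if
 * the maintenance was fully handled here.
 */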
818 static bool fast_cache_maint(struct nvmap_handle *h,
819         unsigned long start,
820         unsigned long end, unsigned int op,
821         bool clean_only_dirty)
822 {
823         if (!can_fast_cache_maint(h, start, end, op))
824                 return false;
825
826         if (h->userflags & NVMAP_HANDLE_CACHE_SYNC) {
827                 nvmap_handle_mkclean(h, 0, h->size);
828                 nvmap_zap_handle(h, 0, h->size);
829         }
830
831         if (op == NVMAP_CACHE_OP_WB_INV)
832                 inner_flush_cache_all();
833         else if (op == NVMAP_CACHE_OP_WB)
834                 inner_clean_cache_all();
835
836         /* outer maintenance */
837         if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
838                 if (!fast_cache_maint_outer(start, end, op))
839                 {
840                         if (h->heap_pgalloc) {
841                                 heap_page_cache_maint(h, start,
842                                         end, op, false, true, NULL, 0, 0,
843                                         clean_only_dirty);
844                         } else  {
845                                 phys_addr_t pstart;
846
847                                 pstart = start + h->carveout->base;
848                                 outer_cache_maint(op, pstart, end - start);
849                         }
850                 }
851         }
852         return true;
853 }
854
855 struct cache_maint_op {
856         phys_addr_t start;
857         phys_addr_t end;
858         unsigned int op;
859         struct nvmap_handle *h;
860         bool inner;
861         bool outer;
862         bool clean_only_dirty;
863 };
864
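/*
 * Core cache maintenance: skips uncacheable and write-combined handles,
 * tries the whole-cache fast path first, and otherwise walks the range
 * through a temporary pte mapping.
 */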
865 static int do_cache_maint(struct cache_maint_op *cache_work)
866 {
867         pgprot_t prot;
868         pte_t **pte = NULL;
869         unsigned long kaddr;
870         phys_addr_t pstart = cache_work->start;
871         phys_addr_t pend = cache_work->end;
872         phys_addr_t loop;
873         int err = 0;
874         struct nvmap_handle *h = cache_work->h;
875         struct nvmap_client *client;
876         unsigned int op = cache_work->op;
877
878         if (!h || !h->alloc)
879                 return -EFAULT;
880
881         client = h->owner;
882         if (can_fast_cache_maint(h, pstart, pend, op))
883                 nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
884         else
885                 nvmap_stats_inc(NS_CFLUSH_DONE, pend - pstart);
886         trace_nvmap_cache_maint(client, h, pstart, pend, op, pend - pstart);
887         trace_nvmap_cache_flush(pend - pstart,
888                 nvmap_stats_read(NS_ALLOC),
889                 nvmap_stats_read(NS_CFLUSH_RQ),
890                 nvmap_stats_read(NS_CFLUSH_DONE));
891
892         wmb();
893         if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
894             h->flags == NVMAP_HANDLE_WRITE_COMBINE || pstart == pend)
895                 goto out;
896
897         if (fast_cache_maint(h, pstart, pend, op, cache_work->clean_only_dirty))
898                 goto out;
899
900         prot = nvmap_pgprot(h, PG_PROT_KERNEL);
901         pte = nvmap_alloc_pte(h->dev, (void **)&kaddr);
902         if (IS_ERR(pte)) {
903                 err = PTR_ERR(pte);
904                 pte = NULL;
905                 goto out;
906         }
907
908         if (h->heap_pgalloc) {
909                 heap_page_cache_maint(h, pstart, pend, op, true,
910                         (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ?
911                                         false : true,
912                         pte, kaddr, prot,
913                         cache_work->clean_only_dirty);
914                 goto out;
915         }
916
917         if (pstart > h->size || pend > h->size) {
918                 pr_warn("cache maintenance outside handle\n");
919                 err = -EINVAL;
920                 goto out;
921         }
922
923         pstart += h->carveout->base;
924         pend += h->carveout->base;
925         loop = pstart;
926
927         while (loop < pend) {
928                 phys_addr_t next = (loop + PAGE_SIZE) & PAGE_MASK;
929                 void *base = (void *)kaddr + (loop & ~PAGE_MASK);
930                 next = min(next, pend);
931
932                 set_pte_at(&init_mm, kaddr, *pte,
933                            pfn_pte(__phys_to_pfn(loop), prot));
934                 nvmap_flush_tlb_kernel_page(kaddr);
935
936                 inner_cache_maint(op, base, next - loop);
937                 loop = next;
938         }
939
940         if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
941                 outer_cache_maint(op, pstart, pend - pstart);
942
943 out:
944         if (pte)
945                 nvmap_free_pte(h->dev, pte);
946         return err;
947 }
948
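/*
 * Entry point used by the ioctl paths: takes a reference on the handle,
 * normalizes the requested operation and delegates to do_cache_maint().
 */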
949 int __nvmap_do_cache_maint(struct nvmap_client *client,
950                         struct nvmap_handle *h,
951                         unsigned long start, unsigned long end,
952                         unsigned int op, bool clean_only_dirty)
953 {
954         int err;
955         struct cache_maint_op cache_op;
956
957         h = nvmap_handle_get(h);
958         if (!h)
959                 return -EFAULT;
960
961         nvmap_kmaps_inc(h);
962         if (op == NVMAP_CACHE_OP_INV)
963                 op = NVMAP_CACHE_OP_WB_INV;
964
965         /* clean_only_dirty applies only to the write-back operation */
966         if (op != NVMAP_CACHE_OP_WB)
967                 clean_only_dirty = false;
968
969         cache_op.h = h;
970         cache_op.start = start;
971         cache_op.end = end;
972         cache_op.op = op;
973         cache_op.inner = h->flags == NVMAP_HANDLE_CACHEABLE ||
974                          h->flags == NVMAP_HANDLE_INNER_CACHEABLE;
975         cache_op.outer = h->flags == NVMAP_HANDLE_CACHEABLE;
976         cache_op.clean_only_dirty = clean_only_dirty;
977
978         nvmap_stats_inc(NS_CFLUSH_RQ, end - start);
979         err = do_cache_maint(&cache_op);
980         nvmap_kmaps_dec(h);
981         nvmap_handle_put(h);
982         return err;
983 }
984
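/*
 * Copy 'bytes' bytes between user memory at rw_addr and the handle starting
 * at 'start', one page at a time, by temporarily mapping each backing page
 * at kaddr through the supplied pte.
 */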
985 static int rw_handle_page(struct nvmap_handle *h, int is_read,
986                           unsigned long start, unsigned long rw_addr,
987                           unsigned long bytes, unsigned long kaddr, pte_t *pte)
988 {
989         pgprot_t prot = nvmap_pgprot(h, PG_PROT_KERNEL);
990         unsigned long end = start + bytes;
991         int err = 0;
992
993         while (!err && start < end) {
994                 struct page *page = NULL;
995                 phys_addr_t phys;
996                 size_t count;
997                 void *src;
998
999                 if (!h->heap_pgalloc) {
1000                         phys = h->carveout->base + start;
1001                 } else {
1002                         page =
1003                            nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
1004                         BUG_ON(!page);
1005                         get_page(page);
1006                         phys = page_to_phys(page) + (start & ~PAGE_MASK);
1007                 }
1008
1009                 set_pte_at(&init_mm, kaddr, pte,
1010                            pfn_pte(__phys_to_pfn(phys), prot));
1011                 nvmap_flush_tlb_kernel_page(kaddr);
1012
1013                 src = (void *)kaddr + (phys & ~PAGE_MASK);
1014                 phys = PAGE_SIZE - (phys & ~PAGE_MASK);
1015                 count = min_t(size_t, end - start, phys);
1016
1017                 if (is_read)
1018                         err = copy_to_user((void *)rw_addr, src, count);
1019                 else
1020                         err = copy_from_user(src, (void *)rw_addr, count);
1021
1022                 if (err)
1023                         err = -EFAULT;
1024
1025                 rw_addr += count;
1026                 start += count;
1027
1028                 if (page)
1029                         put_page(page);
1030         }
1031
1032         return err;
1033 }
1034
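/*
 * Strided copy between user memory and a handle. When both strides equal
 * the element size, the elements are coalesced into a single contiguous
 * copy. Cache maintenance is performed around each element as needed.
 */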
1035 static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
1036                          int is_read, unsigned long h_offs,
1037                          unsigned long sys_addr, unsigned long h_stride,
1038                          unsigned long sys_stride, unsigned long elem_size,
1039                          unsigned long count)
1040 {
1041         ssize_t copied = 0;
1042         pte_t **pte;
1043         void *addr;
1044         int ret = 0;
1045
1046         if (!elem_size)
1047                 return -EINVAL;
1048
1049         if (!h->alloc)
1050                 return -EFAULT;
1051
1052         if (elem_size == h_stride && elem_size == sys_stride) {
1053                 elem_size *= count;
1054                 h_stride = elem_size;
1055                 sys_stride = elem_size;
1056                 count = 1;
1057         }
1058
1059         pte = nvmap_alloc_pte(nvmap_dev, &addr);
1060         if (IS_ERR(pte))
1061                 return PTR_ERR(pte);
1062
1063         while (count--) {
1064                 if (h_offs + elem_size > h->size) {
1065                         nvmap_warn(client, "read/write outside of handle\n");
1066                         ret = -EFAULT;
1067                         break;
1068                 }
1069                 if (is_read)
1070                         __nvmap_do_cache_maint(client, h, h_offs,
1071                                 h_offs + elem_size, NVMAP_CACHE_OP_INV, false);
1072
1073                 ret = rw_handle_page(h, is_read, h_offs, sys_addr,
1074                                      elem_size, (unsigned long)addr, *pte);
1075
1076                 if (ret)
1077                         break;
1078
1079                 if (!is_read)
1080                         __nvmap_do_cache_maint(client, h, h_offs,
1081                                 h_offs + elem_size, NVMAP_CACHE_OP_WB_INV,
1082                                 false);
1083
1084                 copied += elem_size;
1085                 sys_addr += sys_stride;
1086                 h_offs += h_stride;
1087         }
1088
1089         nvmap_free_pte(nvmap_dev, pte);
1090         return ret ?: copied;
1091 }
1092
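/*
 * Batched ioctl handler: unmarshals an array of handles with per-handle
 * offsets and sizes, then either reserves pages or performs cache
 * maintenance over the whole list.
 */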
1093 int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg,
1094                                  bool is_reserve_ioctl)
1095 {
1096         struct nvmap_cache_op_list op;
1097         u32 *handle_ptr;
1098         u32 *offset_ptr;
1099         u32 *size_ptr;
1100         struct nvmap_handle **refs;
1101         int err = 0;
1102         u32 i, n_unmarshal_handles = 0;
1103
1104         if (copy_from_user(&op, arg, sizeof(op)))
1105                 return -EFAULT;
1106
1107         if (!op.nr)
1108                 return -EINVAL;
1109
1110         if (!access_ok(VERIFY_READ, op.handles, op.nr * sizeof(u32)))
1111                 return -EFAULT;
1112
1113         if (!access_ok(VERIFY_READ, op.offsets, op.nr * sizeof(u32)))
1114                 return -EFAULT;
1115
1116         if (!access_ok(VERIFY_READ, op.sizes, op.nr * sizeof(u32)))
1117                 return -EFAULT;
1118
1119         if (!op.offsets || !op.sizes)
1120                 return -EINVAL;
1121
1122         refs = kcalloc(op.nr, sizeof(*refs), GFP_KERNEL);
1123
1124         if (!refs)
1125                 return -ENOMEM;
1126
1127         handle_ptr = (u32 *)(uintptr_t)op.handles;
1128         offset_ptr = (u32 *)(uintptr_t)op.offsets;
1129         size_ptr = (u32 *)(uintptr_t)op.sizes;
1130
1131         for (i = 0; i < op.nr; i++) {
1132                 u32 handle;
1133
1134                 if (copy_from_user(&handle, &handle_ptr[i], sizeof(handle))) {
1135                         err = -EFAULT;
1136                         goto free_mem;
1137                 }
1138
1139                 refs[i] = unmarshal_user_handle(handle);
1140                 if (!refs[i]) {
1141                         err = -EINVAL;
1142                         goto free_mem;
1143                 }
1144                 n_unmarshal_handles++;
1145         }
1146
1147         if (is_reserve_ioctl)
1148                 err = nvmap_reserve_pages(refs, offset_ptr, size_ptr,
1149                                           op.nr, op.op);
1150         else
1151                 err = nvmap_do_cache_maint_list(refs, offset_ptr, size_ptr,
1152                                                 op.op, op.nr);
1153
1154 free_mem:
1155         for (i = 0; i < n_unmarshal_handles; i++)
1156                 nvmap_handle_put(refs[i]);
1157         kfree(refs);
1158         return err;
1159 }
1160