1 /*
2  * drivers/video/tegra/nvmap/nvmap_ioctl.c
3  *
4  * User-space interface to nvmap
5  *
6  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #define pr_fmt(fmt)     "nvmap: %s() " fmt, __func__
24
25 #include <linux/dma-mapping.h>
26 #include <linux/export.h>
27 #include <linux/fs.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/uaccess.h>
31 #include <linux/nvmap.h>
32 #include <linux/vmalloc.h>
33
34 #include <asm/memory.h>
35
36 #include <trace/events/nvmap.h>
37
38 #include "nvmap_ioctl.h"
39 #include "nvmap_priv.h"
40
41 #include <linux/list.h>
42
43 static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
44                          int is_read, unsigned long h_offs,
45                          unsigned long sys_addr, unsigned long h_stride,
46                          unsigned long sys_stride, unsigned long elem_size,
47                          unsigned long count);
48
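/*
 * Convert a user-supplied handle value (a dma-buf fd in this ABI) into the
 * corresponding nvmap_handle pointer, or NULL if the fd does not refer to
 * an nvmap-owned dma-buf.
 */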
49 struct nvmap_handle *unmarshal_user_handle(__u32 handle)
50 {
51         struct nvmap_handle *h;
52
53         h = nvmap_get_id_from_dmabuf_fd(NULL, (int)handle);
54         if (!IS_ERR(h))
55                 return h;
56         return NULL;
57 }
58
59 /*
60  * marshal_id/unmarshal_id are for get_id/handle_from_id.
61  * They were added to support using fds as handles.
62  */
63 #ifdef CONFIG_ARM64
64 static __u32 marshal_id(struct nvmap_handle *handle)
65 {
66         return (__u32)((uintptr_t)handle >> 2);
67 }
68
69 static struct nvmap_handle *unmarshal_id(__u32 id)
70 {
71         uintptr_t h = (((uintptr_t)id << 2) | PAGE_OFFSET);
72
73         return (struct nvmap_handle *)h;
74 }
75 #else
76 static __u32 marshal_id(struct nvmap_handle *handle)
77 {
78         return (uintptr_t)handle;
79 }
80
81 static struct nvmap_handle *unmarshal_id(__u32 id)
82 {
83         return (struct nvmap_handle *)id;
84 }
85 #endif
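
/*
 * Worked example of the ARM64 encoding above (illustrative only; assumes
 * the kernel linear mapping starts at PAGE_OFFSET = 0xffffffc000000000):
 * a handle at 0xffffffc012345678 marshals to the low 32 bits of
 * (0xffffffc012345678 >> 2), i.e. 0x048d159e; unmarshalling shifts that
 * back up by 2 (0x12345678) and ORs in PAGE_OFFSET, recovering the
 * original pointer. The round trip only works because nvmap_handle
 * objects live in the linear mapping and are at least 4-byte aligned.
 */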
86
87 struct nvmap_handle *__nvmap_ref_to_id(struct nvmap_handle_ref *ref)
88 {
89         if (!virt_addr_valid(ref))
90                 return NULL;
91         return ref->handle;
92 }
93
94 int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
95                       bool is32)
96 {
97 #ifdef CONFIG_COMPAT
98         struct nvmap_pin_handle_32 op32;
99         __u32 __user *output32 = NULL;
100 #endif
101         struct nvmap_pin_handle op;
102         struct nvmap_handle *h;
103         struct nvmap_handle *on_stack[16];
104         struct nvmap_handle **refs;
105         unsigned long __user *output;
106         unsigned int i;
107         int err = 0;
108
109 #ifdef CONFIG_COMPAT
110         if (is32) {
111                 if (copy_from_user(&op32, arg, sizeof(op32)))
112                         return -EFAULT;
113                 op.handles = (__u32 *)(uintptr_t)op32.handles;
114                 op.count = op32.count;
115         } else
116 #endif
117                 if (copy_from_user(&op, arg, sizeof(op)))
118                         return -EFAULT;
119
120         if (!op.count)
121                 return -EINVAL;
122
123         if (op.count > 1) {
124                 size_t bytes = op.count * sizeof(*refs); /* kcalloc below will catch overflow. */
125
126                 if (op.count > ARRAY_SIZE(on_stack))
127                         refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
128                 else
129                         refs = on_stack;
130
131                 if (!refs)
132                         return -ENOMEM;
133
134                 if (!access_ok(VERIFY_READ, op.handles, bytes)) {
135                         err = -EFAULT;
136                         goto out;
137                 }
138
139                 for (i = 0; i < op.count; i++) {
140                         u32 handle;
141                         if (__get_user(handle, &op.handles[i])) {
142                                 err = -EFAULT;
143                                 goto out;
144                         }
145                         refs[i] = unmarshal_user_handle(handle);
146                         if (!refs[i]) {
147                                 err = -EINVAL;
148                                 goto out;
149                         }
150                 }
151         } else {
152                 refs = on_stack;
153
154                 /* Yes, we're storing a u32 in a pointer */
155                 on_stack[0] = unmarshal_user_handle((u32)(uintptr_t)op.handles);
156                 if (!on_stack[0]) {
157                         err = -EINVAL;
158                         goto out;
159                 }
160         }
161
162         trace_nvmap_ioctl_pinop(filp->private_data, is_pin, op.count, refs);
163         if (is_pin)
164                 err = nvmap_pin_ids(filp->private_data, op.count, refs);
165         else
166                 nvmap_unpin_ids(filp->private_data, op.count, refs);
167
168         /* skip the output stage on unpin */
169         if (err || !is_pin)
170                 goto out;
171
172         /* If nvmap_pin_ids() returns 0, all of the handle_ref objects are
173          * guaranteed to be valid, so dereferencing them directly here is
174          * safe. */
175 #ifdef CONFIG_COMPAT
176         if (is32) {
177                 if (op.count > 1)
178                         output32 = (__u32 *)(uintptr_t)op.addr;
179                 else {
180                         struct nvmap_pin_handle_32 __user *tmp = arg;
181                         output32 = &tmp->addr;
182                 }
183
184                 if (!output32)
185                         goto out;
186         } else
187 #endif
188         {
189                 if (op.count > 1)
190                         output = op.addr;
191                 else {
192                         struct nvmap_pin_handle __user *tmp = arg;
193                         output = (unsigned long *)&tmp->addr;
194                 }
195
196                 if (!output)
197                         goto out;
198         }
199
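        /*
         * Report the device-visible address of each pinned handle:
         * contiguous page allocations report the physical address of the
         * first page, non-contiguous page allocations report the DMA
         * address from the attachment's sg_table, and carveout allocations
         * report the carveout base address.
         */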
200         for (i = 0; i < op.count && !err; i++) {
201                 unsigned long addr;
202
203                 h = refs[i];
204                 if (h->heap_pgalloc && h->pgalloc.contig)
205                         addr = page_to_phys(h->pgalloc.pages[0]);
206                 else if (h->heap_pgalloc)
207                         addr = sg_dma_address(
208                                 ((struct sg_table *)h->attachment->priv)->sgl);
209                 else
210                         addr = h->carveout->base;
211
212 #ifdef CONFIG_COMPAT
213                 if (is32)
214                         err = put_user((__u32)addr, &output32[i]);
215                 else
216 #endif
217                         err = put_user(addr, &output[i]);
218         }
219
220         if (err)
221                 nvmap_unpin_ids(filp->private_data, op.count, refs);
222
223 out:
224         if (refs != on_stack)
225                 kfree(refs);
226
227         return err;
228 }
229
230 int nvmap_ioctl_getid(struct file *filp, void __user *arg)
231 {
232         struct nvmap_client *client = filp->private_data;
233         struct nvmap_create_handle op;
234         struct nvmap_handle *h = NULL;
235
236         if (copy_from_user(&op, arg, sizeof(op)))
237                 return -EFAULT;
238
239         h = unmarshal_user_handle(op.handle);
240         if (!h)
241                 return -EINVAL;
242
243         h = nvmap_handle_get(h);
244
245         if (!h)
246                 return -EPERM;
247
248         op.id = marshal_id(h);
249         if (client == h->owner)
250                 h->global = true;
251
252         nvmap_handle_put(h);
253
254         return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
255 }
256
257 static int nvmap_share_release(struct inode *inode, struct file *file)
258 {
259         struct nvmap_handle *h = file->private_data;
260
261         nvmap_handle_put(h);
262         return 0;
263 }
264
265 static int nvmap_share_mmap(struct file *file, struct vm_area_struct *vma)
266 {
267         /* unsupported operation */
268         WARN(1, "mmap is not supported on an fd that shares an nvmap handle");
269         return -EPERM;
270 }
271
272 const struct file_operations nvmap_fd_fops = {
273         .owner          = THIS_MODULE,
274         .release        = nvmap_share_release,
275         .mmap           = nvmap_share_mmap,
276 };
277
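/*
 * Minimal user-space sketch of the getfd path (illustrative only; the
 * NVMAP_IOC_GET_FD request code is assumed to be the uapi ioctl that
 * reaches this handler, and nvmap_dev_fd / handle_fd are hypothetical
 * names for the opened nvmap device fd and a previously created handle):
 *
 *	struct nvmap_create_handle op = { .handle = handle_fd };
 *
 *	if (ioctl(nvmap_dev_fd, NVMAP_IOC_GET_FD, &op) == 0)
 *		use(op.fd);	// op.fd is a dma-buf fd for the handle
 */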
278 int nvmap_ioctl_getfd(struct file *filp, void __user *arg)
279 {
280         struct nvmap_handle *handle;
281         struct nvmap_create_handle op;
282         struct nvmap_client *client = filp->private_data;
283
284         if (copy_from_user(&op, arg, sizeof(op)))
285                 return -EFAULT;
286
287         handle = unmarshal_user_handle(op.handle);
288         if (!handle)
289                 return -EINVAL;
290
291         op.fd = nvmap_get_dmabuf_fd(client, handle);
292         if (op.fd < 0)
293                 return op.fd;
294
295         if (copy_to_user(arg, &op, sizeof(op))) {
296                 sys_close(op.fd);
297                 return -EFAULT;
298         }
299         return 0;
300 }
301
302 int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
303 {
304         struct nvmap_alloc_handle op;
305         struct nvmap_client *client = filp->private_data;
306         struct nvmap_handle *handle;
307
308         if (copy_from_user(&op, arg, sizeof(op)))
309                 return -EFAULT;
310
311         handle = unmarshal_user_handle(op.handle);
312         if (!handle)
313                 return -EINVAL;
314
315         if (op.align & (op.align - 1))
316                 return -EINVAL;
317
318         /* user-space handles are aligned to page boundaries, to prevent
319          * data leakage. */
320         op.align = max_t(size_t, op.align, PAGE_SIZE);
321 #if defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
322         op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
323 #endif
324
325         return nvmap_alloc_handle(client, handle, op.heap_mask, op.align,
326                                   0, /* no kind */
327                                   op.flags & (~NVMAP_HANDLE_KIND_SPECIFIED));
328 }
329
330 int nvmap_ioctl_alloc_kind(struct file *filp, void __user *arg)
331 {
332         struct nvmap_alloc_kind_handle op;
333         struct nvmap_client *client = filp->private_data;
334         struct nvmap_handle *handle;
335
336         if (copy_from_user(&op, arg, sizeof(op)))
337                 return -EFAULT;
338
339         handle = unmarshal_user_handle(op.handle);
340         if (!handle)
341                 return -EINVAL;
342
343         if (op.align & (op.align - 1))
344                 return -EINVAL;
345
346         /* user-space handles are aligned to page boundaries, to prevent
347          * data leakage. */
348         op.align = max_t(size_t, op.align, PAGE_SIZE);
349 #if defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
350         op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
351 #endif
352
353         return nvmap_alloc_handle(client, handle,
354                                   op.heap_mask,
355                                   op.align,
356                                   op.kind,
357                                   op.flags);
358 }
359
360 int nvmap_create_fd(struct nvmap_handle *h)
361 {
362         int fd;
363
364         fd = __nvmap_dmabuf_fd(h->dmabuf, O_CLOEXEC);
365         BUG_ON(fd == 0);
366         if (fd < 0) {
367                 pr_err("Out of file descriptors\n");
368                 return fd;
369         }
370         /* __nvmap_dmabuf_fd() associates the fd with dma_buf->file.
371          * Closing the fd drops one reference on dma_buf->file, so take an
372          * extra reference on the dma_buf here to keep the refcount balanced.
373          */
374         get_dma_buf(h->dmabuf);
375         return fd;
376 }
377
378 int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
379 {
380         struct nvmap_create_handle op;
381         struct nvmap_handle_ref *ref = NULL;
382         struct nvmap_client *client = filp->private_data;
383         int err = 0;
384         int fd = 0;
385
386         if (copy_from_user(&op, arg, sizeof(op)))
387                 return -EFAULT;
388
389         if (!client)
390                 return -ENODEV;
391
392         if (cmd == NVMAP_IOC_CREATE) {
393                 ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
394                 if (!IS_ERR(ref))
395                         ref->handle->orig_size = op.size;
396         } else if (cmd == NVMAP_IOC_FROM_ID) {
397                 ref = nvmap_duplicate_handle(client, unmarshal_id(op.id), 0);
398         } else if (cmd == NVMAP_IOC_FROM_FD) {
399                 ref = nvmap_create_handle_from_fd(client, op.fd);
400         } else {
401                 return -EINVAL;
402         }
403
404         if (IS_ERR(ref))
405                 return PTR_ERR(ref);
406
407         fd = nvmap_create_fd(ref->handle);
408         if (fd < 0)
409                 err = fd;
410
411         op.handle = fd;
412
413         if (copy_to_user(arg, &op, sizeof(op))) {
414                 err = -EFAULT;
415                 nvmap_free_handle(client, __nvmap_ref_to_id(ref));
416         }
417
418         if (err && fd > 0)
419                 sys_close(fd);
420         return err;
421 }
422
423 int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg, bool is32)
424 {
425         struct nvmap_client *client = filp->private_data;
426         struct nvmap_map_caller op;
427 #ifdef CONFIG_COMPAT
428         struct nvmap_map_caller_32 op32;
429 #endif
430         struct nvmap_vma_priv *priv;
431         struct vm_area_struct *vma;
432         struct nvmap_handle *h = NULL;
433         int err = 0;
434
435 #ifdef CONFIG_COMPAT
436         if (is32) {
437                 if (copy_from_user(&op32, arg, sizeof(op32)))
438                         return -EFAULT;
439                 op.handle = op32.handle;
440                 op.offset = op32.offset;
441                 op.length = op32.length;
442                 op.flags = op32.flags;
443                 op.addr = op32.addr;
444         } else
445 #endif
446                 if (copy_from_user(&op, arg, sizeof(op)))
447                         return -EFAULT;
448
449         h = unmarshal_user_handle(op.handle);
450
451         if (!h)
452                 return -EINVAL;
453
454         h = nvmap_handle_get(h);
455
456         if (!h)
457                 return -EPERM;
458
459         if (!h->alloc) {
460                 nvmap_handle_put(h);
461                 return -EFAULT;
462         }
463
464         trace_nvmap_map_into_caller_ptr(client, h, op.offset,
465                                         op.length, op.flags);
466         down_read(&current->mm->mmap_sem);
467
468         vma = find_vma(current->mm, op.addr);
469         if (!vma) {
470                 err = -ENOMEM;
471                 goto out;
472         }
473
474         if (op.offset & ~PAGE_MASK) {
475                 err = -EFAULT;
476                 goto out;
477         }
478
479         if (op.offset >= h->size || op.length > h->size - op.offset) {
480                 err = -EADDRNOTAVAIL;
481                 goto out;
482         }
483
484         /* the VMA must exactly match the requested mapping operation, and the
485          * VMA that is targeted must have been created by this driver
486          */
487         if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
488             (vma->vm_end-vma->vm_start != op.length)) {
489                 err = -EPERM;
490                 goto out;
491         }
492
493         /* verify that each mmap() system call creates a unique VMA */
494         if (vma->vm_private_data)
495                 goto out;
496
497         if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
498                 err = -EFAULT;
499                 goto out;
500         }
501
502         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
503         if (!priv)  {
504                 err = -ENOMEM;
505                 goto out;
506         }
507
508         vma->vm_flags |= (h->heap_pgalloc ? 0 : VM_PFNMAP);
509         priv->handle = h;
510         priv->offs = op.offset;
511         vma->vm_private_data = priv;
512         vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
513         nvmap_vma_open(vma);
514
515 out:
516         up_read(&current->mm->mmap_sem);
517
518         if (err)
519                 nvmap_handle_put(h);
520         return err;
521 }
522
523 int nvmap_ioctl_get_param(struct file *filp, void __user *arg, bool is32)
524 {
525 #ifdef CONFIG_COMPAT
526         struct nvmap_handle_param_32 __user *uarg32 = arg;
527 #endif
528         struct nvmap_handle_param __user *uarg = arg;
529         struct nvmap_handle_param op;
530         struct nvmap_client *client = filp->private_data;
531         struct nvmap_handle_ref *ref;
532         struct nvmap_handle *h;
533         u64 result;
534         int err = 0;
535
536 #ifdef CONFIG_COMPAT
537         /* This is safe because the incoming value of result doesn't matter */
538         if (is32) {
539                 if (copy_from_user(&op, arg,
540                                 sizeof(struct nvmap_handle_param_32)))
541                         return -EFAULT;
542         } else
543 #endif
544                 if (copy_from_user(&op, arg, sizeof(op)))
545                         return -EFAULT;
546
547         h = unmarshal_user_handle(op.handle);
548         if (!h)
549                 return -EINVAL;
550
551         h = nvmap_handle_get(h);
552         if (!h)
553                 return -EINVAL;
554
555         nvmap_ref_lock(client);
556         ref = __nvmap_validate_locked(client, h);
557         if (IS_ERR_OR_NULL(ref)) {
558                 err = ref ? PTR_ERR(ref) : -EINVAL;
559                 goto ref_fail;
560         }
561
562         err = nvmap_get_handle_param(client, ref, op.param, &result);
563
564 #ifdef CONFIG_COMPAT
565         if (is32)
566                 err = put_user((__u32)result, &uarg32->result);
567         else
568 #endif
569                 err = put_user((unsigned long)result, &uarg->result);
570
571 ref_fail:
572         nvmap_ref_unlock(client);
573         nvmap_handle_put(h);
574         return err;
575 }
576
577 int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
578                           bool is32)
579 {
580         struct nvmap_client *client = filp->private_data;
581         struct nvmap_rw_handle __user *uarg = arg;
582         struct nvmap_rw_handle op;
583 #ifdef CONFIG_COMPAT
584         struct nvmap_rw_handle_32 __user *uarg32 = arg;
585         struct nvmap_rw_handle_32 op32;
586 #endif
587         struct nvmap_handle *h;
588         ssize_t copied;
589         int err = 0;
590
591 #ifdef CONFIG_COMPAT
592         if (is32) {
593                 if (copy_from_user(&op32, arg, sizeof(op32)))
594                         return -EFAULT;
595                 op.addr = op32.addr;
596                 op.handle = op32.handle;
597                 op.offset = op32.offset;
598                 op.elem_size = op32.elem_size;
599                 op.hmem_stride = op32.hmem_stride;
600                 op.user_stride = op32.user_stride;
601                 op.count = op32.count;
602         } else
603 #endif
604                 if (copy_from_user(&op, arg, sizeof(op)))
605                         return -EFAULT;
606
607         h = unmarshal_user_handle(op.handle);
608         if (!h || !op.addr || !op.count || !op.elem_size)
609                 return -EINVAL;
610
611         h = nvmap_handle_get(h);
612         if (!h)
613                 return -EPERM;
614
615         nvmap_kmaps_inc(h);
616         trace_nvmap_ioctl_rw_handle(client, h, is_read, op.offset,
617                                     op.addr, op.hmem_stride,
618                                     op.user_stride, op.elem_size, op.count);
619         copied = rw_handle(client, h, is_read, op.offset,
620                            (unsigned long)op.addr, op.hmem_stride,
621                            op.user_stride, op.elem_size, op.count);
622         nvmap_kmaps_dec(h);
623
624         if (copied < 0) {
625                 err = copied;
626                 copied = 0;
627         } else if (copied < (op.count * op.elem_size))
628                 err = -EINTR;
629
630 #ifdef CONFIG_COMPAT
631         if (is32)
632                 __put_user(copied, &uarg32->count);
633         else
634 #endif
635                 __put_user(copied, &uarg->count);
636
637         nvmap_handle_put(h);
638
639         return err;
640 }
641
642 static int __nvmap_cache_maint(struct nvmap_client *client,
643                                struct nvmap_cache_op *op)
644 {
645         struct vm_area_struct *vma;
646         struct nvmap_vma_priv *priv;
647         struct nvmap_handle *handle;
648         unsigned long start;
649         unsigned long end;
650         int err = 0;
651
652         handle = unmarshal_user_handle(op->handle);
653         if (!handle || !op->addr || op->op < NVMAP_CACHE_OP_WB ||
654             op->op > NVMAP_CACHE_OP_WB_INV)
655                 return -EINVAL;
656
657         down_read(&current->mm->mmap_sem);
658
659         vma = find_vma(current->active_mm, (unsigned long)op->addr);
660         if (!vma || !is_nvmap_vma(vma) ||
661             (ulong)op->addr < vma->vm_start ||
662             (ulong)op->addr >= vma->vm_end ||
663             op->len > vma->vm_end - (ulong)op->addr) {
664                 err = -EADDRNOTAVAIL;
665                 goto out;
666         }
667
668         priv = (struct nvmap_vma_priv *)vma->vm_private_data;
669
670         if (priv->handle != handle) {
671                 err = -EFAULT;
672                 goto out;
673         }
674
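        /* Convert the user VA into an offset within the handle, accounting
         * for the vma's mmap offset. */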
675         start = (unsigned long)op->addr - vma->vm_start +
676                 (vma->vm_pgoff << PAGE_SHIFT);
677         end = start + op->len;
678
679         err = __nvmap_do_cache_maint(client, priv->handle, start, end, op->op,
680                                      false);
681 out:
682         up_read(&current->mm->mmap_sem);
683         return err;
684 }
685
686 int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg, bool is32)
687 {
688         struct nvmap_client *client = filp->private_data;
689         struct nvmap_cache_op op;
690 #ifdef CONFIG_COMPAT
691         struct nvmap_cache_op_32 op32;
692 #endif
693
694 #ifdef CONFIG_COMPAT
695         if (is32) {
696                 if (copy_from_user(&op32, arg, sizeof(op32)))
697                         return -EFAULT;
698                 op.addr = op32.addr;
699                 op.handle = op32.handle;
700                 op.len = op32.len;
701                 op.op = op32.op;
702         } else
703 #endif
704                 if (copy_from_user(&op, arg, sizeof(op)))
705                         return -EFAULT;
706
707         return __nvmap_cache_maint(client, &op);
708 }
709
710 int nvmap_ioctl_free(struct file *filp, unsigned long arg)
711 {
712         struct nvmap_client *client = filp->private_data;
713
714         if (!arg)
715                 return 0;
716
717         nvmap_free_handle_user_id(client, arg);
718         return sys_close(arg);
719 }
720
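/*
 * Map nvmap cache ops onto the ARM cache primitives: WB_INV flushes
 * (clean + invalidate), INV invalidates via dmac_map_area(DMA_FROM_DEVICE),
 * and WB cleans via dmac_map_area(DMA_TO_DEVICE). The outer (L2) variant
 * below uses the corresponding outer_* range operations.
 */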
721 static void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
722 {
723         if (op == NVMAP_CACHE_OP_WB_INV)
724                 dmac_flush_range(vaddr, vaddr + size);
725         else if (op == NVMAP_CACHE_OP_INV)
726                 dmac_map_area(vaddr, size, DMA_FROM_DEVICE);
727         else
728                 dmac_map_area(vaddr, size, DMA_TO_DEVICE);
729 }
730
731 static void outer_cache_maint(unsigned int op, phys_addr_t paddr, size_t size)
732 {
733         if (op == NVMAP_CACHE_OP_WB_INV)
734                 outer_flush_range(paddr, paddr + size);
735         else if (op == NVMAP_CACHE_OP_INV)
736                 outer_inv_range(paddr, paddr + size);
737         else
738                 outer_clean_range(paddr, paddr + size);
739 }
740
741 static void heap_page_cache_maint(
742         struct nvmap_handle *h, unsigned long start, unsigned long end,
743         unsigned int op, bool inner, bool outer, pte_t **pte,
744         unsigned long kaddr, pgprot_t prot, bool clean_only_dirty)
745 {
746         if (h->userflags & NVMAP_HANDLE_CACHE_SYNC) {
747                 /*
748                  * zap user VA->PA mappings so that any access to the pages
749                  * will fault, allowing the pages to be marked dirty again
750                  */
751                 nvmap_handle_mkclean(h, start, end-start);
752                 nvmap_zap_handle(h, start, end - start);
753         }
754
755 #ifdef NVMAP_LAZY_VFREE
756         if (inner) {
757                 void *vaddr = NULL;
758
759                 if (!h->vaddr) {
760                         struct page **pages;
761                         /* No mutex protection is needed here: the kmaps
762                          * count was already incremented in
763                          * __nvmap_do_cache_maint to protect from migration.
764                          */
765                         nvmap_kmaps_inc_no_lock(h);
766                         pages = nvmap_pages(h->pgalloc.pages,
767                                             h->size >> PAGE_SHIFT);
768                         if (!pages)
769                                 goto per_page_cache_maint;
770                         vaddr = vm_map_ram(pages,
771                                         h->size >> PAGE_SHIFT, -1, prot);
772                         nvmap_altfree(pages,
773                                 (h->size >> PAGE_SHIFT) * sizeof(*pages));
774                 }
775                 if (vaddr && atomic_long_cmpxchg(&h->vaddr, 0, (long)vaddr)) {
776                         nvmap_kmaps_dec(h);
777                         vm_unmap_ram(vaddr, h->size >> PAGE_SHIFT);
778                 }
779                 if (h->vaddr) {
780                         /* Fast inner cache maintenance using single mapping */
781                         inner_cache_maint(op, h->vaddr + start, end - start);
782                         if (!outer)
783                                 return;
784                         /* Skip per-page inner maintenance in loop below */
785                         inner = false;
786                 }
787         }
788 per_page_cache_maint:
789         if (!h->vaddr)
790                 nvmap_kmaps_dec(h);
791 #endif
792
793         while (start < end) {
794                 struct page *page;
795                 phys_addr_t paddr;
796                 unsigned long next;
797                 unsigned long off;
798                 size_t size;
799
800                 page = nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
801                 next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
802                 off = start & ~PAGE_MASK;
803                 size = next - start;
804                 paddr = page_to_phys(page) + off;
805
806                 if (inner) {
807                         void *vaddr = (void *)kaddr + off;
808                         BUG_ON(!pte);
809                         BUG_ON(!kaddr);
810                         set_pte_at(&init_mm, kaddr, *pte,
811                                 pfn_pte(__phys_to_pfn(paddr), prot));
812                         nvmap_flush_tlb_kernel_page(kaddr);
813                         inner_cache_maint(op, vaddr, size);
814                 }
815
816                 if (outer)
817                         outer_cache_maint(op, paddr, size);
818                 start = next;
819         }
820 }
821
822 #if defined(CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS)
823 static bool fast_cache_maint_outer(unsigned long start,
824                 unsigned long end, unsigned int op)
825 {
826         bool result = false;
827         if (end - start >= cache_maint_outer_threshold) {
828                 if (op == NVMAP_CACHE_OP_WB_INV) {
829                         outer_flush_all();
830                         result = true;
831                 }
832                 if (op == NVMAP_CACHE_OP_WB) {
833                         outer_clean_all();
834                         result = true;
835                 }
836         }
837
838         return result;
839 }
840 #else
841 static inline bool fast_cache_maint_outer(unsigned long start,
842                 unsigned long end, unsigned int op)
843 {
844         return false;
845 }
846 #endif
847
848 #if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
849 static inline bool can_fast_cache_maint(struct nvmap_handle *h,
850         unsigned long start,
851         unsigned long end, unsigned int op)
852 {
853         if ((op == NVMAP_CACHE_OP_INV) ||
854                 ((end - start) < cache_maint_inner_threshold))
855                 return false;
856         return true;
857 }
858 #else
859 static inline bool can_fast_cache_maint(struct nvmap_handle *h,
860         unsigned long start,
861         unsigned long end, unsigned int op)
862 {
863         return false;
864 }
865 #endif
866
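/*
 * Note: the fast path trades precision for speed by operating on the whole
 * cache by set/ways once the range exceeds the configured threshold. A pure
 * invalidate never takes this path, presumably because invalidating the
 * entire cache would discard unrelated dirty lines.
 */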
867 static bool fast_cache_maint(struct nvmap_handle *h,
868         unsigned long start,
869         unsigned long end, unsigned int op,
870         bool clean_only_dirty)
871 {
872         if (!can_fast_cache_maint(h, start, end, op))
873                 return false;
874
875         if (h->userflags & NVMAP_HANDLE_CACHE_SYNC) {
876                 nvmap_handle_mkclean(h, 0, h->size);
877                 nvmap_zap_handle(h, 0, h->size);
878         }
879
880         if (op == NVMAP_CACHE_OP_WB_INV)
881                 inner_flush_cache_all();
882         else if (op == NVMAP_CACHE_OP_WB)
883                 inner_clean_cache_all();
884
885         /* outer maintenance */
886         if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
887                 if (!fast_cache_maint_outer(start, end, op))
888                 {
889                         if (h->heap_pgalloc) {
890                                 heap_page_cache_maint(h, start,
891                                         end, op, false, true, NULL, 0, 0,
892                                         clean_only_dirty);
893                         } else  {
894                                 phys_addr_t pstart;
895
896                                 pstart = start + h->carveout->base;
897                                 outer_cache_maint(op, pstart, end - start);
898                         }
899                 }
900         }
901         return true;
902 }
903
904 struct cache_maint_op {
905         phys_addr_t start;
906         phys_addr_t end;
907         unsigned int op;
908         struct nvmap_handle *h;
909         bool inner;
910         bool outer;
911         bool clean_only_dirty;
912 };
913
914 static int do_cache_maint(struct cache_maint_op *cache_work)
915 {
916         pgprot_t prot;
917         pte_t **pte = NULL;
918         unsigned long kaddr;
919         phys_addr_t pstart = cache_work->start;
920         phys_addr_t pend = cache_work->end;
921         phys_addr_t loop;
922         int err = 0;
923         struct nvmap_handle *h = cache_work->h;
924         struct nvmap_client *client;
925         unsigned int op = cache_work->op;
926
927         if (!h || !h->alloc)
928                 return -EFAULT;
929
930         client = h->owner;
931         if (can_fast_cache_maint(h, pstart, pend, op))
932                 nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
933         else
934                 nvmap_stats_inc(NS_CFLUSH_DONE, pend - pstart);
935         trace_nvmap_cache_maint(client, h, pstart, pend, op, pend - pstart);
936         trace_nvmap_cache_flush(pend - pstart,
937                 nvmap_stats_read(NS_ALLOC),
938                 nvmap_stats_read(NS_CFLUSH_RQ),
939                 nvmap_stats_read(NS_CFLUSH_DONE));
940
941         wmb();
942         if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
943             h->flags == NVMAP_HANDLE_WRITE_COMBINE || pstart == pend)
944                 goto out;
945
946         if (fast_cache_maint(h, pstart, pend, op, cache_work->clean_only_dirty))
947                 goto out;
948
949         prot = nvmap_pgprot(h, PG_PROT_KERNEL);
950         pte = nvmap_alloc_pte(h->dev, (void **)&kaddr);
951         if (IS_ERR(pte)) {
952                 err = PTR_ERR(pte);
953                 pte = NULL;
954                 goto out;
955         }
956
957         if (h->heap_pgalloc) {
958                 heap_page_cache_maint(h, pstart, pend, op, true,
959                         (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ?
960                                         false : true,
961                         pte, kaddr, prot,
962                         cache_work->clean_only_dirty);
963                 goto out;
964         }
965
966         if (pstart > h->size || pend > h->size) {
967                 pr_warn("cache maintenance outside handle\n");
968                 err = -EINVAL;
969                 goto out;
970         }
971
972         pstart += h->carveout->base;
973         pend += h->carveout->base;
974         loop = pstart;
975
976         while (loop < pend) {
977                 phys_addr_t next = (loop + PAGE_SIZE) & PAGE_MASK;
978                 void *base = (void *)kaddr + (loop & ~PAGE_MASK);
979                 next = min(next, pend);
980
981                 set_pte_at(&init_mm, kaddr, *pte,
982                            pfn_pte(__phys_to_pfn(loop), prot));
983                 nvmap_flush_tlb_kernel_page(kaddr);
984
985                 inner_cache_maint(op, base, next - loop);
986                 loop = next;
987         }
988
989         if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
990                 outer_cache_maint(op, pstart, pend - pstart);
991
992 out:
993         if (pte)
994                 nvmap_free_pte(h->dev, pte);
995         return err;
996 }
997
998 int __nvmap_do_cache_maint(struct nvmap_client *client,
999                         struct nvmap_handle *h,
1000                         unsigned long start, unsigned long end,
1001                         unsigned int op, bool clean_only_dirty)
1002 {
1003         int err;
1004         struct cache_maint_op cache_op;
1005
1006         h = nvmap_handle_get(h);
1007         if (!h)
1008                 return -EFAULT;
1009
1010         nvmap_kmaps_inc(h);
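        /*
         * Promote a pure invalidate to clean + invalidate so that dirty
         * lines overlapping the range are written back rather than
         * silently discarded.
         */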
1011         if (op == NVMAP_CACHE_OP_INV)
1012                 op = NVMAP_CACHE_OP_WB_INV;
1013
1014         /* clean_only_dirty applies only to the write-back operation */
1015         if (op != NVMAP_CACHE_OP_WB)
1016                 clean_only_dirty = false;
1017
1018         cache_op.h = h;
1019         cache_op.start = start;
1020         cache_op.end = end;
1021         cache_op.op = op;
1022         cache_op.inner = h->flags == NVMAP_HANDLE_CACHEABLE ||
1023                          h->flags == NVMAP_HANDLE_INNER_CACHEABLE;
1024         cache_op.outer = h->flags == NVMAP_HANDLE_CACHEABLE;
1025         cache_op.clean_only_dirty = clean_only_dirty;
1026
1027         nvmap_stats_inc(NS_CFLUSH_RQ, end - start);
1028         err = do_cache_maint(&cache_op);
1029         nvmap_kmaps_dec(h);
1030         nvmap_handle_put(h);
1031         return err;
1032 }
1033
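/*
 * Copy a span of a handle to or from user space by temporarily mapping each
 * backing page into the kernel through the supplied PTE slot and then using
 * copy_to_user()/copy_from_user() on that mapping.
 */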
1034 static int rw_handle_page(struct nvmap_handle *h, int is_read,
1035                           unsigned long start, unsigned long rw_addr,
1036                           unsigned long bytes, unsigned long kaddr, pte_t *pte)
1037 {
1038         pgprot_t prot = nvmap_pgprot(h, PG_PROT_KERNEL);
1039         unsigned long end = start + bytes;
1040         int err = 0;
1041
1042         while (!err && start < end) {
1043                 struct page *page = NULL;
1044                 phys_addr_t phys;
1045                 size_t count;
1046                 void *src;
1047
1048                 if (!h->heap_pgalloc) {
1049                         phys = h->carveout->base + start;
1050                 } else {
1051                         page =
1052                            nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
1053                         BUG_ON(!page);
1054                         get_page(page);
1055                         phys = page_to_phys(page) + (start & ~PAGE_MASK);
1056                 }
1057
1058                 set_pte_at(&init_mm, kaddr, pte,
1059                            pfn_pte(__phys_to_pfn(phys), prot));
1060                 nvmap_flush_tlb_kernel_page(kaddr);
1061
1062                 src = (void *)kaddr + (phys & ~PAGE_MASK);
1063                 phys = PAGE_SIZE - (phys & ~PAGE_MASK);
1064                 count = min_t(size_t, end - start, phys);
1065
1066                 if (is_read)
1067                         err = copy_to_user((void *)rw_addr, src, count);
1068                 else
1069                         err = copy_from_user(src, (void *)rw_addr, count);
1070
1071                 if (err)
1072                         err = -EFAULT;
1073
1074                 rw_addr += count;
1075                 start += count;
1076
1077                 if (page)
1078                         put_page(page);
1079         }
1080
1081         return err;
1082 }
1083
1084 static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
1085                          int is_read, unsigned long h_offs,
1086                          unsigned long sys_addr, unsigned long h_stride,
1087                          unsigned long sys_stride, unsigned long elem_size,
1088                          unsigned long count)
1089 {
1090         ssize_t copied = 0;
1091         pte_t **pte;
1092         void *addr;
1093         int ret = 0;
1094
1095         if (!elem_size)
1096                 return -EINVAL;
1097
1098         if (!h->alloc)
1099                 return -EFAULT;
1100
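        /*
         * When both the handle-side and user-side strides equal the element
         * size, the transfer is contiguous and can be collapsed into a
         * single copy of count * elem_size bytes.
         */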
1101         if (elem_size == h_stride && elem_size == sys_stride) {
1102                 elem_size *= count;
1103                 h_stride = elem_size;
1104                 sys_stride = elem_size;
1105                 count = 1;
1106         }
1107
1108         pte = nvmap_alloc_pte(nvmap_dev, &addr);
1109         if (IS_ERR(pte))
1110                 return PTR_ERR(pte);
1111
1112         while (count--) {
1113                 if (h_offs + elem_size > h->size) {
1114                         nvmap_warn(client, "read/write outside of handle\n");
1115                         ret = -EFAULT;
1116                         break;
1117                 }
1118                 if (is_read)
1119                         __nvmap_do_cache_maint(client, h, h_offs,
1120                                 h_offs + elem_size, NVMAP_CACHE_OP_INV, false);
1121
1122                 ret = rw_handle_page(h, is_read, h_offs, sys_addr,
1123                                      elem_size, (unsigned long)addr, *pte);
1124
1125                 if (ret)
1126                         break;
1127
1128                 if (!is_read)
1129                         __nvmap_do_cache_maint(client, h, h_offs,
1130                                 h_offs + elem_size, NVMAP_CACHE_OP_WB_INV,
1131                                 false);
1132
1133                 copied += elem_size;
1134                 sys_addr += sys_stride;
1135                 h_offs += h_stride;
1136         }
1137
1138         nvmap_free_pte(nvmap_dev, pte);
1139         return ret ?: copied;
1140 }
1141
1142 int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg,
1143                                  bool is_reserve_ioctl)
1144 {
1145         struct nvmap_cache_op_list op;
1146         u32 *handle_ptr;
1147         u32 *offset_ptr;
1148         u32 *size_ptr;
1149         struct nvmap_handle **refs;
1150         int i, err = 0;
1151
1152         if (copy_from_user(&op, arg, sizeof(op)))
1153                 return -EFAULT;
1154
1155         if (!op.nr)
1156                 return -EINVAL;
1157
1158         if (!access_ok(VERIFY_READ, op.handles, op.nr * sizeof(u32)))
1159                 return -EFAULT;
1160
1161         if (!access_ok(VERIFY_READ, op.offsets, op.nr * sizeof(u32)))
1162                 return -EFAULT;
1163
1164         if (!access_ok(VERIFY_READ, op.sizes, op.nr * sizeof(u32)))
1165                 return -EFAULT;
1166
1167         if (!op.offsets || !op.sizes)
1168                 return -EINVAL;
1169
1170         refs = kcalloc(op.nr, sizeof(*refs), GFP_KERNEL);
1171
1172         if (!refs)
1173                 return -ENOMEM;
1174
1175         handle_ptr = (u32 *)(uintptr_t)op.handles;
1176         offset_ptr = (u32 *)(uintptr_t)op.offsets;
1177         size_ptr = (u32 *)(uintptr_t)op.sizes;
1178
1179         for (i = 0; i < op.nr; i++) {
1180                 u32 handle;
1181
1182                 if (copy_from_user(&handle, &handle_ptr[i], sizeof(handle))) {
1183                         err = -EFAULT;
1184                         goto free_mem;
1185                 }
1186
1187                 refs[i] = unmarshal_user_handle(handle);
1188                 if (!refs[i]) {
1189                         err = -EINVAL;
1190                         goto free_mem;
1191                 }
1192         }
1193
1194         if (is_reserve_ioctl)
1195                 err = nvmap_reserve_pages(refs, offset_ptr, size_ptr,
1196                                           op.nr, op.op);
1197         else
1198                 err = nvmap_do_cache_maint_list(refs, offset_ptr, size_ptr,
1199                                                 op.op, op.nr);
1200
1201 free_mem:
1202         kfree(refs);
1203         return err;
1204 }
1205