/*
 * drivers/video/tegra/nvmap/nvmap_ioctl.c
 *
 * User-space interface to nvmap
 *
 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#define pr_fmt(fmt)     "nvmap: %s() " fmt, __func__

#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nvmap.h>
#include <linux/vmalloc.h>

#include <asm/memory.h>

#include <trace/events/nvmap.h>

#include "nvmap_ioctl.h"
#include "nvmap_priv.h"

static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
                         int is_read, unsigned long h_offs,
                         unsigned long sys_addr, unsigned long h_stride,
                         unsigned long sys_stride, unsigned long elem_size,
                         unsigned long count);

/* NOTE: Callers of this utility function must invoke nvmap_handle_put()
 * after they are done using the returned nvmap_handle.
 */
struct nvmap_handle *unmarshal_user_handle(__u32 handle)
{
        struct nvmap_handle *h;

        h = nvmap_get_id_from_dmabuf_fd(NULL, (int)handle);
        if (!IS_ERR(h))
                return h;
        return NULL;
}

/*
 * marshal_id/unmarshal_id are for get_id/handle_from_id.
 * They were added to support using fds for handles.
 */
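/*
 * On 64-bit kernels a handle pointer does not fit in the 32-bit id carried
 * by this ABI.  marshal_id() therefore packs the pointer's low bits into a
 * 32-bit id (handles are at least 4-byte aligned, so bits 0-1 are always
 * zero), and unmarshal_id() rebuilds the pointer by shifting back and
 * OR-ing PAGE_OFFSET in again.  This round-trips only while the handle
 * lives in the kernel linear mapping close enough to PAGE_OFFSET that the
 * discarded high bits match PAGE_OFFSET; on 32-bit builds the id simply
 * carries the pointer value.
 */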
#ifdef CONFIG_ARM64
static __u32 marshal_id(struct nvmap_handle *handle)
{
        return (__u32)((uintptr_t)handle >> 2);
}

static struct nvmap_handle *unmarshal_id(__u32 id)
{
        uintptr_t h = ((id << 2) | PAGE_OFFSET);

        return (struct nvmap_handle *)h;
}
#else
static __u32 marshal_id(struct nvmap_handle *handle)
{
        return (uintptr_t)handle;
}

static struct nvmap_handle *unmarshal_id(__u32 id)
{
        return (struct nvmap_handle *)id;
}
#endif

struct nvmap_handle *__nvmap_ref_to_id(struct nvmap_handle_ref *ref)
{
        if (!virt_addr_valid(ref))
                return NULL;
        return ref->handle;
}

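/*
 * Implements the pin/unpin ioctls: pins (or unpins) one or more handles
 * and, for a pin, copies each handle's DMA address (or carveout base)
 * back to user space.  For op.count == 1 the handle value is carried
 * inline in op.handles; for larger counts op.handles points to a user
 * array of handle fds.
 */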
int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
                      bool is32)
{
#ifdef CONFIG_COMPAT
        struct nvmap_pin_handle_32 op32;
        __u32 __user *output32 = NULL;
#endif
        struct nvmap_pin_handle op;
        struct nvmap_handle *h;
        struct nvmap_handle *on_stack[16];
        struct nvmap_handle **refs;
        unsigned long __user *output = NULL;
        int err = 0;
        u32 i, n_unmarshal_handles = 0;

#ifdef CONFIG_COMPAT
        if (is32) {
                if (copy_from_user(&op32, arg, sizeof(op32)))
                        return -EFAULT;
                op.handles = (__u32 *)(uintptr_t)op32.handles;
                op.count = op32.count;
        } else
#endif
                if (copy_from_user(&op, arg, sizeof(op)))
                        return -EFAULT;

        if (!op.count)
                return -EINVAL;

        if (op.count > 1) {
                size_t bytes = op.count * sizeof(*refs); /* kcalloc below will catch overflow. */

                if (op.count > ARRAY_SIZE(on_stack))
                        refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
                else
                        refs = on_stack;

                if (!refs)
                        return -ENOMEM;

                if (!access_ok(VERIFY_READ, op.handles, bytes)) {
                        err = -EFAULT;
                        goto out;
                }

                for (i = 0; i < op.count; i++) {
                        u32 handle;
                        if (__get_user(handle, &op.handles[i])) {
                                err = -EFAULT;
                                goto out;
                        }
                        refs[i] = unmarshal_user_handle(handle);
                        if (!refs[i]) {
                                err = -EINVAL;
                                goto out;
                        }
                        n_unmarshal_handles++;
                }
        } else {
                refs = on_stack;
                /* Yes, we're storing a u32 in a pointer */
                on_stack[0] = unmarshal_user_handle((u32)(uintptr_t)op.handles);
                if (!on_stack[0]) {
                        err = -EINVAL;
                        goto out;
                }
                n_unmarshal_handles++;
        }

        trace_nvmap_ioctl_pinop(filp->private_data, is_pin, op.count, refs);
        if (is_pin)
                err = nvmap_pin_ids(filp->private_data, op.count, refs);
        else
                nvmap_unpin_ids(filp->private_data, op.count, refs);

        /* skip the output stage on unpin */
        if (err || !is_pin)
                goto out;

        /* it is guaranteed that if nvmap_pin_ids returns 0 that
         * all of the handle_ref objects are valid, so dereferencing
         * directly here is safe */
#ifdef CONFIG_COMPAT
        if (is32) {
                if (op.count > 1)
                        output32 = (__u32 *)(uintptr_t)op.addr;
                else {
                        struct nvmap_pin_handle_32 __user *tmp = arg;
                        output32 = &tmp->addr;
                }

                if (!output32)
                        goto out;
        } else
#endif
        {
                if (op.count > 1)
                        output = op.addr;
                else {
                        struct nvmap_pin_handle __user *tmp = arg;
                        output = (unsigned long *)&tmp->addr;
                }

                if (!output)
                        goto out;
        }

        for (i = 0; i < op.count && !err; i++) {
                unsigned long addr;

                h = refs[i];
                if (h->heap_pgalloc)
                        addr = sg_dma_address(
                                ((struct sg_table *)h->attachment->priv)->sgl);
                else
                        addr = h->carveout->base;

#ifdef CONFIG_COMPAT
                if (is32)
                        err = put_user((__u32)addr, &output32[i]);
                else
#endif
                        err = put_user(addr, &output[i]);
        }

        if (err)
                nvmap_unpin_ids(filp->private_data, op.count, refs);

out:
        for (i = 0; i < n_unmarshal_handles; i++)
                nvmap_handle_put(refs[i]);

        if (refs != on_stack)
                kfree(refs);

        return err;
}

int nvmap_ioctl_getid(struct file *filp, void __user *arg)
{
        struct nvmap_create_handle op;
        struct nvmap_handle *h = NULL;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        h = unmarshal_user_handle(op.handle);
        if (!h)
                return -EINVAL;

        op.id = marshal_id(h);
        nvmap_handle_put(h);

        return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
}

static int nvmap_share_release(struct inode *inode, struct file *file)
{
        struct nvmap_handle *h = file->private_data;

        nvmap_handle_put(h);
        return 0;
}

static int nvmap_share_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* unsupported operation */
        WARN(1, "mmap is not supported on the fd that shares an nvmap handle");
        return -EPERM;
}

const struct file_operations nvmap_fd_fops = {
        .owner          = THIS_MODULE,
        .release        = nvmap_share_release,
        .mmap           = nvmap_share_mmap,
};

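/*
 * NVMAP_IOC_GET_FD: export an existing handle as a new dma-buf fd.  A
 * minimal user-space sketch (illustrative only; it assumes the ioctl
 * request code and the struct layout from the nvmap uapi header, and
 * nvmap_dev_fd is an already opened nvmap device descriptor):
 *
 *        struct nvmap_create_handle op = { .handle = handle_fd };
 *
 *        if (ioctl(nvmap_dev_fd, NVMAP_IOC_GET_FD, &op) == 0)
 *                use_dmabuf(op.fd);   // caller owns op.fd and must close() it
 */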
int nvmap_ioctl_getfd(struct file *filp, void __user *arg)
{
        struct nvmap_handle *handle;
        struct nvmap_create_handle op;
        struct nvmap_client *client = filp->private_data;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        handle = unmarshal_user_handle(op.handle);
        if (!handle)
                return -EINVAL;

        op.fd = nvmap_get_dmabuf_fd(client, handle);
        nvmap_handle_put(handle);
        if (op.fd < 0)
                return op.fd;

        if (copy_to_user(arg, &op, sizeof(op))) {
                sys_close(op.fd);
                return -EFAULT;
        }
        return 0;
}

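/*
 * Allocation ioctls: the backing memory for a previously created handle is
 * committed here.  op.align must be zero or a power of two; it is raised to
 * at least PAGE_SIZE so that user-space handles are page aligned and cannot
 * leak data from neighbouring allocations.
 */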
int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
{
        struct nvmap_alloc_handle op;
        struct nvmap_client *client = filp->private_data;
        struct nvmap_handle *handle;
        int err;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (op.align & (op.align - 1))
                return -EINVAL;

        handle = unmarshal_user_handle(op.handle);
        if (!handle)
                return -EINVAL;

        /* user-space handles are aligned to page boundaries, to prevent
         * data leakage. */
        op.align = max_t(size_t, op.align, PAGE_SIZE);

        err = nvmap_alloc_handle(client, handle, op.heap_mask, op.align,
                                  0, /* no kind */
                                  op.flags & (~NVMAP_HANDLE_KIND_SPECIFIED));
        nvmap_handle_put(handle);
        return err;
}

int nvmap_ioctl_alloc_kind(struct file *filp, void __user *arg)
{
        struct nvmap_alloc_kind_handle op;
        struct nvmap_client *client = filp->private_data;
        struct nvmap_handle *handle;
        int err;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (op.align & (op.align - 1))
                return -EINVAL;

        handle = unmarshal_user_handle(op.handle);
        if (!handle)
                return -EINVAL;

        /* user-space handles are aligned to page boundaries, to prevent
         * data leakage. */
        op.align = max_t(size_t, op.align, PAGE_SIZE);

        err = nvmap_alloc_handle(client, handle,
                                  op.heap_mask,
                                  op.align,
                                  op.kind,
                                  op.flags);
        nvmap_handle_put(handle);
        return err;
}

int nvmap_create_fd(struct nvmap_client *client, struct nvmap_handle *h)
{
        int fd;

        fd = __nvmap_dmabuf_fd(client, h->dmabuf, O_CLOEXEC);
        BUG_ON(fd == 0);
        if (fd < 0) {
                pr_err("Out of file descriptors\n");
                return fd;
        }
        /* __nvmap_dmabuf_fd() associates the new fd with dma_buf->file;
         * closing that fd drops one reference on the file.  Take a
         * reference on the dma_buf here to keep the counts balanced.
         */
        get_dma_buf(h->dmabuf);
        return fd;
}

int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
{
        struct nvmap_create_handle op;
        struct nvmap_handle_ref *ref = NULL;
        struct nvmap_client *client = filp->private_data;
        int err = 0;
        int fd = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!client)
                return -ENODEV;

        if (cmd == NVMAP_IOC_CREATE) {
                ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
                if (!IS_ERR(ref))
                        ref->handle->orig_size = op.size;
        } else if (cmd == NVMAP_IOC_FROM_ID) {
                ref = nvmap_duplicate_handle(client, unmarshal_id(op.id), 0);
        } else if (cmd == NVMAP_IOC_FROM_FD) {
                ref = nvmap_create_handle_from_fd(client, op.fd);
        } else {
                return -EINVAL;
        }

        if (IS_ERR(ref))
                return PTR_ERR(ref);

        fd = nvmap_create_fd(client, ref->handle);
        if (fd < 0)
                err = fd;

        op.handle = fd;

        if (copy_to_user(arg, &op, sizeof(op))) {
                err = -EFAULT;
                nvmap_free_handle(client, __nvmap_ref_to_id(ref));
        }

        if (err && fd > 0)
                sys_close(fd);
        return err;
}

int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg, bool is32)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_map_caller op;
#ifdef CONFIG_COMPAT
        struct nvmap_map_caller_32 op32;
#endif
        struct nvmap_vma_priv *priv;
        struct vm_area_struct *vma;
        struct nvmap_handle *h = NULL;
        int err = 0;

#ifdef CONFIG_COMPAT
        if (is32) {
                if (copy_from_user(&op32, arg, sizeof(op32)))
                        return -EFAULT;
                op.handle = op32.handle;
                op.offset = op32.offset;
                op.length = op32.length;
                op.flags = op32.flags;
                op.addr = op32.addr;
        } else
#endif
                if (copy_from_user(&op, arg, sizeof(op)))
                        return -EFAULT;

        h = unmarshal_user_handle(op.handle);
        if (!h)
                return -EINVAL;

        if (!h->alloc) {
                nvmap_handle_put(h);
                return -EFAULT;
        }

        trace_nvmap_map_into_caller_ptr(client, h, op.offset,
                                        op.length, op.flags);
        down_read(&current->mm->mmap_sem);

        vma = find_vma(current->mm, op.addr);
        if (!vma) {
                err = -ENOMEM;
                goto out;
        }

        if (op.offset & ~PAGE_MASK) {
                err = -EFAULT;
                goto out;
        }

        if (op.offset >= h->size || op.length > h->size - op.offset) {
                err = -EADDRNOTAVAIL;
                goto out;
        }

        /* the VMA must exactly match the requested mapping operation, and the
         * VMA that is targeted must have been created by this driver
         */
        if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
            (vma->vm_end - vma->vm_start != op.length)) {
                err = -EPERM;
                goto out;
        }

        /* verify that each mmap() system call creates a unique VMA */
        if (vma->vm_private_data)
                goto out;

        if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
                err = -EFAULT;
                goto out;
        }

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                err = -ENOMEM;
                goto out;
        }

        vma->vm_flags |= (h->heap_pgalloc ? 0 : VM_PFNMAP);
        priv->handle = h;
        priv->offs = op.offset;
        vma->vm_private_data = priv;
        vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
        nvmap_vma_open(vma);

out:
        up_read(&current->mm->mmap_sem);

        if (err)
                nvmap_handle_put(h);
        return err;
}

int nvmap_ioctl_get_param(struct file *filp, void __user *arg, bool is32)
{
#ifdef CONFIG_COMPAT
        struct nvmap_handle_param_32 __user *uarg32 = arg;
#endif
        struct nvmap_handle_param __user *uarg = arg;
        struct nvmap_handle_param op;
        struct nvmap_client *client = filp->private_data;
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h;
        u64 result;
        int err = 0;

#ifdef CONFIG_COMPAT
        /* This is safe because the incoming value of result doesn't matter */
        if (is32) {
                if (copy_from_user(&op, arg,
                                sizeof(struct nvmap_handle_param_32)))
                        return -EFAULT;
        } else
#endif
                if (copy_from_user(&op, arg, sizeof(op)))
                        return -EFAULT;

        h = unmarshal_user_handle(op.handle);
        if (!h)
                return -EINVAL;

        nvmap_ref_lock(client);
        ref = __nvmap_validate_locked(client, h);
        if (IS_ERR_OR_NULL(ref)) {
                err = ref ? PTR_ERR(ref) : -EINVAL;
                goto ref_fail;
        }

        err = nvmap_get_handle_param(client, ref, op.param, &result);

#ifdef CONFIG_COMPAT
        if (is32)
                err = put_user((__u32)result, &uarg32->result);
        else
#endif
                err = put_user((unsigned long)result, &uarg->result);

ref_fail:
        nvmap_ref_unlock(client);
        nvmap_handle_put(h);
        return err;
}

int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
                          bool is32)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_rw_handle __user *uarg = arg;
        struct nvmap_rw_handle op;
#ifdef CONFIG_COMPAT
        struct nvmap_rw_handle_32 __user *uarg32 = arg;
        struct nvmap_rw_handle_32 op32;
#endif
        struct nvmap_handle *h;
        ssize_t copied;
        int err = 0;

#ifdef CONFIG_COMPAT
        if (is32) {
                if (copy_from_user(&op32, arg, sizeof(op32)))
                        return -EFAULT;
                op.addr = op32.addr;
                op.handle = op32.handle;
                op.offset = op32.offset;
                op.elem_size = op32.elem_size;
                op.hmem_stride = op32.hmem_stride;
                op.user_stride = op32.user_stride;
                op.count = op32.count;
        } else
#endif
                if (copy_from_user(&op, arg, sizeof(op)))
                        return -EFAULT;

        if (!op.addr || !op.count || !op.elem_size)
                return -EINVAL;

        h = unmarshal_user_handle(op.handle);
        if (!h)
                return -EINVAL;

        trace_nvmap_ioctl_rw_handle(client, h, is_read, op.offset,
                                    op.addr, op.hmem_stride,
                                    op.user_stride, op.elem_size, op.count);
        copied = rw_handle(client, h, is_read, op.offset,
                           (unsigned long)op.addr, op.hmem_stride,
                           op.user_stride, op.elem_size, op.count);

        if (copied < 0) {
                err = copied;
                copied = 0;
        } else if (copied < (op.count * op.elem_size))
                err = -EINTR;

#ifdef CONFIG_COMPAT
        if (is32)
                __put_user(copied, &uarg32->count);
        else
#endif
                __put_user(copied, &uarg->count);

        nvmap_handle_put(h);

        return err;
}

static int __nvmap_cache_maint(struct nvmap_client *client,
                               struct nvmap_cache_op *op)
{
        struct vm_area_struct *vma;
        struct nvmap_vma_priv *priv;
        struct nvmap_handle *handle;
        unsigned long start;
        unsigned long end;
        int err = 0;

        if (!op->addr || op->op < NVMAP_CACHE_OP_WB ||
            op->op > NVMAP_CACHE_OP_WB_INV)
                return -EINVAL;

        handle = unmarshal_user_handle(op->handle);
        if (!handle)
                return -EINVAL;

        down_read(&current->mm->mmap_sem);

        vma = find_vma(current->active_mm, (unsigned long)op->addr);
        if (!vma || !is_nvmap_vma(vma) ||
            (ulong)op->addr < vma->vm_start ||
            (ulong)op->addr >= vma->vm_end ||
            op->len > vma->vm_end - (ulong)op->addr) {
                err = -EADDRNOTAVAIL;
                goto out;
        }

        priv = (struct nvmap_vma_priv *)vma->vm_private_data;

        if (priv->handle != handle) {
                err = -EFAULT;
                goto out;
        }

        start = (unsigned long)op->addr - vma->vm_start +
                (vma->vm_pgoff << PAGE_SHIFT);
        end = start + op->len;

        err = __nvmap_do_cache_maint(client, priv->handle, start, end, op->op,
                                     false);
out:
        up_read(&current->mm->mmap_sem);
        nvmap_handle_put(handle);
        return err;
}

int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg, bool is32)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_cache_op op;
#ifdef CONFIG_COMPAT
        struct nvmap_cache_op_32 op32;
#endif

#ifdef CONFIG_COMPAT
        if (is32) {
                if (copy_from_user(&op32, arg, sizeof(op32)))
                        return -EFAULT;
                op.addr = op32.addr;
                op.handle = op32.handle;
                op.len = op32.len;
                op.op = op32.op;
        } else
#endif
                if (copy_from_user(&op, arg, sizeof(op)))
                        return -EFAULT;

        return __nvmap_cache_maint(client, &op);
}

int nvmap_ioctl_free(struct file *filp, unsigned long arg)
{
        struct nvmap_client *client = filp->private_data;

        if (!arg)
                return 0;

        nvmap_free_handle_user_id(client, arg);
        return sys_close(arg);
}

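/*
 * Helpers that translate an NVMAP cache op into the ARM cache primitives:
 * inner cache maintenance by virtual address via the dmac_* routines and
 * outer cache (e.g. PL310) maintenance by physical address via the outer_*
 * routines.  WB_INV flushes (clean + invalidate), INV invalidates, and WB
 * cleans the given range.
 */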
static void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
{
        if (op == NVMAP_CACHE_OP_WB_INV)
                dmac_flush_range(vaddr, vaddr + size);
        else if (op == NVMAP_CACHE_OP_INV)
                dmac_map_area(vaddr, size, DMA_FROM_DEVICE);
        else
                dmac_map_area(vaddr, size, DMA_TO_DEVICE);
}

static void outer_cache_maint(unsigned int op, phys_addr_t paddr, size_t size)
{
        if (op == NVMAP_CACHE_OP_WB_INV)
                outer_flush_range(paddr, paddr + size);
        else if (op == NVMAP_CACHE_OP_INV)
                outer_inv_range(paddr, paddr + size);
        else
                outer_clean_range(paddr, paddr + size);
}

static void heap_page_cache_maint(
        struct nvmap_handle *h, unsigned long start, unsigned long end,
        unsigned int op, bool inner, bool outer,
        unsigned long kaddr, pgprot_t prot, bool clean_only_dirty)
{
        if (h->userflags & NVMAP_HANDLE_CACHE_SYNC) {
                /*
                 * zap user VA->PA mappings so that any access to the pages
                 * will result in a fault and can be marked dirty
                 */
                nvmap_handle_mkclean(h, start, end - start);
                nvmap_zap_handle(h, start, end - start);
        }

#ifdef NVMAP_LAZY_VFREE
        if (inner) {
                void *vaddr = NULL;

                if (!h->vaddr) {
                        struct page **pages;
                        pages = nvmap_pages(h->pgalloc.pages,
                                            h->size >> PAGE_SHIFT);
                        if (!pages)
                                goto per_page_cache_maint;
                        vaddr = vm_map_ram(pages,
                                        h->size >> PAGE_SHIFT, -1, prot);
                        nvmap_altfree(pages,
                                (h->size >> PAGE_SHIFT) * sizeof(*pages));
                }
                if (vaddr && atomic_long_cmpxchg(&h->vaddr, 0, (long)vaddr))
                        vm_unmap_ram(vaddr, h->size >> PAGE_SHIFT);
                if (h->vaddr) {
                        /* Fast inner cache maintenance using single mapping */
                        inner_cache_maint(op, h->vaddr + start, end - start);
                        if (!outer)
                                return;
                        /* Skip per-page inner maintenance in loop below */
                        inner = false;
                }
        }
per_page_cache_maint:
#endif

        while (start < end) {
                struct page *page;
                phys_addr_t paddr;
                unsigned long next;
                unsigned long off;
                size_t size;

                page = nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
                next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
                off = start & ~PAGE_MASK;
                size = next - start;
                paddr = page_to_phys(page) + off;

                if (inner) {
                        void *vaddr = (void *)kaddr + off;
                        BUG_ON(!kaddr);
                        ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
                                paddr, prot);
                        inner_cache_maint(op, vaddr, size);
                        unmap_kernel_range(kaddr, PAGE_SIZE);
                }

                if (outer)
                        outer_cache_maint(op, paddr, size);
                start = next;
        }
}

#if defined(CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS)
static bool fast_cache_maint_outer(unsigned long start,
                unsigned long end, unsigned int op)
{
        bool result = false;
        if (end - start >= cache_maint_outer_threshold) {
                if (op == NVMAP_CACHE_OP_WB_INV) {
                        outer_flush_all();
                        result = true;
                }
                if (op == NVMAP_CACHE_OP_WB) {
                        outer_clean_all();
                        result = true;
                }
        }

        return result;
}
#else
static inline bool fast_cache_maint_outer(unsigned long start,
                unsigned long end, unsigned int op)
{
        return false;
}
#endif

#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
static inline bool can_fast_cache_maint(struct nvmap_handle *h,
        unsigned long start,
        unsigned long end, unsigned int op)
{
        if ((op == NVMAP_CACHE_OP_INV) ||
                ((end - start) < cache_maint_inner_threshold))
                return false;
        return true;
}
#else
static inline bool can_fast_cache_maint(struct nvmap_handle *h,
        unsigned long start,
        unsigned long end, unsigned int op)
{
        return false;
}
#endif

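/*
 * For very large ranges it is cheaper to clean/flush the entire cache by
 * set/way than to walk the range line by line, so fast_cache_maint() falls
 * back to whole-cache maintenance once the range exceeds the configured
 * thresholds.  Invalidate-only requests never take this path, since a
 * whole-cache invalidate would discard unrelated dirty lines.
 */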
static bool fast_cache_maint(struct nvmap_handle *h,
        unsigned long start,
        unsigned long end, unsigned int op,
        bool clean_only_dirty)
{
        if (!can_fast_cache_maint(h, start, end, op))
                return false;

        if (h->userflags & NVMAP_HANDLE_CACHE_SYNC) {
                nvmap_handle_mkclean(h, 0, h->size);
                nvmap_zap_handle(h, 0, h->size);
        }

        if (op == NVMAP_CACHE_OP_WB_INV)
                inner_flush_cache_all();
        else if (op == NVMAP_CACHE_OP_WB)
                inner_clean_cache_all();

        /* outer maintenance */
        if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
                if (!fast_cache_maint_outer(start, end, op)) {
                        if (h->heap_pgalloc) {
                                heap_page_cache_maint(h, start,
                                        end, op, false, true, 0, 0,
                                        clean_only_dirty);
                        } else {
                                phys_addr_t pstart;

                                pstart = start + h->carveout->base;
                                outer_cache_maint(op, pstart, end - start);
                        }
                }
        }
        return true;
}

struct cache_maint_op {
        phys_addr_t start;
        phys_addr_t end;
        unsigned int op;
        struct nvmap_handle *h;
        bool inner;
        bool outer;
        bool clean_only_dirty;
};

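/*
 * Core cache-maintenance path.  After accounting/tracing, it tries the
 * whole-cache fast path; otherwise it walks the handle one page at a time,
 * temporarily mapping each page into a single-page vm area with
 * ioremap_page_range() so the inner cache can be maintained by virtual
 * address, and issuing outer-cache maintenance by physical address.
 */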
static int do_cache_maint(struct cache_maint_op *cache_work)
{
        pgprot_t prot;
        unsigned long kaddr;
        phys_addr_t pstart = cache_work->start;
        phys_addr_t pend = cache_work->end;
        phys_addr_t loop;
        int err = 0;
        struct nvmap_handle *h = cache_work->h;
        struct nvmap_client *client;
        unsigned int op = cache_work->op;
        struct vm_struct *area = NULL;

        if (!h || !h->alloc)
                return -EFAULT;

        client = h->owner;
        if (can_fast_cache_maint(h, pstart, pend, op))
                nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
        else
                nvmap_stats_inc(NS_CFLUSH_DONE, pend - pstart);
        trace_nvmap_cache_maint(client, h, pstart, pend, op, pend - pstart);
        trace_nvmap_cache_flush(pend - pstart,
                nvmap_stats_read(NS_ALLOC),
                nvmap_stats_read(NS_CFLUSH_RQ),
                nvmap_stats_read(NS_CFLUSH_DONE));

        wmb();
        if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
            h->flags == NVMAP_HANDLE_WRITE_COMBINE || pstart == pend)
                goto out;

        if (fast_cache_maint(h, pstart, pend, op, cache_work->clean_only_dirty))
                goto out;

        prot = nvmap_pgprot(h, PG_PROT_KERNEL);
        area = alloc_vm_area(PAGE_SIZE, NULL);
        if (!area) {
                err = -ENOMEM;
                goto out;
        }
        kaddr = (ulong)area->addr;

        if (h->heap_pgalloc) {
                heap_page_cache_maint(h, pstart, pend, op, true,
                        (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ?
                        false : true, kaddr, prot,
                        cache_work->clean_only_dirty);
                goto out;
        }

        if (pstart > h->size || pend > h->size) {
                pr_warn("cache maintenance outside handle\n");
                err = -EINVAL;
                goto out;
        }

        pstart += h->carveout->base;
        pend += h->carveout->base;
        loop = pstart;

        while (loop < pend) {
                phys_addr_t next = (loop + PAGE_SIZE) & PAGE_MASK;
                void *base = (void *)kaddr + (loop & ~PAGE_MASK);
                next = min(next, pend);

                ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
                        loop, prot);
                inner_cache_maint(op, base, next - loop);
                loop = next;
                unmap_kernel_range(kaddr, PAGE_SIZE);
        }

        if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
                outer_cache_maint(op, pstart, pend - pstart);

out:
        if (area)
                free_vm_area(area);
        return err;
}

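/*
 * __nvmap_do_cache_maint - normalize and dispatch a cache maintenance
 * request.  A plain invalidate is promoted to clean + invalidate, and
 * clean_only_dirty is honoured only for writeback, before the request is
 * handed to do_cache_maint() with the inner/outer enables derived from the
 * handle's cacheability flags.
 */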
int __nvmap_do_cache_maint(struct nvmap_client *client,
                        struct nvmap_handle *h,
                        unsigned long start, unsigned long end,
                        unsigned int op, bool clean_only_dirty)
{
        int err;
        struct cache_maint_op cache_op;

        h = nvmap_handle_get(h);
        if (!h)
                return -EFAULT;

        if (op == NVMAP_CACHE_OP_INV)
                op = NVMAP_CACHE_OP_WB_INV;

        /* clean only dirty is applicable only for Write Back operation */
        if (op != NVMAP_CACHE_OP_WB)
                clean_only_dirty = false;

        cache_op.h = h;
        cache_op.start = start;
        cache_op.end = end;
        cache_op.op = op;
        cache_op.inner = h->flags == NVMAP_HANDLE_CACHEABLE ||
                         h->flags == NVMAP_HANDLE_INNER_CACHEABLE;
        cache_op.outer = h->flags == NVMAP_HANDLE_CACHEABLE;
        cache_op.clean_only_dirty = clean_only_dirty;

        nvmap_stats_inc(NS_CFLUSH_RQ, end - start);
        err = do_cache_maint(&cache_op);
        nvmap_handle_put(h);
        return err;
}

static int rw_handle_page(struct nvmap_handle *h, int is_read,
                          unsigned long start, unsigned long rw_addr,
                          unsigned long bytes, unsigned long kaddr)
{
        pgprot_t prot = nvmap_pgprot(h, PG_PROT_KERNEL);
        unsigned long end = start + bytes;
        int err = 0;

        while (!err && start < end) {
                struct page *page = NULL;
                phys_addr_t phys;
                size_t count;
                void *src;

                if (!h->heap_pgalloc) {
                        phys = h->carveout->base + start;
                } else {
                        page =
                           nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
                        BUG_ON(!page);
                        get_page(page);
                        phys = page_to_phys(page) + (start & ~PAGE_MASK);
                }

                ioremap_page_range(kaddr, kaddr + PAGE_SIZE, phys, prot);

                src = (void *)kaddr + (phys & ~PAGE_MASK);
                phys = PAGE_SIZE - (phys & ~PAGE_MASK);
                count = min_t(size_t, end - start, phys);

                if (is_read)
                        err = copy_to_user((void *)rw_addr, src, count);
                else
                        err = copy_from_user(src, (void *)rw_addr, count);

                if (err)
                        err = -EFAULT;

                rw_addr += count;
                start += count;

                if (page)
                        put_page(page);
                unmap_kernel_range(kaddr, PAGE_SIZE);
        }

        return err;
}

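/*
 * rw_handle() implements the strided read/write ioctls: it copies `count'
 * elements of `elem_size' bytes between user memory (advancing by
 * sys_stride per element) and the handle (advancing by h_stride per
 * element, starting at h_offs), with cache maintenance around each copy.
 * When both strides equal the element size the transfer collapses into a
 * single contiguous copy.
 */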
static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
                         int is_read, unsigned long h_offs,
                         unsigned long sys_addr, unsigned long h_stride,
                         unsigned long sys_stride, unsigned long elem_size,
                         unsigned long count)
{
        ssize_t copied = 0;
        void *addr;
        int ret = 0;
        struct vm_struct *area;

        if (!elem_size || !count)
                return -EINVAL;

        if (!h->alloc)
                return -EFAULT;

        if (elem_size == h_stride && elem_size == sys_stride) {
                elem_size *= count;
                h_stride = elem_size;
                sys_stride = elem_size;
                count = 1;
        }

        if (elem_size > h->size ||
                h_offs >= h->size ||
                elem_size > sys_stride ||
                elem_size > h_stride ||
                sys_stride > (h->size - h_offs) / count ||
                h_stride > (h->size - h_offs) / count)
                return -EINVAL;

        area = alloc_vm_area(PAGE_SIZE, NULL);
        if (!area)
                return -ENOMEM;
        addr = area->addr;

        while (count--) {
                if (h_offs + elem_size > h->size) {
                        pr_warn("read/write outside of handle\n");
                        ret = -EFAULT;
                        break;
                }
                if (is_read)
                        __nvmap_do_cache_maint(client, h, h_offs,
                                h_offs + elem_size, NVMAP_CACHE_OP_INV, false);

                ret = rw_handle_page(h, is_read, h_offs, sys_addr,
                                     elem_size, (unsigned long)addr);

                if (ret)
                        break;

                if (!is_read)
                        __nvmap_do_cache_maint(client, h, h_offs,
                                h_offs + elem_size, NVMAP_CACHE_OP_WB_INV,
                                false);

                copied += elem_size;
                sys_addr += sys_stride;
                h_offs += h_stride;
        }

        free_vm_area(area);
        return ret ?: copied;
}

int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg,
                                 bool is_reserve_ioctl)
{
        struct nvmap_cache_op_list op;
        u32 *handle_ptr;
        u32 *offset_ptr;
        u32 *size_ptr;
        struct nvmap_handle **refs;
        int err = 0;
        u32 i, n_unmarshal_handles = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.nr)
                return -EINVAL;

        if (!access_ok(VERIFY_READ, op.handles, op.nr * sizeof(u32)))
                return -EFAULT;

        if (!access_ok(VERIFY_READ, op.offsets, op.nr * sizeof(u32)))
                return -EFAULT;

        if (!access_ok(VERIFY_READ, op.sizes, op.nr * sizeof(u32)))
                return -EFAULT;

        if (!op.offsets || !op.sizes)
                return -EINVAL;

        refs = kcalloc(op.nr, sizeof(*refs), GFP_KERNEL);

        if (!refs)
                return -ENOMEM;

        handle_ptr = (u32 *)(uintptr_t)op.handles;
        offset_ptr = (u32 *)(uintptr_t)op.offsets;
        size_ptr = (u32 *)(uintptr_t)op.sizes;

        for (i = 0; i < op.nr; i++) {
                u32 handle;

                if (copy_from_user(&handle, &handle_ptr[i], sizeof(handle))) {
                        err = -EFAULT;
                        goto free_mem;
                }

                refs[i] = unmarshal_user_handle(handle);
                if (!refs[i]) {
                        err = -EINVAL;
                        goto free_mem;
                }
                n_unmarshal_handles++;
        }

        if (is_reserve_ioctl)
                err = nvmap_reserve_pages(refs, offset_ptr, size_ptr,
                                          op.nr, op.op);
        else
                err = nvmap_do_cache_maint_list(refs, offset_ptr, size_ptr,
                                                op.op, op.nr);

free_mem:
        for (i = 0; i < n_unmarshal_handles; i++)
                nvmap_handle_put(refs[i]);
        kfree(refs);
        return err;
}