/*
 * drivers/video/tegra/nvmap/nvmap_ioctl.c
 *
 * User-space interface to nvmap
 *
 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#define pr_fmt(fmt)	"nvmap: %s() " fmt, __func__

#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nvmap.h>
#include <linux/vmalloc.h>
#include <linux/list.h>

#include <asm/memory.h>

#include <trace/events/nvmap.h>

#include "nvmap_ioctl.h"
#include "nvmap_priv.h"

static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
			 int is_read, unsigned long h_offs,
			 unsigned long sys_addr, unsigned long h_stride,
			 unsigned long sys_stride, unsigned long elem_size,
			 unsigned long count);

/* NOTE: Callers of this utility function must invoke nvmap_handle_put after
 * using the returned nvmap_handle.
 */
struct nvmap_handle *unmarshal_user_handle(__u32 handle)
{
	struct nvmap_handle *h;

	h = nvmap_get_id_from_dmabuf_fd(NULL, (int)handle);
	if (!IS_ERR(h))
		return h;
	return NULL;
}
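
/*
 * Illustrative caller pattern (a sketch, not code from this file): every
 * handle successfully unmarshalled above must be balanced with a put:
 *
 *	struct nvmap_handle *h = unmarshal_user_handle(op.handle);
 *	if (!h)
 *		return -EINVAL;
 *	...
 *	nvmap_handle_put(h);
 *
 * This is the shape used by the ioctl handlers below.
 */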

/*
 * marshal_id/unmarshal_id are for get_id/handle_from_id.
 * These were added to support using fds for handles.
 */
#ifdef CONFIG_ARM64
static __u32 marshal_id(struct nvmap_handle *handle)
{
	return (__u32)((uintptr_t)handle >> 2);
}

static struct nvmap_handle *unmarshal_id(__u32 id)
{
	uintptr_t h = ((id << 2) | PAGE_OFFSET);

	return (struct nvmap_handle *)h;
}
#else
static __u32 marshal_id(struct nvmap_handle *handle)
{
	return (uintptr_t)handle;
}

static struct nvmap_handle *unmarshal_id(__u32 id)
{
	return (struct nvmap_handle *)id;
}
#endif
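
/*
 * How the first variant above round-trips a pointer through 32 bits (a
 * summary of the code, assuming handle pointers come from the kernel linear
 * map and are at least 4-byte aligned): marshal_id() drops the two
 * always-zero low bits so the significant bits fit in a __u32, and
 * unmarshal_id() shifts them back and ORs in PAGE_OFFSET to restore the
 * high bits shared by all linear-map addresses. The second variant can
 * store the pointer value in a __u32 directly.
 */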

struct nvmap_handle *__nvmap_ref_to_id(struct nvmap_handle_ref *ref)
{
	if (!virt_addr_valid(ref))
		return NULL;
	return ref->handle;
}

int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
		      bool is32)
{
#ifdef CONFIG_COMPAT
	struct nvmap_pin_handle_32 op32;
	__u32 __user *output32 = NULL;
#endif
	struct nvmap_pin_handle op;
	struct nvmap_handle *h;
	struct nvmap_handle *on_stack[16];
	struct nvmap_handle **refs;
	unsigned long __user *output = NULL;
	int err = 0;
	u32 i, n_unmarshal_handles = 0;

#ifdef CONFIG_COMPAT
	if (is32) {
		if (copy_from_user(&op32, arg, sizeof(op32)))
			return -EFAULT;
		op.handles = (__u32 *)(uintptr_t)op32.handles;
		op.count = op32.count;
	} else
#endif
		if (copy_from_user(&op, arg, sizeof(op)))
			return -EFAULT;

	if (!op.count)
		return -EINVAL;

	if (op.count > 1) {
		size_t bytes = op.count * sizeof(*refs); /* kcalloc below will catch overflow. */

		if (op.count > ARRAY_SIZE(on_stack))
			refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
		else
			refs = on_stack;

		if (!refs)
			return -ENOMEM;

		if (!access_ok(VERIFY_READ, op.handles, bytes)) {
			err = -EFAULT;
			goto out;
		}

		for (i = 0; i < op.count; i++) {
			u32 handle;

			if (__get_user(handle, &op.handles[i])) {
				err = -EFAULT;
				goto out;
			}
			refs[i] = unmarshal_user_handle(handle);
			if (!refs[i]) {
				err = -EINVAL;
				goto out;
			}
			n_unmarshal_handles++;
		}
	} else {
		refs = on_stack;

		/* Yes, we're storing a u32 in a pointer */
		on_stack[0] = unmarshal_user_handle((u32)(uintptr_t)op.handles);
		if (!on_stack[0]) {
			err = -EINVAL;
			goto out;
		}
		n_unmarshal_handles++;
	}

	trace_nvmap_ioctl_pinop(filp->private_data, is_pin, op.count, refs);
	if (is_pin)
		err = nvmap_pin_ids(filp->private_data, op.count, refs);
	else
		nvmap_unpin_ids(filp->private_data, op.count, refs);

	/* skip the output stage on unpin */
	if (err || !is_pin)
		goto out;

	/* it is guaranteed that if nvmap_pin_ids returns 0 that
	 * all of the handle_ref objects are valid, so dereferencing
	 * directly here is safe */
#ifdef CONFIG_COMPAT
	if (is32) {
		if (op.count > 1)
			output32 = (__u32 *)(uintptr_t)op.addr;
		else {
			struct nvmap_pin_handle_32 __user *tmp = arg;
			output32 = &tmp->addr;
		}

		if (!output32)
			goto out;
	} else
#endif
	{
		if (op.count > 1)
			output = (unsigned long *)op.addr;
		else {
			struct nvmap_pin_handle __user *tmp = arg;
			output = (unsigned long *)&tmp->addr;
		}

		if (!output)
			goto out;
	}

	for (i = 0; i < op.count && !err; i++) {
		unsigned long addr;

		h = refs[i];
		if (h->heap_pgalloc)
			addr = sg_dma_address(
				((struct sg_table *)h->attachment->priv)->sgl);
		else
			addr = h->carveout->base;

#ifdef CONFIG_COMPAT
		if (is32)
			err = put_user((__u32)addr, &output32[i]);
		else
#endif
			err = put_user(addr, &output[i]);
	}

	if (err)
		nvmap_unpin_ids(filp->private_data, op.count, refs);

out:
	for (i = 0; i < n_unmarshal_handles; i++)
		nvmap_handle_put(refs[i]);

	if (refs != on_stack)
		kfree(refs);

	return err;
}
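
/*
 * Note on the pin/unpin ABI implemented above: when op.count > 1,
 * op.handles is a user pointer to an array of u32 handles and the pinned
 * addresses are written to the buffer at op.addr; in the single-handle case
 * the handle value itself is carried in the op.handles field (hence the
 * "storing a u32 in a pointer" comment) and the address is written back
 * into the op structure's addr field instead.
 */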

int nvmap_ioctl_getid(struct file *filp, void __user *arg)
{
	struct nvmap_create_handle op;
	struct nvmap_handle *h = NULL;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	h = unmarshal_user_handle(op.handle);
	if (!h)
		return -EINVAL;

	op.id = marshal_id(h);
	nvmap_handle_put(h);

	return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
}
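
/*
 * Sketch of the intended user-space flow (illustrative only; ioctl names as
 * used by this driver): a process converts its fd-based handle into a
 * global id that another process can turn back into a local handle:
 *
 *	op.handle = handle_fd;
 *	ioctl(nvmap_fd, NVMAP_IOC_GET_ID, &op);	 // op.id now valid
 *	// ...send op.id to the peer process...
 *	ioctl(nvmap_fd, NVMAP_IOC_FROM_ID, &op); // peer gets its own handle
 */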

static int nvmap_share_release(struct inode *inode, struct file *file)
{
	struct nvmap_handle *h = file->private_data;

	nvmap_handle_put(h);
	return 0;
}

static int nvmap_share_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* unsupported operation */
	WARN(1, "mmap is not supported on fds that share nvmap handles");
	return -EPERM;
}

const struct file_operations nvmap_fd_fops = {
	.owner		= THIS_MODULE,
	.release	= nvmap_share_release,
	.mmap		= nvmap_share_mmap,
};

int nvmap_ioctl_getfd(struct file *filp, void __user *arg)
{
	struct nvmap_handle *handle;
	struct nvmap_create_handle op;
	struct nvmap_client *client = filp->private_data;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	handle = unmarshal_user_handle(op.handle);
	if (!handle)
		return -EINVAL;

	op.fd = nvmap_get_dmabuf_fd(client, handle);
	nvmap_handle_put(handle);
	if (op.fd < 0)
		return op.fd;

	if (copy_to_user(arg, &op, sizeof(op))) {
		sys_close(op.fd);
		return -EFAULT;
	}
	return 0;
}
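
/*
 * Illustrative note (not code from this file): the fd returned above is a
 * dma-buf fd, so the receiving side can either import it back into nvmap
 * with NVMAP_IOC_FROM_FD or hand it to any dma-buf aware API; closing the
 * fd drops the reference taken on the caller's behalf.
 */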

int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
{
	struct nvmap_alloc_handle op;
	struct nvmap_client *client = filp->private_data;
	struct nvmap_handle *handle;
	int err;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (op.align & (op.align - 1))
		return -EINVAL;

	handle = unmarshal_user_handle(op.handle);
	if (!handle)
		return -EINVAL;

	/* user-space handles are aligned to page boundaries, to prevent
	 * data leakage. */
	op.align = max_t(size_t, op.align, PAGE_SIZE);

	err = nvmap_alloc_handle(client, handle, op.heap_mask, op.align,
				 0, /* no kind */
				 op.flags & (~NVMAP_HANDLE_KIND_SPECIFIED));
	nvmap_handle_put(handle);
	return err;
}

int nvmap_ioctl_alloc_kind(struct file *filp, void __user *arg)
{
	struct nvmap_alloc_kind_handle op;
	struct nvmap_client *client = filp->private_data;
	struct nvmap_handle *handle;
	int err;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (op.align & (op.align - 1))
		return -EINVAL;

	handle = unmarshal_user_handle(op.handle);
	if (!handle)
		return -EINVAL;

	/* user-space handles are aligned to page boundaries, to prevent
	 * data leakage. */
	op.align = max_t(size_t, op.align, PAGE_SIZE);

	err = nvmap_alloc_handle(client, handle,
				 op.heap_mask,
				 op.align,
				 op.kind,
				 op.flags);
	nvmap_handle_put(handle);
	return err;
}

int nvmap_create_fd(struct nvmap_client *client, struct nvmap_handle *h)
{
	int fd;

	fd = __nvmap_dmabuf_fd(client, h->dmabuf, O_CLOEXEC);
	if (fd < 0) {
		pr_err("Out of file descriptors");
		return fd;
	}
	/* __nvmap_dmabuf_fd() associates the fd with dma_buf->file;
	 * closing the fd drops one reference on dmabuf->file.
	 * Take a reference on the dma_buf to balance that.
	 */
	get_dma_buf(h->dmabuf);
	return fd;
}
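
/*
 * Reference-count balance for the helper above (a summary of its comment):
 * installing the fd does not itself take a dma_buf reference, while closing
 * the fd releases one, so the explicit get keeps the pairing symmetric:
 *
 *	nvmap_create_fd()  ->  get_dma_buf()       (+1)
 *	close(fd)          ->  put on dmabuf->file (-1)
 */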

int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
{
	struct nvmap_create_handle op;
	struct nvmap_handle_ref *ref = NULL;
	struct nvmap_client *client = filp->private_data;
	int err = 0;
	int fd = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!client)
		return -ENODEV;

	if (cmd == NVMAP_IOC_CREATE) {
		ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
		if (!IS_ERR(ref))
			ref->handle->orig_size = op.size;
	} else if (cmd == NVMAP_IOC_FROM_ID) {
		ref = nvmap_duplicate_handle(client, unmarshal_id(op.id), 0);
	} else if (cmd == NVMAP_IOC_FROM_FD) {
		ref = nvmap_create_handle_from_fd(client, op.fd);
	} else {
		return -EINVAL;
	}

	if (IS_ERR(ref))
		return PTR_ERR(ref);

	fd = nvmap_create_fd(client, ref->handle);
	if (fd < 0)
		err = fd;

	op.handle = fd;

	if (copy_to_user(arg, &op, sizeof(op))) {
		err = -EFAULT;
		nvmap_free_handle(client, __nvmap_ref_to_id(ref));
	}

	if (err && fd > 0)
		sys_close(fd);
	return err;
}

int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg, bool is32)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_map_caller op;
#ifdef CONFIG_COMPAT
	struct nvmap_map_caller_32 op32;
#endif
	struct nvmap_vma_priv *priv;
	struct vm_area_struct *vma;
	struct nvmap_handle *h = NULL;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (is32) {
		if (copy_from_user(&op32, arg, sizeof(op32)))
			return -EFAULT;
		op.handle = op32.handle;
		op.offset = op32.offset;
		op.length = op32.length;
		op.flags = op32.flags;
		op.addr = op32.addr;
	} else
#endif
		if (copy_from_user(&op, arg, sizeof(op)))
			return -EFAULT;

	h = unmarshal_user_handle(op.handle);
	if (!h)
		return -EINVAL;

	trace_nvmap_map_into_caller_ptr(client, h, op.offset,
					op.length, op.flags);
	down_read(&current->mm->mmap_sem);

	vma = find_vma(current->mm, op.addr);
	if (!vma) {
		err = -ENOMEM;
		goto out;
	}

	if (op.offset & ~PAGE_MASK) {
		err = -EFAULT;
		goto out;
	}

	if (op.offset >= h->size || op.length > h->size - op.offset) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	/* the VMA must exactly match the requested mapping operation, and the
	 * VMA that is targeted must have been created by this driver
	 */
	if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
	    (vma->vm_end - vma->vm_start != op.length)) {
		err = -EPERM;
		goto out;
	}

	/* verify that each mmap() system call creates a unique VMA */
	if (vma->vm_private_data)
		goto out;

	if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
		err = -EFAULT;
		goto out;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		err = -ENOMEM;
		goto out;
	}

	vma->vm_flags |= (h->heap_pgalloc ? 0 : VM_PFNMAP);
	priv->handle = h;
	priv->offs = op.offset;
	vma->vm_private_data = priv;
	vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);

out:
	up_read(&current->mm->mmap_sem);
	if (err)
		nvmap_handle_put(h);
	return err;
}
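
/*
 * The expected calling sequence (a sketch of the two-step mapping protocol
 * implied by the checks above, not code from this file): user space first
 * creates a VMA by mmap()ing the nvmap device, then binds a handle to
 * exactly that region:
 *
 *	op.addr   = mmap(NULL, len, prot, MAP_SHARED, nvmap_fd, 0);
 *	op.handle = handle;
 *	op.offset = 0;
 *	op.length = len;
 *	ioctl(nvmap_fd, NVMAP_IOC_MMAP, &op);
 *
 * The VMA must start at op.addr, span exactly op.length bytes, and must not
 * already have a handle bound (vm_private_data set).
 */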

int nvmap_ioctl_get_param(struct file *filp, void __user *arg, bool is32)
{
#ifdef CONFIG_COMPAT
	struct nvmap_handle_param_32 __user *uarg32 = arg;
#endif
	struct nvmap_handle_param __user *uarg = arg;
	struct nvmap_handle_param op;
	struct nvmap_client *client = filp->private_data;
	struct nvmap_handle_ref *ref;
	struct nvmap_handle *h;
	u64 result;
	int err = 0;

#ifdef CONFIG_COMPAT
	/* This is safe because the incoming value of result doesn't matter */
	if (is32) {
		if (copy_from_user(&op, arg,
				   sizeof(struct nvmap_handle_param_32)))
			return -EFAULT;
	} else
#endif
		if (copy_from_user(&op, arg, sizeof(op)))
			return -EFAULT;

	h = unmarshal_user_handle(op.handle);
	if (!h)
		return -EINVAL;

	nvmap_ref_lock(client);
	ref = __nvmap_validate_locked(client, h);
	if (IS_ERR_OR_NULL(ref)) {
		err = ref ? PTR_ERR(ref) : -EINVAL;
		goto ref_fail;
	}

	err = nvmap_get_handle_param(client, ref, op.param, &result);

#ifdef CONFIG_COMPAT
	if (is32)
		err = put_user((__u32)result, &uarg32->result);
	else
#endif
		err = put_user((unsigned long)result, &uarg->result);

ref_fail:
	nvmap_ref_unlock(client);
	nvmap_handle_put(h);
	return err;
}

int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
			  bool is32)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_rw_handle __user *uarg = arg;
	struct nvmap_rw_handle op;
#ifdef CONFIG_COMPAT
	struct nvmap_rw_handle_32 __user *uarg32 = arg;
	struct nvmap_rw_handle_32 op32;
#endif
	struct nvmap_handle *h;
	ssize_t copied;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (is32) {
		if (copy_from_user(&op32, arg, sizeof(op32)))
			return -EFAULT;
		op.addr = op32.addr;
		op.handle = op32.handle;
		op.offset = op32.offset;
		op.elem_size = op32.elem_size;
		op.hmem_stride = op32.hmem_stride;
		op.user_stride = op32.user_stride;
		op.count = op32.count;
	} else
#endif
		if (copy_from_user(&op, arg, sizeof(op)))
			return -EFAULT;

	if (!op.addr || !op.count || !op.elem_size)
		return -EINVAL;

	h = unmarshal_user_handle(op.handle);
	if (!h)
		return -EINVAL;

	trace_nvmap_ioctl_rw_handle(client, h, is_read, op.offset,
				    op.addr, op.hmem_stride,
				    op.user_stride, op.elem_size, op.count);
	copied = rw_handle(client, h, is_read, op.offset,
			   (unsigned long)op.addr, op.hmem_stride,
			   op.user_stride, op.elem_size, op.count);

	if (copied < 0) {
		err = copied;
		copied = 0;
	} else if (copied < (op.count * op.elem_size))
		err = -EINTR;

#ifdef CONFIG_COMPAT
	if (is32)
		__put_user(copied, &uarg32->count);
	else
#endif
		__put_user(copied, &uarg->count);

	nvmap_handle_put(h);

	return err;
}
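
/*
 * Illustrative example of the rw ABI above (values invented): to read 4
 * rows of 256 bytes from a handle whose row pitch is 1024 bytes into a
 * tightly packed user buffer:
 *
 *	op.offset      = 0;	// start of the surface within the handle
 *	op.elem_size   = 256;	// bytes copied per element
 *	op.hmem_stride = 1024;	// handle-side distance between elements
 *	op.user_stride = 256;	// user-side distance between elements
 *	op.count       = 4;
 *	ioctl(nvmap_fd, NVMAP_IOC_READ, &op);
 *
 * On success op.count is rewritten with the number of bytes copied (1024).
 */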

static int __nvmap_cache_maint(struct nvmap_client *client,
			       struct nvmap_cache_op *op)
{
	struct vm_area_struct *vma;
	struct nvmap_vma_priv *priv;
	struct nvmap_handle *handle;
	unsigned long start;
	unsigned long end;
	int err = 0;

	if (!op->addr || op->op < NVMAP_CACHE_OP_WB ||
	    op->op > NVMAP_CACHE_OP_WB_INV)
		return -EINVAL;

	handle = unmarshal_user_handle(op->handle);
	if (!handle)
		return -EINVAL;

	down_read(&current->mm->mmap_sem);

	vma = find_vma(current->active_mm, (unsigned long)op->addr);
	if (!vma || !is_nvmap_vma(vma) ||
	    (ulong)op->addr < vma->vm_start ||
	    (ulong)op->addr >= vma->vm_end ||
	    op->len > vma->vm_end - (ulong)op->addr) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	priv = (struct nvmap_vma_priv *)vma->vm_private_data;

	if (priv->handle != handle) {
		err = -EFAULT;
		goto out;
	}

	start = (unsigned long)op->addr - vma->vm_start +
		(vma->vm_pgoff << PAGE_SHIFT);
	end = start + op->len;

	err = __nvmap_do_cache_maint(client, priv->handle, start, end, op->op,
				     false);
out:
	up_read(&current->mm->mmap_sem);
	nvmap_handle_put(handle);
	return err;
}

int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg, bool is32)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_cache_op op;
#ifdef CONFIG_COMPAT
	struct nvmap_cache_op_32 op32;
#endif

#ifdef CONFIG_COMPAT
	if (is32) {
		if (copy_from_user(&op32, arg, sizeof(op32)))
			return -EFAULT;
		op.addr = op32.addr;
		op.handle = op32.handle;
		op.len = op32.len;
		op.op = op32.op;
	} else
#endif
		if (copy_from_user(&op, arg, sizeof(op)))
			return -EFAULT;

	return __nvmap_cache_maint(client, &op);
}

int nvmap_ioctl_free(struct file *filp, unsigned long arg)
{
	struct nvmap_client *client = filp->private_data;

	if (!arg)
		return 0;

	nvmap_free_handle_user_id(client, arg);
	return sys_close(arg);
}
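
/*
 * Note: with fd-based handles the user-space id passed to the free ioctl is
 * itself a file descriptor, which is why dropping the client's reference is
 * followed by sys_close() on the same value.
 */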

static void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
{
	if (op == NVMAP_CACHE_OP_WB_INV)
		dmac_flush_range(vaddr, vaddr + size);
	else if (op == NVMAP_CACHE_OP_INV)
		dmac_map_area(vaddr, size, DMA_FROM_DEVICE);
	else
		dmac_map_area(vaddr, size, DMA_TO_DEVICE);
}

static void outer_cache_maint(unsigned int op, phys_addr_t paddr, size_t size)
{
	if (op == NVMAP_CACHE_OP_WB_INV)
		outer_flush_range(paddr, paddr + size);
	else if (op == NVMAP_CACHE_OP_INV)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}
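
/*
 * Cache-op to primitive mapping implemented by the two helpers above:
 *
 *	NVMAP_CACHE_OP_WB	-> clean (write back, keep line)
 *	NVMAP_CACHE_OP_INV	-> invalidate (discard line)
 *	NVMAP_CACHE_OP_WB_INV	-> flush (clean, then discard)
 *
 * The inner helper expresses clean/invalidate through dmac_map_area() with
 * DMA_TO_DEVICE/DMA_FROM_DEVICE, matching the ARM streaming-DMA semantics.
 */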

static void heap_page_cache_maint(
	struct nvmap_handle *h, unsigned long start, unsigned long end,
	unsigned int op, bool inner, bool outer,
	unsigned long kaddr, pgprot_t prot, bool clean_only_dirty)
{
	if (h->userflags & NVMAP_HANDLE_CACHE_SYNC) {
		/*
		 * zap user VA->PA mappings so that any access to the pages
		 * will result in a fault and can be marked dirty
		 */
		nvmap_handle_mkclean(h, start, end - start);
		nvmap_zap_handle(h, start, end - start);
	}

#ifdef NVMAP_LAZY_VFREE
	if (inner) {
		void *vaddr = NULL;

		if (!h->vaddr) {
			struct page **pages;

			pages = nvmap_pages(h->pgalloc.pages,
					    h->size >> PAGE_SHIFT);
			if (!pages)
				goto per_page_cache_maint;
			vaddr = vm_map_ram(pages,
					   h->size >> PAGE_SHIFT, -1, prot);
			nvmap_altfree(pages,
				(h->size >> PAGE_SHIFT) * sizeof(*pages));
		}
		if (vaddr && atomic_long_cmpxchg(&h->vaddr, 0, (long)vaddr))
			vm_unmap_ram(vaddr, h->size >> PAGE_SHIFT);
		if (h->vaddr) {
			/* Fast inner cache maintenance using single mapping */
			inner_cache_maint(op, h->vaddr + start, end - start);
			if (!outer)
				return;
			/* Skip per-page inner maintenance in loop below */
			inner = false;
		}
	}
per_page_cache_maint:
#endif

	while (start < end) {
		struct page *page;
		phys_addr_t paddr;
		unsigned long next;
		unsigned long off;
		size_t size;

		page = nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
		next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
		off = start & ~PAGE_MASK;
		size = next - start;
		paddr = page_to_phys(page) + off;

		if (inner) {
			void *vaddr = (void *)kaddr + off;

			ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
					   paddr, prot);
			inner_cache_maint(op, vaddr, size);
			unmap_kernel_range(kaddr, PAGE_SIZE);
		}

		if (outer)
			outer_cache_maint(op, paddr, size);
		start = next;
	}
}

#if defined(CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS)
static bool fast_cache_maint_outer(unsigned long start,
				   unsigned long end, unsigned int op)
{
	bool result = false;

	if (end - start >= cache_maint_outer_threshold) {
		if (op == NVMAP_CACHE_OP_WB_INV) {
			outer_flush_all();
			result = true;
		}
		if (op == NVMAP_CACHE_OP_WB) {
			outer_clean_all();
			result = true;
		}
	}

	return result;
}
#else
static inline bool fast_cache_maint_outer(unsigned long start,
					  unsigned long end, unsigned int op)
{
	return false;
}
#endif

#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
static inline bool can_fast_cache_maint(struct nvmap_handle *h,
					unsigned long start,
					unsigned long end, unsigned int op)
{
	if ((op == NVMAP_CACHE_OP_INV) ||
	    ((end - start) < cache_maint_inner_threshold))
		return false;
	return true;
}
#else
static inline bool can_fast_cache_maint(struct nvmap_handle *h,
					unsigned long start,
					unsigned long end, unsigned int op)
{
	return false;
}
#endif

static bool fast_cache_maint(struct nvmap_handle *h,
			     unsigned long start,
			     unsigned long end, unsigned int op,
			     bool clean_only_dirty)
{
	if (!can_fast_cache_maint(h, start, end, op))
		return false;

	if (h->userflags & NVMAP_HANDLE_CACHE_SYNC) {
		nvmap_handle_mkclean(h, 0, h->size);
		nvmap_zap_handle(h, 0, h->size);
	}

	if (op == NVMAP_CACHE_OP_WB_INV)
		inner_flush_cache_all();
	else if (op == NVMAP_CACHE_OP_WB)
		inner_clean_cache_all();

	/* outer maintenance */
	if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
		if (!fast_cache_maint_outer(start, end, op)) {
			if (h->heap_pgalloc) {
				heap_page_cache_maint(h, start,
					end, op, false, true, 0, 0,
					clean_only_dirty);
			} else {
				phys_addr_t pstart;

				pstart = start + h->carveout->base;
				outer_cache_maint(op, pstart, end - start);
			}
		}
	}
	return true;
}
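
/*
 * Rationale for the fast path above: beyond cache_maint_inner_threshold
 * (and cache_maint_outer_threshold for the outer cache) it is cheaper to
 * clean or flush the entire cache by set/way than to walk the range line by
 * line, at the cost of also evicting unrelated data. Invalidate never takes
 * this shortcut, since invalidating the whole cache would discard other
 * owners' dirty lines.
 */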

struct cache_maint_op {
	phys_addr_t start;
	phys_addr_t end;
	unsigned int op;
	struct nvmap_handle *h;
	bool inner;
	bool outer;
	bool clean_only_dirty;
};

static int do_cache_maint(struct cache_maint_op *cache_work)
{
	pgprot_t prot;
	unsigned long kaddr;
	phys_addr_t pstart = cache_work->start;
	phys_addr_t pend = cache_work->end;
	phys_addr_t loop;
	int err = 0;
	struct nvmap_handle *h = cache_work->h;
	struct nvmap_client *client;
	unsigned int op = cache_work->op;
	struct vm_struct *area = NULL;

	if (!h || !h->alloc)
		return -EFAULT;

	client = h->owner;
	if (can_fast_cache_maint(h, pstart, pend, op))
		nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
	else
		nvmap_stats_inc(NS_CFLUSH_DONE, pend - pstart);
	trace_nvmap_cache_maint(client, h, pstart, pend, op, pend - pstart);
	trace_nvmap_cache_flush(pend - pstart,
				nvmap_stats_read(NS_ALLOC),
				nvmap_stats_read(NS_CFLUSH_RQ),
				nvmap_stats_read(NS_CFLUSH_DONE));

	if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
	    h->flags == NVMAP_HANDLE_WRITE_COMBINE || pstart == pend)
		goto out;

	if (fast_cache_maint(h, pstart, pend, op, cache_work->clean_only_dirty))
		goto out;

	prot = nvmap_pgprot(h, PG_PROT_KERNEL);
	area = alloc_vm_area(PAGE_SIZE, NULL);
	if (!area) {
		err = -ENOMEM;
		goto out;
	}
	kaddr = (ulong)area->addr;

	if (h->heap_pgalloc) {
		heap_page_cache_maint(h, pstart, pend, op, true,
			(h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ?
					false : true, kaddr, prot,
			cache_work->clean_only_dirty);
		goto out;
	}

	if (pstart > h->size || pend > h->size) {
		pr_warn("cache maintenance outside handle\n");
		err = -EINVAL;
		goto out;
	}

	pstart += h->carveout->base;
	pend += h->carveout->base;
	loop = pstart;

	while (loop < pend) {
		phys_addr_t next = (loop + PAGE_SIZE) & PAGE_MASK;
		void *base = (void *)kaddr + (loop & ~PAGE_MASK);
		next = min(next, pend);

		ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
				   loop, prot);
		inner_cache_maint(op, base, next - loop);
		loop = next;
		unmap_kernel_range(kaddr, PAGE_SIZE);
	}

	if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
		outer_cache_maint(op, pstart, pend - pstart);

out:
	if (area)
		free_vm_area(area);
	return err;
}

int __nvmap_do_cache_maint(struct nvmap_client *client,
			   struct nvmap_handle *h,
			   unsigned long start, unsigned long end,
			   unsigned int op, bool clean_only_dirty)
{
	int err;
	struct cache_maint_op cache_op;

	h = nvmap_handle_get(h);
	if (!h)
		return -EFAULT;

	if (op == NVMAP_CACHE_OP_INV)
		op = NVMAP_CACHE_OP_WB_INV;

	/* clean-only-dirty applies only to the writeback operation */
	if (op != NVMAP_CACHE_OP_WB)
		clean_only_dirty = false;

	cache_op.h = h;
	cache_op.start = start;
	cache_op.end = end;
	cache_op.op = op;
	cache_op.inner = h->flags == NVMAP_HANDLE_CACHEABLE ||
			 h->flags == NVMAP_HANDLE_INNER_CACHEABLE;
	cache_op.outer = h->flags == NVMAP_HANDLE_CACHEABLE;
	cache_op.clean_only_dirty = clean_only_dirty;

	nvmap_stats_inc(NS_CFLUSH_RQ, end - start);
	err = do_cache_maint(&cache_op);
	nvmap_handle_put(h);
	return err;
}
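
/*
 * Note on the INV -> WB_INV promotion above (interpretation, not stated in
 * the original source): turning a plain invalidate into clean+invalidate
 * costs some extra write-back but is a conservative choice that stays
 * correct even when target lines are dirty, so callers cannot accidentally
 * lose CPU writes issued before the invalidate.
 */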

static int rw_handle_page(struct nvmap_handle *h, int is_read,
			  unsigned long start, unsigned long rw_addr,
			  unsigned long bytes, unsigned long kaddr)
{
	pgprot_t prot = nvmap_pgprot(h, PG_PROT_KERNEL);
	unsigned long end = start + bytes;
	int err = 0;

	while (!err && start < end) {
		struct page *page = NULL;
		phys_addr_t phys;
		size_t count;
		void *src;

		if (!h->heap_pgalloc) {
			phys = h->carveout->base + start;
		} else {
			page =
			   nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
			get_page(page);
			phys = page_to_phys(page) + (start & ~PAGE_MASK);
		}

		ioremap_page_range(kaddr, kaddr + PAGE_SIZE, phys, prot);

		src = (void *)kaddr + (phys & ~PAGE_MASK);
		phys = PAGE_SIZE - (phys & ~PAGE_MASK);
		count = min_t(size_t, end - start, phys);

		if (is_read)
			err = copy_to_user((void *)rw_addr, src, count);
		else
			err = copy_from_user(src, (void *)rw_addr, count);

		if (err)
			err = -EFAULT;

		rw_addr += count;
		start += count;

		if (page)
			put_page(page);
		unmap_kernel_range(kaddr, PAGE_SIZE);
	}

	return err;
}

static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
			 int is_read, unsigned long h_offs,
			 unsigned long sys_addr, unsigned long h_stride,
			 unsigned long sys_stride, unsigned long elem_size,
			 unsigned long count)
{
	ssize_t copied = 0;
	void *addr;
	int ret = 0;
	struct vm_struct *area;

	if (!elem_size || !count)
		return -EINVAL;

	if (!h->alloc)
		return -EFAULT;

	if (elem_size == h_stride && elem_size == sys_stride) {
		elem_size *= count;
		h_stride = elem_size;
		sys_stride = elem_size;
		count = 1;
	}

	if (elem_size > h->size ||
	    h_offs >= h->size ||
	    elem_size > sys_stride ||
	    elem_size > h_stride ||
	    sys_stride > (h->size - h_offs) / count ||
	    h_stride > (h->size - h_offs) / count)
		return -EINVAL;

	area = alloc_vm_area(PAGE_SIZE, NULL);
	if (!area)
		return -ENOMEM;
	addr = area->addr;

	while (count--) {
		if (h_offs + elem_size > h->size) {
			pr_warn("read/write outside of handle\n");
			ret = -EFAULT;
			break;
		}
		if (is_read)
			__nvmap_do_cache_maint(client, h, h_offs,
				h_offs + elem_size, NVMAP_CACHE_OP_INV, false);

		ret = rw_handle_page(h, is_read, h_offs, sys_addr,
				     elem_size, (unsigned long)addr);

		if (ret)
			break;

		if (!is_read)
			__nvmap_do_cache_maint(client, h, h_offs,
				h_offs + elem_size, NVMAP_CACHE_OP_WB_INV,
				false);

		copied += elem_size;
		sys_addr += sys_stride;
		h_offs += h_stride;
	}

	free_vm_area(area);
	return ret ?: copied;
}
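
/*
 * Worked example of the loop above (invented numbers): with elem_size = 64,
 * h_stride = 256, sys_stride = 64 and count = 2, iteration 0 copies handle
 * bytes [h_offs, h_offs + 64) to/from sys_addr, iteration 1 copies
 * [h_offs + 256, h_offs + 320) to/from sys_addr + 64, and the function
 * returns copied = 128. The contiguous special case (both strides equal to
 * elem_size) collapses such a loop into one element of elem_size * count
 * bytes.
 */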

int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg,
				 bool is_reserve_ioctl)
{
	struct nvmap_cache_op_list op;
	u32 *handle_ptr;
	u32 *offset_ptr;
	u32 *size_ptr;
	struct nvmap_handle **refs;
	int err = 0;
	u32 i, n_unmarshal_handles = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.nr)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, op.handles, op.nr * sizeof(u32)))
		return -EFAULT;

	if (!access_ok(VERIFY_READ, op.offsets, op.nr * sizeof(u32)))
		return -EFAULT;

	if (!access_ok(VERIFY_READ, op.sizes, op.nr * sizeof(u32)))
		return -EFAULT;

	if (!op.offsets || !op.sizes)
		return -EINVAL;

	refs = kcalloc(op.nr, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	handle_ptr = (u32 *)(uintptr_t)op.handles;
	offset_ptr = (u32 *)(uintptr_t)op.offsets;
	size_ptr = (u32 *)(uintptr_t)op.sizes;

	for (i = 0; i < op.nr; i++) {
		u32 handle;

		if (copy_from_user(&handle, &handle_ptr[i], sizeof(handle))) {
			err = -EFAULT;
			goto free_mem;
		}

		refs[i] = unmarshal_user_handle(handle);
		if (!refs[i]) {
			err = -EINVAL;
			goto free_mem;
		}
		n_unmarshal_handles++;
	}

	if (is_reserve_ioctl)
		err = nvmap_reserve_pages(refs, offset_ptr, size_ptr,
					  op.nr, op.op);
	else
		err = nvmap_do_cache_maint_list(refs, offset_ptr, size_ptr,
						op.op, op.nr);

free_mem:
	for (i = 0; i < n_unmarshal_handles; i++)
		nvmap_handle_put(refs[i]);
	kfree(refs);
	return err;
}
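
/*
 * The list form above amortizes one ioctl over op.nr (handle, offset, size)
 * triples; the same unmarshal-then-put pattern as in nvmap_ioctl_pinop()
 * applies, with n_unmarshal_handles tracking how many entries were
 * successfully unmarshalled before any error, so only those are released.
 */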