/*
 * drivers/video/tegra/nvmap/nvmap_ioctl.c
 *
 * User-space interface to nvmap
 *
 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#define pr_fmt(fmt)	"nvmap: %s() " fmt, __func__

#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nvmap.h>
#include <linux/vmalloc.h>

#include <asm/memory.h>

#include <trace/events/nvmap.h>

#include "nvmap_ioctl.h"
#include "nvmap_priv.h"

#include <linux/list.h>

static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
			 int is_read, unsigned long h_offs,
			 unsigned long sys_addr, unsigned long h_stride,
			 unsigned long sys_stride, unsigned long elem_size,

static struct nvmap_handle *fd_to_handle_id(int handle)
	struct nvmap_handle *h;

	h = nvmap_get_id_from_dmabuf_fd(NULL, handle);
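
/*
 * User space refers to nvmap handles by dma-buf fd; unmarshal_user_handle()
 * converts the __u32 value carried in an ioctl argument back into a handle
 * pointer via the fd lookup above.
 */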
static struct nvmap_handle *unmarshal_user_handle(__u32 handle)
	return fd_to_handle_id((int)handle);

struct nvmap_handle *unmarshal_user_id(u32 id)
	return unmarshal_user_handle(id);

/*
 * marshal_id/unmarshal_id are for get_id/handle_from_id.
 * These were added to support using fds for handles.
 */
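
/*
 * Two variants follow, selected at build time: the first packs a kernel
 * handle pointer into 32 bits by dropping its two low alignment bits, and
 * unmarshal_id() rebuilds the kernel virtual address by shifting back and
 * OR-ing in PAGE_OFFSET; the second pair is a plain cast for configurations
 * where the pointer already fits in 32 bits.
 */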
static __u32 marshal_id(struct nvmap_handle *handle)
	return (__u32)((uintptr_t)handle >> 2);

static struct nvmap_handle *unmarshal_id(__u32 id)
	uintptr_t h = ((id << 2) | PAGE_OFFSET);

	return (struct nvmap_handle *)h;

static __u32 marshal_id(struct nvmap_handle *handle)
	return (uintptr_t)handle;

static struct nvmap_handle *unmarshal_id(__u32 id)
	return (struct nvmap_handle *)id;

struct nvmap_handle *__nvmap_ref_to_id(struct nvmap_handle_ref *ref)
	if (!virt_addr_valid(ref))
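
/*
 * Services both the PIN and UNPIN ioctls: copies an array of handle fds
 * from user space, resolves each to a handle, and either pins them and
 * writes the resulting device addresses back to user space, or unpins them.
 */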
int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
	struct nvmap_pin_handle_32 op32;
	__u32 __user *output32 = NULL;
	struct nvmap_pin_handle op;
	struct nvmap_handle *h;
	struct nvmap_handle *on_stack[16];
	struct nvmap_handle **refs;
	unsigned long __user *output;

	if (copy_from_user(&op32, arg, sizeof(op32)))
	op.handles = (__u32 *)(uintptr_t)op32.handles;
	op.count = op32.count;
	if (copy_from_user(&op, arg, sizeof(op)))

	size_t bytes = op.count * sizeof(*refs); /* kcalloc below will catch overflow. */

	if (op.count > ARRAY_SIZE(on_stack))
		refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);

	if (!access_ok(VERIFY_READ, op.handles, bytes)) {

	for (i = 0; i < op.count; i++) {
		if (__get_user(handle, &op.handles[i])) {
		refs[i] = unmarshal_user_handle(handle);

	/* Yes, we're storing a u32 in a pointer */
	on_stack[0] = unmarshal_user_handle((u32)(uintptr_t)op.handles);

	trace_nvmap_ioctl_pinop(filp->private_data, is_pin, op.count, refs);
	err = nvmap_pin_ids(filp->private_data, op.count, refs);
	nvmap_unpin_ids(filp->private_data, op.count, refs);
	/* skip the output stage on unpin */

	/* nvmap_pin_ids() returning 0 guarantees that all of the handle_ref
	 * objects are valid, so dereferencing them directly here is safe */
	output32 = (__u32 *)(uintptr_t)op.addr;
		struct nvmap_pin_handle_32 __user *tmp = arg;
		output32 = &tmp->addr;
		struct nvmap_pin_handle __user *tmp = arg;
		output = (unsigned long *)&tmp->addr;

	for (i = 0; i < op.count && !err; i++) {
		if (h->heap_pgalloc && h->pgalloc.contig)
			addr = page_to_phys(h->pgalloc.pages[0]);
		else if (h->heap_pgalloc)
			addr = sg_dma_address(
				((struct sg_table *)h->attachment->priv)->sgl);
			addr = h->carveout->base;

		err = put_user((__u32)addr, &output32[i]);
			err = put_user(addr, &output[i]);

	nvmap_unpin_ids(filp->private_data, op.count, refs);

	if (refs != on_stack)
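
/*
 * Returns a marshaled global id for the handle; another client can turn
 * the id back into a handle through the NVMAP_IOC_FROM_ID path in
 * nvmap_ioctl_create() below.
 */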
int nvmap_ioctl_getid(struct file *filp, void __user *arg)
	struct nvmap_client *client = filp->private_data;
	struct nvmap_create_handle op;
	struct nvmap_handle *h = NULL;

	if (copy_from_user(&op, arg, sizeof(op)))

	h = unmarshal_user_handle(op.handle);

	h = nvmap_handle_get(h);

	op.id = marshal_id(h);
	if (client == h->owner)

	return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;

static int nvmap_share_release(struct inode *inode, struct file *file)
	struct nvmap_handle *h = file->private_data;

static int nvmap_share_mmap(struct file *file, struct vm_area_struct *vma)
	/* unsupported operation */
	WARN(1, "mmap is not supported on an fd that shares an nvmap handle");

const struct file_operations nvmap_fd_fops = {
	.owner = THIS_MODULE,
	.release = nvmap_share_release,
	.mmap = nvmap_share_mmap,
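
/*
 * Exports the handle as a dma-buf fd so it can be shared with other
 * processes or APIs; mmap() on that fd is rejected by nvmap_fd_fops above.
 */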
int nvmap_ioctl_getfd(struct file *filp, void __user *arg)
	struct nvmap_handle *handle;
	struct nvmap_create_handle op;
	struct nvmap_client *client = filp->private_data;

	if (copy_from_user(&op, arg, sizeof(op)))

	handle = unmarshal_user_handle(op.handle);

	op.fd = nvmap_get_dmabuf_fd(client, handle);

	if (copy_to_user(arg, &op, sizeof(op))) {
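
/*
 * Allocation ioctls: op.align must be a power of two and is raised to at
 * least PAGE_SIZE; with CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES, user
 * allocations are forced to use zeroed pages.
 */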
int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
	struct nvmap_alloc_handle op;
	struct nvmap_client *client = filp->private_data;
	struct nvmap_handle *handle;

	if (copy_from_user(&op, arg, sizeof(op)))

	handle = unmarshal_user_handle(op.handle);

	if (op.align & (op.align - 1))

	/* user-space handles are aligned to page boundaries, to prevent
	op.align = max_t(size_t, op.align, PAGE_SIZE);
#if defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
	op.flags |= NVMAP_HANDLE_ZEROED_PAGES;

	return nvmap_alloc_handle(client, handle, op.heap_mask, op.align,
				  op.flags & (~NVMAP_HANDLE_KIND_SPECIFIED));

int nvmap_ioctl_alloc_kind(struct file *filp, void __user *arg)
	struct nvmap_alloc_kind_handle op;
	struct nvmap_client *client = filp->private_data;
	struct nvmap_handle *handle;

	if (copy_from_user(&op, arg, sizeof(op)))

	handle = unmarshal_user_handle(op.handle);

	if (op.align & (op.align - 1))

	/* user-space handles are aligned to page boundaries, to prevent
	op.align = max_t(size_t, op.align, PAGE_SIZE);
#if defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
	op.flags |= NVMAP_HANDLE_ZEROED_PAGES;

	return nvmap_alloc_handle(client, handle,

int nvmap_create_fd(struct nvmap_handle *h)
	fd = __nvmap_dmabuf_fd(h->dmabuf, O_CLOEXEC);
		pr_err("Out of file descriptors");

	/*
	 * __nvmap_dmabuf_fd() associates the fd with dma_buf->file; closing
	 * the fd drops one reference on dmabuf->file, so take an extra
	 * reference on the dma_buf here to keep the counts balanced.
	 */
	get_dma_buf(h->dmabuf);
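
/*
 * NVMAP_IOC_CREATE, NVMAP_IOC_FROM_ID and NVMAP_IOC_FROM_FD all land here:
 * each path produces a handle reference for this client, which is then
 * returned to user space as a new dma-buf fd via nvmap_create_fd().
 */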
int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
	struct nvmap_create_handle op;
	struct nvmap_handle_ref *ref = NULL;
	struct nvmap_client *client = filp->private_data;

	if (copy_from_user(&op, arg, sizeof(op)))

	if (cmd == NVMAP_IOC_CREATE) {
		ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
			ref->handle->orig_size = op.size;
	} else if (cmd == NVMAP_IOC_FROM_ID) {
		ref = nvmap_duplicate_handle(client, unmarshal_id(op.id), 0);
	} else if (cmd == NVMAP_IOC_FROM_FD) {
		ref = nvmap_create_handle_from_fd(client, op.fd);

	fd = nvmap_create_fd(ref->handle);

	if (copy_to_user(arg, &op, sizeof(op))) {
		nvmap_free_handle(client, __nvmap_ref_to_id(ref));
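
/*
 * Binds a handle to an existing VMA created by mmap() on the nvmap device:
 * the VMA must match the requested address and length exactly, must not
 * already have private data, and the offset/length must lie within the
 * handle.
 */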
int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg, bool is32)
	struct nvmap_client *client = filp->private_data;
	struct nvmap_map_caller op;
	struct nvmap_map_caller_32 op32;
	struct nvmap_vma_priv *priv;
	struct vm_area_struct *vma;
	struct nvmap_handle *h = NULL;

	if (copy_from_user(&op32, arg, sizeof(op32)))
	op.handle = op32.handle;
	op.offset = op32.offset;
	op.length = op32.length;
	op.flags = op32.flags;
	if (copy_from_user(&op, arg, sizeof(op)))

	h = unmarshal_user_handle(op.handle);

	h = nvmap_handle_get(h);

	trace_nvmap_map_into_caller_ptr(client, h, op.offset,
					op.length, op.flags);
	down_read(&current->mm->mmap_sem);

	vma = find_vma(current->mm, op.addr);

	if (op.offset & ~PAGE_MASK) {

	if (op.offset >= h->size || op.length > h->size - op.offset) {
		err = -EADDRNOTAVAIL;

	/* the VMA must exactly match the requested mapping operation, and the
	 * VMA that is targeted must have been created by this driver */
	if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
	    (vma->vm_end - vma->vm_start != op.length)) {

	/* verify that each mmap() system call creates a unique VMA */
	if (vma->vm_private_data)

	if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	vma->vm_flags |= (h->heap_pgalloc ? 0 : VM_PFNMAP);
	priv->offs = op.offset;
	vma->vm_private_data = priv;
	vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);

	up_read(&current->mm->mmap_sem);

int nvmap_ioctl_get_param(struct file *filp, void __user *arg, bool is32)
	struct nvmap_handle_param_32 __user *uarg32 = arg;
	struct nvmap_handle_param __user *uarg = arg;
	struct nvmap_handle_param op;
	struct nvmap_client *client = filp->private_data;
	struct nvmap_handle_ref *ref;
	struct nvmap_handle *h;

	/* This is safe because the incoming value of result doesn't matter */
	if (copy_from_user(&op, arg,
			   sizeof(struct nvmap_handle_param_32)))
	if (copy_from_user(&op, arg, sizeof(op)))

	h = unmarshal_user_handle(op.handle);

	h = nvmap_handle_get(h);

	nvmap_ref_lock(client);
	ref = __nvmap_validate_locked(client, h);
	if (IS_ERR_OR_NULL(ref)) {
		err = ref ? PTR_ERR(ref) : -EINVAL;

	err = nvmap_get_handle_param(client, ref, op.param, &result);

	err = put_user((__u32)result, &uarg32->result);
	err = put_user((unsigned long)result, &uarg->result);

	nvmap_ref_unlock(client);
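
/*
 * NVMAP_IOC_READ/NVMAP_IOC_WRITE: strided copy between user memory and the
 * handle, carried out element by element in rw_handle() below.
 */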
int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
	struct nvmap_client *client = filp->private_data;
	struct nvmap_rw_handle __user *uarg = arg;
	struct nvmap_rw_handle op;
	struct nvmap_rw_handle_32 __user *uarg32 = arg;
	struct nvmap_rw_handle_32 op32;
	struct nvmap_handle *h;

	if (copy_from_user(&op32, arg, sizeof(op32)))
	op.handle = op32.handle;
	op.offset = op32.offset;
	op.elem_size = op32.elem_size;
	op.hmem_stride = op32.hmem_stride;
	op.user_stride = op32.user_stride;
	op.count = op32.count;
	if (copy_from_user(&op, arg, sizeof(op)))

	h = unmarshal_user_handle(op.handle);
	if (!h || !op.addr || !op.count || !op.elem_size)

	h = nvmap_handle_get(h);

	trace_nvmap_ioctl_rw_handle(client, h, is_read, op.offset,
				    op.addr, op.hmem_stride,
				    op.user_stride, op.elem_size, op.count);
	copied = rw_handle(client, h, is_read, op.offset,
			   (unsigned long)op.addr, op.hmem_stride,
			   op.user_stride, op.elem_size, op.count);

	} else if (copied < (op.count * op.elem_size))

	__put_user(copied, &uarg32->count);
	__put_user(copied, &uarg->count);
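
/*
 * Cache maintenance on a user virtual range: the range must lie within a
 * VMA created by this driver and backed by the same handle named in the
 * ioctl argument.
 */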
static int __nvmap_cache_maint(struct nvmap_client *client,
			       struct nvmap_cache_op *op)
	struct vm_area_struct *vma;
	struct nvmap_vma_priv *priv;
	struct nvmap_handle *handle;

	handle = unmarshal_user_handle(op->handle);
	if (!handle || !op->addr || op->op < NVMAP_CACHE_OP_WB ||
	    op->op > NVMAP_CACHE_OP_WB_INV)

	down_read(&current->mm->mmap_sem);

	vma = find_vma(current->active_mm, (unsigned long)op->addr);
	if (!vma || !is_nvmap_vma(vma) ||
	    (ulong)op->addr < vma->vm_start ||
	    (ulong)op->addr >= vma->vm_end ||
	    op->len > vma->vm_end - (ulong)op->addr) {
		err = -EADDRNOTAVAIL;

	priv = (struct nvmap_vma_priv *)vma->vm_private_data;

	if (priv->handle != handle) {

	start = (unsigned long)op->addr - vma->vm_start +
		(vma->vm_pgoff << PAGE_SHIFT);
	end = start + op->len;

	err = __nvmap_do_cache_maint(client, priv->handle, start, end, op->op,

	up_read(&current->mm->mmap_sem);

int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg, bool is32)
	struct nvmap_client *client = filp->private_data;
	struct nvmap_cache_op op;
	struct nvmap_cache_op_32 op32;

	if (copy_from_user(&op32, arg, sizeof(op32)))
	op.handle = op32.handle;
	if (copy_from_user(&op, arg, sizeof(op)))

	return __nvmap_cache_maint(client, &op);

int nvmap_ioctl_free(struct file *filp, unsigned long arg)
	struct nvmap_client *client = filp->private_data;

	nvmap_free_handle_user_id(client, arg);
	return sys_close(arg);
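
/*
 * Primitives mapping NVMAP cache ops onto the ARM cache API: WB_INV
 * flushes (clean + invalidate), INV invalidates, and WB cleans the range.
 */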
static void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
	if (op == NVMAP_CACHE_OP_WB_INV)
		dmac_flush_range(vaddr, vaddr + size);
	else if (op == NVMAP_CACHE_OP_INV)
		dmac_map_area(vaddr, size, DMA_FROM_DEVICE);
		dmac_map_area(vaddr, size, DMA_TO_DEVICE);

static void outer_cache_maint(unsigned int op, phys_addr_t paddr, size_t size)
	if (op == NVMAP_CACHE_OP_WB_INV)
		outer_flush_range(paddr, paddr + size);
	else if (op == NVMAP_CACHE_OP_INV)
		outer_inv_range(paddr, paddr + size);
		outer_clean_range(paddr, paddr + size);

static void heap_page_cache_maint(
	struct nvmap_handle *h, unsigned long start, unsigned long end,
	unsigned int op, bool inner, bool outer, pte_t **pte,
	unsigned long kaddr, pgprot_t prot, bool clean_only_dirty)
	if (h->userflags & NVMAP_HANDLE_CACHE_SYNC) {
		/*
		 * zap user VA->PA mappings so that any access to the pages
		 * will result in a fault and can be marked dirty
		 */
		nvmap_handle_mkclean(h, start, end - start);
		nvmap_zap_handle(h, start, end - start);

#ifdef NVMAP_LAZY_VFREE
			/* mutex lock protection is not necessary as it is
			 * already increased in __nvmap_do_cache_maint to
			 * protect from migrations.
			 */
			nvmap_kmaps_inc_no_lock(h);
			pages = nvmap_pages(h->pgalloc.pages,
					    h->size >> PAGE_SHIFT);
				goto per_page_cache_maint;
			vaddr = vm_map_ram(pages,
					   h->size >> PAGE_SHIFT, -1, prot);
				      (h->size >> PAGE_SHIFT) * sizeof(*pages));

		if (vaddr && atomic_long_cmpxchg(&h->vaddr, 0, (long)vaddr)) {
			vm_unmap_ram(vaddr, h->size >> PAGE_SHIFT);

			/* Fast inner cache maintenance using single mapping */
			inner_cache_maint(op, h->vaddr + start, end - start);

			/* Skip per-page inner maintenance in loop below */

per_page_cache_maint:

	while (start < end) {

		page = nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
		next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
		off = start & ~PAGE_MASK;

		paddr = page_to_phys(page) + off;

			void *vaddr = (void *)kaddr + off;

			set_pte_at(&init_mm, kaddr, *pte,
				   pfn_pte(__phys_to_pfn(paddr), prot));
			nvmap_flush_tlb_kernel_page(kaddr);
			inner_cache_maint(op, vaddr, size);

			outer_cache_maint(op, paddr, size);

#if defined(CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS)
static bool fast_cache_maint_outer(unsigned long start,
				   unsigned long end, unsigned int op)
	if (end - start >= cache_maint_outer_threshold) {
		if (op == NVMAP_CACHE_OP_WB_INV) {
		if (op == NVMAP_CACHE_OP_WB) {

static inline bool fast_cache_maint_outer(unsigned long start,
					  unsigned long end, unsigned int op)

#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
static inline bool can_fast_cache_maint(struct nvmap_handle *h,
					unsigned long end, unsigned int op)
	if ((op == NVMAP_CACHE_OP_INV) ||
	    ((end - start) < cache_maint_inner_threshold))

static inline bool can_fast_cache_maint(struct nvmap_handle *h,
					unsigned long end, unsigned int op)
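
/*
 * For large ranges, maintaining the entire cache by set/ways is cheaper
 * than walking the range line by line; fast_cache_maint() cleans and zaps
 * any user mappings, then flushes or cleans the whole inner cache and
 * falls back to ranged operations for the outer cache where needed.
 */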
static bool fast_cache_maint(struct nvmap_handle *h,
			     unsigned long end, unsigned int op,
			     bool clean_only_dirty)
	if (!can_fast_cache_maint(h, start, end, op))

	if (h->userflags & NVMAP_HANDLE_CACHE_SYNC) {
		nvmap_handle_mkclean(h, 0, h->size);
		nvmap_zap_handle(h, 0, h->size);

	if (op == NVMAP_CACHE_OP_WB_INV)
		inner_flush_cache_all();
	else if (op == NVMAP_CACHE_OP_WB)
		inner_clean_cache_all();

	/* outer maintenance */
	if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
		if (!fast_cache_maint_outer(start, end, op))
			if (h->heap_pgalloc) {
				heap_page_cache_maint(h, start,
					end, op, false, true, NULL, 0, 0,

				pstart = start + h->carveout->base;
				outer_cache_maint(op, pstart, end - start);

struct cache_maint_op {
	struct nvmap_handle *h;
	bool clean_only_dirty;
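
/*
 * Carries out the requested maintenance for one handle: the set/ways fast
 * path is tried first, then the backing pages (or the carveout range) are
 * mapped through a temporary PTE and maintained page by page.
 */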
static int do_cache_maint(struct cache_maint_op *cache_work)
	phys_addr_t pstart = cache_work->start;
	phys_addr_t pend = cache_work->end;
	struct nvmap_handle *h = cache_work->h;
	struct nvmap_client *client;
	unsigned int op = cache_work->op;

	if (can_fast_cache_maint(h, pstart, pend, op))
		nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
		nvmap_stats_inc(NS_CFLUSH_DONE, pend - pstart);
	trace_nvmap_cache_maint(client, h, pstart, pend, op, pend - pstart);
	trace_nvmap_cache_flush(pend - pstart,
				nvmap_stats_read(NS_ALLOC),
				nvmap_stats_read(NS_CFLUSH_RQ),
				nvmap_stats_read(NS_CFLUSH_DONE));

	if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
	    h->flags == NVMAP_HANDLE_WRITE_COMBINE || pstart == pend)

	if (fast_cache_maint(h, pstart, pend, op, cache_work->clean_only_dirty))

	prot = nvmap_pgprot(h, PG_PROT_KERNEL);
	pte = nvmap_alloc_pte(h->dev, (void **)&kaddr);

	if (h->heap_pgalloc) {
		heap_page_cache_maint(h, pstart, pend, op, true,
			(h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ?
			cache_work->clean_only_dirty);

	if (pstart > h->size || pend > h->size) {
		pr_warn("cache maintenance outside handle\n");

	pstart += h->carveout->base;
	pend += h->carveout->base;

	while (loop < pend) {
		phys_addr_t next = (loop + PAGE_SIZE) & PAGE_MASK;
		void *base = (void *)kaddr + (loop & ~PAGE_MASK);
		next = min(next, pend);

		set_pte_at(&init_mm, kaddr, *pte,
			   pfn_pte(__phys_to_pfn(loop), prot));
		nvmap_flush_tlb_kernel_page(kaddr);

		inner_cache_maint(op, base, next - loop);

	if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
		outer_cache_maint(op, pstart, pend - pstart);

	nvmap_free_pte(h->dev, pte);

int __nvmap_do_cache_maint(struct nvmap_client *client,
			   struct nvmap_handle *h,
			   unsigned long start, unsigned long end,
			   unsigned int op, bool clean_only_dirty)
	struct cache_maint_op cache_op;

	h = nvmap_handle_get(h);

	if (op == NVMAP_CACHE_OP_INV)
		op = NVMAP_CACHE_OP_WB_INV;

	/* clean_only_dirty is applicable only to the writeback operation */
	if (op != NVMAP_CACHE_OP_WB)
		clean_only_dirty = false;

	cache_op.start = start;
	cache_op.inner = h->flags == NVMAP_HANDLE_CACHEABLE ||
			 h->flags == NVMAP_HANDLE_INNER_CACHEABLE;
	cache_op.outer = h->flags == NVMAP_HANDLE_CACHEABLE;
	cache_op.clean_only_dirty = clean_only_dirty;

	nvmap_stats_inc(NS_CFLUSH_RQ, end - start);
	err = do_cache_maint(&cache_op);

	nvmap_handle_put(h);
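
/*
 * Copies 'bytes' bytes between user memory at rw_addr and the handle,
 * mapping each backing page in turn through the supplied temporary PTE.
 */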
static int rw_handle_page(struct nvmap_handle *h, int is_read,
			  unsigned long start, unsigned long rw_addr,
			  unsigned long bytes, unsigned long kaddr, pte_t *pte)
	pgprot_t prot = nvmap_pgprot(h, PG_PROT_KERNEL);
	unsigned long end = start + bytes;

	while (!err && start < end) {
		struct page *page = NULL;

		if (!h->heap_pgalloc) {
			phys = h->carveout->base + start;
				nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
			phys = page_to_phys(page) + (start & ~PAGE_MASK);

		set_pte_at(&init_mm, kaddr, pte,
			   pfn_pte(__phys_to_pfn(phys), prot));
		nvmap_flush_tlb_kernel_page(kaddr);

		src = (void *)kaddr + (phys & ~PAGE_MASK);
		phys = PAGE_SIZE - (phys & ~PAGE_MASK);
		count = min_t(size_t, end - start, phys);

			err = copy_to_user((void *)rw_addr, src, count);
			err = copy_from_user(src, (void *)rw_addr, count);
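
/*
 * Strided read/write: moves 'count' elements of 'elem_size' bytes between
 * the handle (stride h_stride) and user memory (stride sys_stride),
 * invalidating before reads and writing back after writes; when both
 * strides equal the element size, the transfer is coalesced into a single
 * contiguous copy.
 */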
static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
			 int is_read, unsigned long h_offs,
			 unsigned long sys_addr, unsigned long h_stride,
			 unsigned long sys_stride, unsigned long elem_size,
			 unsigned long count)
	if (elem_size == h_stride && elem_size == sys_stride) {
		h_stride = elem_size;
		sys_stride = elem_size;

	pte = nvmap_alloc_pte(nvmap_dev, &addr);
		return PTR_ERR(pte);

	if (h_offs + elem_size > h->size) {
		nvmap_warn(client, "read/write outside of handle\n");

		__nvmap_do_cache_maint(client, h, h_offs,
			h_offs + elem_size, NVMAP_CACHE_OP_INV, false);

		ret = rw_handle_page(h, is_read, h_offs, sys_addr,
				     elem_size, (unsigned long)addr, *pte);

		__nvmap_do_cache_maint(client, h, h_offs,
			h_offs + elem_size, NVMAP_CACHE_OP_WB_INV,

		copied += elem_size;
		sys_addr += sys_stride;

	nvmap_free_pte(nvmap_dev, pte);
	return ret ?: copied;
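
/*
 * Batched variant: validates the user-supplied arrays of handles, offsets
 * and sizes, then either reserves pages or runs cache maintenance over the
 * whole list.
 */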
int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg,
				 bool is_reserve_ioctl)
	struct nvmap_cache_op_list op;
	struct nvmap_handle **refs;

	if (copy_from_user(&op, arg, sizeof(op)))

	if (!access_ok(VERIFY_READ, op.handles, op.nr * sizeof(u32)))

	if (!access_ok(VERIFY_READ, op.offsets, op.nr * sizeof(u32)))

	if (!access_ok(VERIFY_READ, op.sizes, op.nr * sizeof(u32)))

	if (!op.offsets || !op.sizes)

	refs = kcalloc(op.nr, sizeof(*refs), GFP_KERNEL);

	handle_ptr = (u32 *)(uintptr_t)op.handles;
	offset_ptr = (u32 *)(uintptr_t)op.offsets;
	size_ptr = (u32 *)(uintptr_t)op.sizes;

	for (i = 0; i < op.nr; i++) {

		if (copy_from_user(&handle, &handle_ptr[i], sizeof(handle))) {

		refs[i] = unmarshal_user_handle(handle);

	if (is_reserve_ioctl)
		err = nvmap_reserve_pages(refs, offset_ptr, size_ptr,
		err = nvmap_do_cache_maint_list(refs, offset_ptr, size_ptr,