]> rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
video: tegra: nvmap: use kernel api for virtual mapping
authorKrishna Reddy <vdumpa@nvidia.com>
Mon, 17 Mar 2014 23:41:44 +0000 (16:41 -0700)
committerKrishna Reddy <vdumpa@nvidia.com>
Wed, 19 Mar 2014 18:15:10 +0000 (11:15 -0700)
Use the kernel API for kernel virtual mapping needs.

Change-Id: I6ce18a1f75a0951d220a2b991eb24364494f00a8
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/382716
Reviewed-by: Alex Waterman <alexw@nvidia.com>
drivers/video/tegra/nvmap/nvmap.c
drivers/video/tegra/nvmap/nvmap_dev.c
drivers/video/tegra/nvmap/nvmap_handle.c
drivers/video/tegra/nvmap/nvmap_ioctl.c

index 0a7c61396cc1e1dd7d0a273dea2f402c3b495eba..70a6c62349c8e40cead66a3acebff952673af840 100644 (file)
@@ -189,7 +189,7 @@ void *__nvmap_kmap(struct nvmap_handle *h, unsigned int pagenum)
        phys_addr_t paddr;
        unsigned long kaddr;
        pgprot_t prot;
-       pte_t **pte;
+       struct vm_struct *area = NULL;
 
        if (!virt_addr_valid(h))
                return NULL;
@@ -201,18 +201,17 @@ void *__nvmap_kmap(struct nvmap_handle *h, unsigned int pagenum)
        if (pagenum >= h->size >> PAGE_SHIFT)
                goto out;
        prot = nvmap_pgprot(h, PG_PROT_KERNEL);
-       pte = nvmap_alloc_pte(nvmap_dev, (void **)&kaddr);
-       if (!pte)
+       area = alloc_vm_area(PAGE_SIZE, NULL);
+       if (!area)
                goto out;
+       kaddr = (ulong)area->addr;
 
        if (h->heap_pgalloc)
                paddr = page_to_phys(h->pgalloc.pages[pagenum]);
        else
                paddr = h->carveout->base + pagenum * PAGE_SIZE;
 
-       set_pte_at(&init_mm, kaddr, *pte,
-                               pfn_pte(__phys_to_pfn(paddr), prot));
-       nvmap_flush_tlb_kernel_page(kaddr);
+       ioremap_page_range(kaddr, kaddr + PAGE_SIZE, paddr, prot);
        return (void *)kaddr;
 out:
        nvmap_handle_put(h);
@@ -223,7 +222,7 @@ void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum,
                  void *addr)
 {
        phys_addr_t paddr;
-       pte_t **pte;
+       struct vm_struct *area = NULL;
 
        if (!h ||
            WARN_ON(!virt_addr_valid(h)) ||
@@ -244,8 +243,11 @@ void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum,
                outer_flush_range(paddr, paddr + PAGE_SIZE); /* FIXME */
        }
 
-       pte = nvmap_vaddr_to_pte(nvmap_dev, (unsigned long)addr);
-       nvmap_free_pte(nvmap_dev, pte);
+       area = find_vm_area(addr);
+       if (area)
+               free_vm_area(area);
+       else
+               WARN(1, "Invalid address passed");
        nvmap_handle_put(h);
 }
 
@@ -253,7 +255,6 @@ void *__nvmap_mmap(struct nvmap_handle *h)
 {
        pgprot_t prot;
        unsigned long adj_size;
-       unsigned long offs;
        struct vm_struct *v;
        void *p;
 
@@ -280,7 +281,6 @@ void *__nvmap_mmap(struct nvmap_handle *h)
 
 #endif
        /* carveout - explicitly map the pfns into a vmalloc area */
-
        adj_size = h->carveout->base & ~PAGE_MASK;
        adj_size += h->size;
        adj_size = PAGE_ALIGN(adj_size);
@@ -292,35 +292,8 @@ void *__nvmap_mmap(struct nvmap_handle *h)
        }
 
        p = v->addr + (h->carveout->base & ~PAGE_MASK);
-
-       for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
-               unsigned long addr = (unsigned long) v->addr + offs;
-               unsigned int pfn;
-               pgd_t *pgd;
-               pud_t *pud;
-               pmd_t *pmd;
-               pte_t *pte;
-
-               pfn = __phys_to_pfn(h->carveout->base + offs);
-               pgd = pgd_offset_k(addr);
-               pud = pud_alloc(&init_mm, pgd, addr);
-               if (!pud)
-                       break;
-               pmd = pmd_alloc(&init_mm, pud, addr);
-               if (!pmd)
-                       break;
-               pte = pte_alloc_kernel(pmd, addr);
-               if (!pte)
-                       break;
-               set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
-               nvmap_flush_tlb_kernel_page(addr);
-       }
-
-       if (offs != adj_size) {
-               free_vm_area(v);
-               nvmap_handle_put(h);
-               return NULL;
-       }
+       ioremap_page_range((ulong)v->addr, (ulong)v->addr + adj_size,
+               h->carveout->base & PAGE_MASK, prot);
 
        /* leave the handle ref count incremented by 1, so that
         * the handle will not be freed while the kernel mapping exists.
@@ -346,9 +319,9 @@ void __nvmap_munmap(struct nvmap_handle *h, void *addr)
        } else {
                struct vm_struct *vm;
                addr -= (h->carveout->base & ~PAGE_MASK);
-               vm = remove_vm_area(addr);
+               vm = find_vm_area(addr);
                BUG_ON(!vm);
-               kfree(vm);
+               free_vm_area(vm);
        }
        nvmap_handle_put(h);
 }
index ca0494f11f2b0e894cb7e6fde08234640c260c98..ba08f46367a0f918eb2a094140a6d23a4da26803 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/bitmap.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/oom.h>
@@ -257,11 +258,10 @@ unsigned long nvmap_carveout_usage(struct nvmap_client *c,
 int nvmap_flush_heap_block(struct nvmap_client *client,
        struct nvmap_heap_block *block, size_t len, unsigned int prot)
 {
-       pte_t **pte;
-       void *addr;
-       uintptr_t kaddr;
+       ulong kaddr;
        phys_addr_t phys = block->base;
        phys_addr_t end = block->base + len;
+       struct vm_struct *area = NULL;
 
        if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
                goto out;
@@ -275,28 +275,29 @@ int nvmap_flush_heap_block(struct nvmap_client *client,
        }
 #endif
 
-       pte = nvmap_alloc_pte(nvmap_dev, &addr);
-       if (IS_ERR(pte))
-               return PTR_ERR(pte);
+       area = alloc_vm_area(PAGE_SIZE, NULL);
+       if (!area)
+               return -ENOMEM;
 
-       kaddr = (uintptr_t)addr;
+       kaddr = (ulong)area->addr;
 
        while (phys < end) {
                phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
-               unsigned long pfn = __phys_to_pfn(phys);
                void *base = (void *)kaddr + (phys & ~PAGE_MASK);
 
                next = min(next, end);
-               set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, PG_PROT_KERNEL));
+               ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
+                       phys, PG_PROT_KERNEL);
                nvmap_flush_tlb_kernel_page(kaddr);
                FLUSH_DCACHE_AREA(base, next - phys);
                phys = next;
+               unmap_kernel_range(kaddr, PAGE_SIZE);
        }
 
        if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
                outer_flush_range(block->base, block->base + len);
 
-       nvmap_free_pte(nvmap_dev, pte);
+       free_vm_area(area);
 out:
        wmb();
        return 0;
index c6ba359b5d5dc028a78d437faece295ee76c0f1d..b1613e2161026686e9018ba3f14f533e86e22138 100644 (file)
@@ -23,6 +23,7 @@
 #define pr_fmt(fmt)    "%s: " fmt, __func__
 
 #include <linux/err.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/mm.h>
@@ -155,15 +156,16 @@ static int handle_page_alloc(struct nvmap_client *client,
        phys_addr_t paddr;
 #endif
        gfp_t gfp = GFP_NVMAP;
-       unsigned long kaddr;
-       pte_t **pte = NULL;
+       unsigned long kaddr = 0;
+       struct vm_struct *area = NULL;
 
        if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES || zero_memory) {
                gfp |= __GFP_ZERO;
                prot = nvmap_pgprot(h, PG_PROT_KERNEL);
-               pte = nvmap_alloc_pte(nvmap_dev, (void **)&kaddr);
-               if (IS_ERR(pte))
+               area = alloc_vm_area(PAGE_SIZE, NULL);
+               if (!area)
                        return -ENOMEM;
+               kaddr = (ulong)area->addr;
        }
 
        pages = altalloc(nr_page * sizeof(*pages));
@@ -205,11 +207,12 @@ static int handle_page_alloc(struct nvmap_client *client,
                                               PAGE_SIZE);
                                } else {
                                        paddr = page_to_phys(pages[i]);
-                                       set_pte_at(&init_mm, kaddr, *pte,
-                                                  pfn_pte(__phys_to_pfn(paddr),
-                                                          prot));
+                                       ioremap_page_range(kaddr,
+                                               kaddr + PAGE_SIZE,
+                                               paddr, prot);
                                        nvmap_flush_tlb_kernel_page(kaddr);
                                        memset((char *)kaddr, 0, PAGE_SIZE);
+                                       unmap_kernel_range(kaddr, PAGE_SIZE);
                                }
                        }
                }
@@ -234,7 +237,7 @@ static int handle_page_alloc(struct nvmap_client *client,
                goto fail;
 
        if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES || zero_memory)
-               nvmap_free_pte(nvmap_dev, pte);
+               free_vm_area(area);
        h->size = size;
        h->pgalloc.pages = pages;
        h->pgalloc.contig = contiguous;
@@ -242,7 +245,7 @@ static int handle_page_alloc(struct nvmap_client *client,
 
 fail:
        if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES || zero_memory)
-               nvmap_free_pte(nvmap_dev, pte);
+               free_vm_area(area);
        while (i--)
                __free_page(pages[i]);
        altfree(pages, nr_page * sizeof(*pages));
index 5de8b72adabfb8468a277eb51b8899645854e86a..74395b6963ee7f6c103040e660d676d3479e3f62 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/export.h>
 #include <linux/fs.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
@@ -747,7 +748,7 @@ static void outer_cache_maint(unsigned int op, phys_addr_t paddr, size_t size)
 
 static void heap_page_cache_maint(
        struct nvmap_handle *h, unsigned long start, unsigned long end,
-       unsigned int op, bool inner, bool outer, pte_t **pte,
+       unsigned int op, bool inner, bool outer,
        unsigned long kaddr, pgprot_t prot)
 {
        struct page *page;
@@ -780,12 +781,12 @@ static void heap_page_cache_maint(
 
                if (inner) {
                        void *vaddr = (void *)kaddr + off;
-                       BUG_ON(!pte);
                        BUG_ON(!kaddr);
-                       set_pte_at(&init_mm, kaddr, *pte,
-                               pfn_pte(__phys_to_pfn(paddr), prot));
+                       ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
+                               paddr, prot);
                        nvmap_flush_tlb_kernel_page(kaddr);
                        inner_cache_maint(op, vaddr, size);
+                       unmap_kernel_range(kaddr, PAGE_SIZE);
                }
 
                if (outer)
@@ -857,7 +858,7 @@ static bool fast_cache_maint(struct nvmap_handle *h,
                {
                        if (h->heap_pgalloc) {
                                heap_page_cache_maint(h, start,
-                                       end, op, false, true, NULL, 0, 0);
+                                       end, op, false, true, 0, 0);
                        } else  {
                                phys_addr_t pstart;
 
@@ -881,7 +882,6 @@ struct cache_maint_op {
 static int do_cache_maint(struct cache_maint_op *cache_work)
 {
        pgprot_t prot;
-       pte_t **pte = NULL;
        unsigned long kaddr;
        phys_addr_t pstart = cache_work->start;
        phys_addr_t pend = cache_work->end;
@@ -890,6 +890,7 @@ static int do_cache_maint(struct cache_maint_op *cache_work)
        struct nvmap_handle *h = cache_work->h;
        struct nvmap_client *client;
        unsigned int op = cache_work->op;
+       struct vm_struct *area = NULL;
 
        if (!h || !h->alloc)
                return -EFAULT;
@@ -914,18 +915,17 @@ static int do_cache_maint(struct cache_maint_op *cache_work)
                goto out;
 
        prot = nvmap_pgprot(h, PG_PROT_KERNEL);
-       pte = nvmap_alloc_pte(nvmap_dev, (void **)&kaddr);
-       if (IS_ERR(pte)) {
-               err = PTR_ERR(pte);
-               pte = NULL;
+       area = alloc_vm_area(PAGE_SIZE, NULL);
+       if (!area) {
+               err = -ENOMEM;
                goto out;
        }
+       kaddr = (ulong)area->addr;
 
        if (h->heap_pgalloc) {
                heap_page_cache_maint(h, pstart, pend, op, true,
                        (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ?
-                                       false : true,
-                       pte, kaddr, prot);
+                       false : true, kaddr, prot);
                goto out;
        }
 
@@ -944,20 +944,21 @@ static int do_cache_maint(struct cache_maint_op *cache_work)
                void *base = (void *)kaddr + (loop & ~PAGE_MASK);
                next = min(next, pend);
 
-               set_pte_at(&init_mm, kaddr, *pte,
-                          pfn_pte(__phys_to_pfn(loop), prot));
+               ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
+                       loop, prot);
                nvmap_flush_tlb_kernel_page(kaddr);
 
                inner_cache_maint(op, base, next - loop);
                loop = next;
+               unmap_kernel_range(kaddr, PAGE_SIZE);
        }
 
        if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
                outer_cache_maint(op, pstart, pend - pstart);
 
 out:
-       if (pte)
-               nvmap_free_pte(nvmap_dev, pte);
+       if (area)
+               free_vm_area(area);
        return err;
 }
 
@@ -989,7 +990,7 @@ int __nvmap_do_cache_maint(struct nvmap_client *client,
 
 static int rw_handle_page(struct nvmap_handle *h, int is_read,
                          unsigned long start, unsigned long rw_addr,
-                         unsigned long bytes, unsigned long kaddr, pte_t *pte)
+                         unsigned long bytes, unsigned long kaddr)
 {
        pgprot_t prot = nvmap_pgprot(h, PG_PROT_KERNEL);
        unsigned long end = start + bytes;
@@ -1010,9 +1011,7 @@ static int rw_handle_page(struct nvmap_handle *h, int is_read,
                        phys = page_to_phys(page) + (start & ~PAGE_MASK);
                }
 
-               set_pte_at(&init_mm, kaddr, pte,
-                          pfn_pte(__phys_to_pfn(phys), prot));
-               nvmap_flush_tlb_kernel_page(kaddr);
+               ioremap_page_range(kaddr, kaddr + PAGE_SIZE, phys, prot);
 
                src = (void *)kaddr + (phys & ~PAGE_MASK);
                phys = PAGE_SIZE - (phys & ~PAGE_MASK);
@@ -1031,6 +1030,7 @@ static int rw_handle_page(struct nvmap_handle *h, int is_read,
 
                if (page)
                        put_page(page);
+               unmap_kernel_range(kaddr, PAGE_SIZE);
        }
 
        return err;
@@ -1043,9 +1043,9 @@ static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
                         unsigned long count)
 {
        ssize_t copied = 0;
-       pte_t **pte;
        void *addr;
        int ret = 0;
+       struct vm_struct *area;
 
        if (!elem_size)
                return -EINVAL;
@@ -1060,9 +1060,10 @@ static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
                count = 1;
        }
 
-       pte = nvmap_alloc_pte(nvmap_dev, &addr);
-       if (IS_ERR(pte))
-               return PTR_ERR(pte);
+       area = alloc_vm_area(PAGE_SIZE, NULL);
+       if (!area)
+               return -ENOMEM;
+       addr = area->addr;
 
        while (count--) {
                if (h_offs + elem_size > h->size) {
@@ -1076,7 +1077,7 @@ static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
                                CACHE_MAINT_IMMEDIATE);
 
                ret = rw_handle_page(h, is_read, h_offs, sys_addr,
-                                    elem_size, (unsigned long)addr, *pte);
+                                    elem_size, (unsigned long)addr);
 
                if (ret)
                        break;
@@ -1091,6 +1092,6 @@ static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
                h_offs += h_stride;
        }
 
-       nvmap_free_pte(nvmap_dev, pte);
+       free_vm_area(area);
        return ret ?: copied;
 }