nvmap: mask off page type bits
author     Sri Krishna chowdary <schowdary@nvidia.com>
           Thu, 10 Apr 2014 18:01:30 +0000 (23:31 +0530)
committer  Tony Ly <tly@nvidia.com>
           Wed, 7 May 2014 18:51:32 +0000 (11:51 -0700)
Mask off the page type bits in the page pointer wherever it is used.

bug 1444151

Change-Id: I8ebb6d92f0978d76189a3d366225f43ad5c4fc7e
Signed-off-by: Sri Krishna chowdary <schowdary@nvidia.com>
Reviewed-on: http://git-master/r/394836
(cherry picked from commit 6552f9a86f808c786c630b5b134e55a1133b3bf0)
Reviewed-on: http://git-master/r/405128
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
drivers/video/tegra/nvmap/nvmap.c
drivers/video/tegra/nvmap/nvmap_dev.c
drivers/video/tegra/nvmap/nvmap_handle.c
drivers/video/tegra/nvmap/nvmap_ioctl.c
drivers/video/tegra/nvmap/nvmap_mm.c
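Note on the change: nvmap keeps a small "page type" tag in the otherwise unused low bits of each struct page pointer stored in h->pgalloc.pages[], so the raw array entries can no longer be handed straight to mm helpers such as page_to_phys() or vm_map_ram(). Every use site in the hunks below therefore strips the tag first, either per entry via nvmap_to_page() or, where an API needs a plain struct page ** array, by taking a masked temporary copy with nvmap_pages() and releasing it with nvmap_altfree(). Those helpers live in the driver's private header and are not part of this diff; the sketch below only illustrates the masking idea, and the mask width and helper bodies are assumptions, not the driver's actual definitions.

/*
 * Illustrative sketch only -- not taken from this commit.  It assumes the
 * type tag occupies the low, alignment-guaranteed bits of the pointer; the
 * real helpers are defined in the driver's private header and may differ.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define NVMAP_PAGE_TYPE_MASK	0x3UL	/* hypothetical: tag kept in the low two bits */

/* Strip the tag so the entry can be used as a normal struct page pointer. */
static inline struct page *nvmap_to_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~NVMAP_PAGE_TYPE_MASK);
}

/*
 * Build a temporary array of untagged pointers for callers of APIs that take
 * a plain struct page ** (vm_map_ram(), sg_alloc_table_from_pages(), ...).
 * The caller frees it again; this sketch uses vmalloc()/vfree() sizing, while
 * the driver passes the allocation size to nvmap_altfree() for the same job.
 */
static struct page **nvmap_pages(struct page **pages, u32 nr_pages)
{
	struct page **untagged;
	u32 i;

	untagged = vmalloc(nr_pages * sizeof(*untagged));
	if (!untagged)
		return NULL;
	for (i = 0; i < nr_pages; i++)
		untagged[i] = nvmap_to_page(pages[i]);
	return untagged;
}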

diff --git a/drivers/video/tegra/nvmap/nvmap.c b/drivers/video/tegra/nvmap/nvmap.c
index 0a7c61396cc1e1dd7d0a273dea2f402c3b495eba..03aa64d730627d3f99713d81bdf3ba3b8dbe5e6d 100644
@@ -206,7 +206,7 @@ void *__nvmap_kmap(struct nvmap_handle *h, unsigned int pagenum)
                goto out;
 
        if (h->heap_pgalloc)
-               paddr = page_to_phys(h->pgalloc.pages[pagenum]);
+               paddr = page_to_phys(nvmap_to_page(h->pgalloc.pages[pagenum]));
        else
                paddr = h->carveout->base + pagenum * PAGE_SIZE;
 
@@ -234,7 +234,7 @@ void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum,
                return;
 
        if (h->heap_pgalloc)
-               paddr = page_to_phys(h->pgalloc.pages[pagenum]);
+               paddr = page_to_phys(nvmap_to_page(h->pgalloc.pages[pagenum]));
        else
                paddr = h->carveout->base + pagenum * PAGE_SIZE;
 
@@ -267,10 +267,20 @@ void *__nvmap_mmap(struct nvmap_handle *h)
        prot = nvmap_pgprot(h, PG_PROT_KERNEL);
 
 #ifdef NVMAP_LAZY_VFREE
+       struct page **pages;
        if (h->heap_pgalloc) {
-               if (!h->vaddr)
-                       h->vaddr = vm_map_ram(h->pgalloc.pages,
+               if (!h->vaddr) {
+                       pages = nvmap_pages(h->pgalloc.pages,
+                                           h->size >> PAGE_SHIFT);
+                       if (!pages)
+                               return NULL;
+                       vaddr = vm_map_ram(pages,
                                h->size >> PAGE_SHIFT, -1, prot);
+                       nvmap_altfree(pages,
+                               (h->size >> PAGE_SHIFT) * sizeof(*pages));
+               }
+               if (vaddr && atomic_long_cmpxchg(&h->vaddr, 0, (long)vaddr))
+                       vm_unmap_ram(vaddr, h->size >> PAGE_SHIFT);
                return h->vaddr;
        }
 #else
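Besides the masking, the hunk above also changes how the lazily created kernel mapping is installed: instead of assigning h->vaddr directly, the new mapping is published with atomic_long_cmpxchg(), and a thread that loses the race unmaps its own duplicate and uses the winner's. A minimal, self-contained sketch of that publish-or-discard idea (the struct and names here are invented for the example, not nvmap's):

/*
 * Illustrative sketch, assuming the published field starts out 0 and is
 * written exactly once.  "obj" and its fields are made up for the example.
 */
#include <linux/atomic.h>
#include <linux/vmalloc.h>

struct obj {
	atomic_long_t vaddr;	/* 0 until a mapping has been published */
};

static void *publish_mapping(struct obj *o, void *vaddr, unsigned int nr_pages)
{
	/* A non-zero old value means another thread already published one. */
	if (vaddr && atomic_long_cmpxchg(&o->vaddr, 0, (long)vaddr))
		vm_unmap_ram(vaddr, nr_pages);		/* lost the race: drop the duplicate */
	return (void *)atomic_long_read(&o->vaddr);	/* everyone uses the published mapping */
}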
@@ -456,6 +466,7 @@ struct sg_table *__nvmap_sg_table(struct nvmap_client *client,
 {
        struct sg_table *sgt = NULL;
        int err, npages;
+       struct page **pages;
 
        if (!virt_addr_valid(h))
                return ERR_PTR(-EINVAL);
@@ -477,8 +488,10 @@ struct sg_table *__nvmap_sg_table(struct nvmap_client *client,
                        goto err;
                sg_set_buf(sgt->sgl, phys_to_virt(handle_phys(h)), h->size);
        } else {
-               err = sg_alloc_table_from_pages(sgt, h->pgalloc.pages,
+               pages = nvmap_pages(h->pgalloc.pages, npages);
+               err = sg_alloc_table_from_pages(sgt, pages,
                                npages, 0, h->size, GFP_KERNEL);
+               nvmap_altfree(pages, npages * sizeof(*pages));
                if (err)
                        goto err;
        }
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
index 53ed5dd9a876890bc7d1726bf91098ecdcdb59bc..82cccea2714e476761febcf587d586bd74fcea06 100644
@@ -833,7 +833,7 @@ static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                page = pfn_to_page(pfn);
        } else {
                offs >>= PAGE_SHIFT;
-               page = priv->handle->pgalloc.pages[offs];
+               page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
        }
 
        if (page)
@@ -976,7 +976,8 @@ static void nvmap_iovmm_get_total_mss(u64 *pss, u64 *non_pss, u64 *total)
                }
 
                for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
-                       int mapcount = page_mapcount(h->pgalloc.pages[i]);
+                       struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
+                       int mapcount = page_mapcount(page);
                        if (!mapcount)
                                *non_pss += PAGE_SIZE;
                        *total += PAGE_SIZE;
@@ -1064,7 +1065,8 @@ static void nvmap_iovmm_get_client_mss(struct nvmap_client *client, u64 *pss,
                        continue;
 
                for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
-                       int mapcount = page_mapcount(h->pgalloc.pages[i]);
+                       struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
+                       int mapcount = page_mapcount(page);
                        if (!mapcount)
                                *non_pss += PAGE_SIZE;
                        *total += PAGE_SIZE;
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index c513380651552b392f456a8888fdd6a1d21e13d2..b6d4eb10f9cdf968fd6a2b4ef861b9ece632ac11 100644
@@ -27,8 +27,6 @@
 #include <linux/list.h>
 #include <linux/mm.h>
 #include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
 #include <linux/dma-buf.h>
 #include <linux/moduleparam.h>
 #include <linux/nvmap.h>
@@ -106,6 +104,9 @@ void _nvmap_handle_free(struct nvmap_handle *h)
                vm_unmap_ram(h->vaddr, h->size >> PAGE_SHIFT);
 #endif
 
+       for (i = 0; i < nr_page; i++)
+               h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);
+
 #ifdef CONFIG_NVMAP_PAGE_POOLS
        pool = &nvmap_dev->pool;
 
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.c b/drivers/video/tegra/nvmap/nvmap_ioctl.c
index 74369264ffb1a142e7c5207413b1f0bd76dd220f..7e250b692d70b8afd2972d3399966f6b4abcb838 100644
@@ -757,10 +757,22 @@ static void heap_page_cache_maint(
        size_t size;
 
 #ifdef NVMAP_LAZY_VFREE
+       struct page **pages;
        if (inner) {
-               if (!h->vaddr)
-                       h->vaddr = vm_map_ram(h->pgalloc.pages,
+               void *vaddr = NULL;
+
+               if (!h->vaddr) {
+                       pages = nvmap_pages(h->pgalloc.pages,
+                                           h->size >> PAGE_SHIFT);
+                       if (!pages)
+                               goto per_page_cache_maint;
+                       vaddr = vm_map_ram(pages,
                                        h->size >> PAGE_SHIFT, -1, prot);
+                       nvmap_altfree(pages,
+                               (h->size >> PAGE_SHIFT) * sizeof(*pages));
+               }
+               if (vaddr && atomic_long_cmpxchg(&h->vaddr, 0, (long)vaddr))
+                       vm_unmap_ram(vaddr, h->size >> PAGE_SHIFT);
                if (h->vaddr) {
                        /* Fast inner cache maintenance using single mapping */
                        inner_cache_maint(op, h->vaddr + start, end - start);
@@ -770,9 +782,11 @@ static void heap_page_cache_maint(
                        inner = false;
                }
        }
+per_page_cache_maint:
 #endif
+
        while (start < end) {
-               page = h->pgalloc.pages[start >> PAGE_SHIFT];
+               page = nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
                next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
                off = start & ~PAGE_MASK;
                size = next - start;
@@ -1007,7 +1021,8 @@ static int rw_handle_page(struct nvmap_handle *h, int is_read,
                if (!h->heap_pgalloc) {
                        phys = h->carveout->base + start;
                } else {
-                       page = h->pgalloc.pages[start >> PAGE_SHIFT];
+                       page =
+                          nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
                        BUG_ON(!page);
                        get_page(page);
                        phys = page_to_phys(page) + (start & ~PAGE_MASK);
diff --git a/drivers/video/tegra/nvmap/nvmap_mm.c b/drivers/video/tegra/nvmap/nvmap_mm.c
index bb8ab75bf2970533f93796e8729df94e6102e1e5..7f9dfbdd0065fe294e063d0eab2afc2378de7c0f 100644
@@ -73,13 +73,14 @@ void nvmap_flush_cache(struct page **pages, int numpages)
                nvmap_stats_read(NS_CFLUSH_DONE));
 
        for (i = 0; i < numpages; i++) {
+               struct page *page = nvmap_to_page(pages[i]);
 #ifdef CONFIG_ARM64 //__flush_dcache_page flushes inner and outer on ARM64
                if (flush_inner)
-                       __flush_dcache_page(pages[i]);
+                       __flush_dcache_page(page);
 #else
                if (flush_inner)
-                       __flush_dcache_page(page_mapping(pages[i]), pages[i]);
-               base = page_to_phys(pages[i]);
+                       __flush_dcache_page(page_mapping(page), page);
+               base = page_to_phys(page);
                outer_flush_range(base, base + PAGE_SIZE);
 #endif
        }