rtime.felk.cvut.cz Git - hercules2020/nv-tegra/linux-4.4.git/commitdiff
video: tegra: nvmap: use __nvmap_mmap to avoid code duplication
author Sri Krishna chowdary <schowdary@nvidia.com>
Tue, 1 Mar 2016 04:25:46 +0000 (09:55 +0530)
committer Sri Krishna chowdary <schowdary@nvidia.com>
Tue, 5 Apr 2016 11:25:01 +0000 (16:55 +0530)
__nvmap_mmap() can be used to initialize h->vaddr in a handle
within heap_page_cache_maint(). Since lazy unmapping is now the
default, do __nvmap_munmap() to remove the extra reference held by
the handle but leave the kernel h->vaddr mapping intact.

bug 1616899

Change-Id: Iea5a093e211f77ffe8649520e271857d0c1fa873
Signed-off-by: Sri Krishna chowdary <schowdary@nvidia.com>
Reviewed-on: http://git-master/r/1021846
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
drivers/video/tegra/nvmap/nvmap_cache.c

index 95fb85bb67fe6f5eb64ed5b3209af655a3502dfc..9eafec5e659d177fc8297940c31955b853b9dd6d 100644 (file)
@@ -187,42 +187,21 @@ static void heap_page_cache_maint(
        }
 
        if (inner) {
-               void *vaddr = NULL;
-
                if (!h->vaddr) {
-                       struct page **pages;
-                       /* mutex lock protection is not necessary as it is
-                        * already increased in __nvmap_do_cache_maint to
-                        * protect from migrations.
-                        */
-                       nvmap_kmaps_inc_no_lock(h);
-                       pages = nvmap_pages(h->pgalloc.pages,
-                                           h->size >> PAGE_SHIFT);
-                       if (!pages)
+                       if (__nvmap_mmap(h))
+                               __nvmap_munmap(h, h->vaddr);
+                       else
                                goto per_page_cache_maint;
-                       vaddr = vm_map_ram(pages,
-                                       h->size >> PAGE_SHIFT, -1,
-                                       nvmap_pgprot(h, PG_PROT_KERNEL));
-                       nvmap_altfree(pages,
-                               (h->size >> PAGE_SHIFT) * sizeof(*pages));
-               }
-               if (vaddr && atomic_long_cmpxchg(&h->vaddr, 0, (long)vaddr)) {
-                       nvmap_kmaps_dec(h);
-                       vm_unmap_ram(vaddr, h->size >> PAGE_SHIFT);
-               }
-               if (h->vaddr) {
-                       /* Fast inner cache maintenance using single mapping */
-                       inner_cache_maint(op, h->vaddr + start, end - start);
-                       if (!outer)
-                               return;
-                       /* Skip per-page inner maintenance in loop below */
-                       inner = false;
                }
+               /* Fast inner cache maintenance using single mapping */
+               inner_cache_maint(op, h->vaddr + start, end - start);
+               if (!outer)
+                       return;
+               /* Skip per-page inner maintenance in loop below */
+               inner = false;
 
-per_page_cache_maint:
-               if (!h->vaddr)
-                       nvmap_kmaps_dec(h);
        }
+per_page_cache_maint:
 
        while (start < end) {
                struct page *page;