rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
video: tegra: nvmap: remove nvmap_vm_insert_handle()
author	Sri Krishna chowdary <schowdary@nvidia.com>
	Tue, 21 Jun 2016 14:34:46 +0000 (20:04 +0530)
committer	mobile promotions <svcmobile_promotions@nvidia.com>
	Fri, 1 Jul 2016 00:30:18 +0000 (17:30 -0700)
Inserting a handle's new pages into a vma does not seem
to improve performance in most cases, so the path is mostly
overhead. Remove it. Dirty-page accounting from
_nvmap_handle_mkdirty() now runs with the handle lock
already held, so nvmap_handle_mk() must not take the lock
a second time; add a 'locked' argument to it so that
_nvmap_handle_mkdirty() can skip the locking.

This also avoids a Coverity defect.

Coverity ID 32110

bug 200174682

Change-Id: I0a04f554dda1aa2f27dd81fcae08b7dc8adfe249
Signed-off-by: Sri Krishna chowdary <schowdary@nvidia.com>
(cherry picked from commit 3a959fad6c958c0572b7a216168558382c029391)
Reviewed-on: http://git-master/r/1170230
GVS: Gerrit_Virtual_Submit
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
drivers/video/tegra/nvmap/nvmap_mm.c
drivers/video/tegra/nvmap/nvmap_priv.h
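
The core of the change is the new 'locked' argument to nvmap_handle_mk(),
visible in the nvmap_priv.h hunk below. A minimal userspace sketch of that
conditional-locking pattern follows (a pthread mutex stands in for the kernel
mutex; names like mark_pages and mark_dirty_locked are illustrative, not
nvmap code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct handle {
	pthread_mutex_t lock;
	int pages[4];		/* stand-in for h->pgalloc.pages[] */
	int npages;
};

/* Apply fn to pages in [start, end); 'locked' says the caller holds h->lock. */
static int mark_pages(struct handle *h, int start, int end,
		      bool (*fn)(int *), bool locked)
{
	int i, nchanged = 0;

	if (!locked)
		pthread_mutex_lock(&h->lock);
	for (i = start; i < end && i < h->npages; i++)
		nchanged += fn(&h->pages[i]) ? 1 : 0;
	if (!locked)
		pthread_mutex_unlock(&h->lock);
	return nchanged;
}

static bool mkdirty(int *page)
{
	if (*page)
		return false;	/* already dirty, nothing changed */
	*page = 1;
	return true;
}

/* Caller that already holds the lock, mirroring _nvmap_handle_mkdirty(). */
static int mark_dirty_locked(struct handle *h)
{
	int n;

	pthread_mutex_lock(&h->lock);
	/* locked=true: mark_pages must not take the lock a second time */
	n = mark_pages(h, 0, h->npages, mkdirty, true);
	pthread_mutex_unlock(&h->lock);
	return n;
}

int main(void)
{
	static struct handle h = { .lock = PTHREAD_MUTEX_INITIALIZER,
				   .npages = 4 };

	printf("marked %d pages dirty\n", mark_dirty_locked(&h));
	return 0;
}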

diff --git a/drivers/video/tegra/nvmap/nvmap_mm.c b/drivers/video/tegra/nvmap/nvmap_mm.c
index b6b9aa863e6c43b06d71ad61336ab9a0a27617ad..101d1f2b770b9f853d1879aa55b7be7a8bf95750 100644
--- a/drivers/video/tegra/nvmap/nvmap_mm.c
+++ b/drivers/video/tegra/nvmap/nvmap_mm.c
@@ -195,61 +195,6 @@ void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size)
        mutex_unlock(&handle->lock);
 }
 
-static int nvmap_vm_insert_handle(struct nvmap_handle *handle,
-                       struct vm_area_struct *vma, u32 vm_size)
-{
-       int i;
-       pte_t *start_pte = NULL;
-       pte_t *pte = NULL;
-       spinlock_t *ptl = NULL;
-       unsigned long curr_pmd = 0;
-       unsigned long addr = vma->vm_start;
-
-       vm_size >>= PAGE_SHIFT;
-       for (i = 0; i < vm_size; i++, addr += PAGE_SHIFT) {
-               if (curr_pmd != (addr & PMD_MASK)) {
-                       curr_pmd = addr & PMD_MASK;
-                       if (ptl && start_pte)
-                               pte_unmap_unlock(start_pte, ptl);
-                       else if (ptl)
-                               BUG();
-                       start_pte = pte = get_locked_pte(vma->vm_mm,
-                                                       addr, &ptl);
-               } else {
-                       pte++;
-               }
-               if (!pte) {
-                       pr_err("nvmap: %s get_locked_pte failed\n",
-                               __func__);
-                       if (ptl && start_pte)
-                               pte_unmap_unlock(start_pte, ptl);
-                       else if (ptl)
-                               BUG();
-                       return -ENOMEM;
-               }
-               if (pte_none(pte)) {
-                       struct page *page =
-                                       nvmap_to_page(handle->pgalloc.pages[i]);
-                       /*
-                        * page->_map_count gets incrmented while
-                        * mapping here. If _count is not incremented,
-                        * mm code will see that page as a bad page
-                        * and hits VM_BUG_ON
-                        */
-                       get_page(page);
-                       do_set_pte(vma, vma->vm_start + (i << PAGE_SHIFT), page,
-                                       pte, true, false);
-               }
-               nvmap_page_mkdirty(&handle->pgalloc.pages[i]);
-               atomic_inc(&handle->pgalloc.ndirty);
-       }
-       if (ptl && start_pte)
-               pte_unmap_unlock(start_pte, ptl);
-       else if (ptl)
-               BUG();
-       return 0;
-}
-
 static int nvmap_prot_handle(struct nvmap_handle *handle, u32 offset,
                u32 size, int op)
 {
@@ -310,9 +255,7 @@ static int nvmap_prot_handle(struct nvmap_handle *handle, u32 offset,
                                        vma_list->save_vm_flags);
                        if (err)
                                goto try_unlock;
-                       err = nvmap_vm_insert_handle(handle, vma, vm_size);
-                       if (err)
-                               goto try_unlock;
+                       _nvmap_handle_mkdirty(handle, 0, size);
                        break;
                default:
                        BUG();
diff --git a/drivers/video/tegra/nvmap/nvmap_priv.h b/drivers/video/tegra/nvmap/nvmap_priv.h
index 9f3e5d6fcdd539e0fca84e565886529f41cdb425..b24e1691084ac7c3edd339f91d2a09a06c46df01 100644
--- a/drivers/video/tegra/nvmap/nvmap_priv.h
+++ b/drivers/video/tegra/nvmap/nvmap_priv.h
@@ -509,18 +509,21 @@ static inline bool nvmap_page_mkclean(struct page **page)
  */
 static inline int nvmap_handle_mk(struct nvmap_handle *h,
                                  u32 offset, u32 size,
-                                 bool (*fn)(struct page **))
+                                 bool (*fn)(struct page **),
+                                 bool locked)
 {
        int i, nchanged = 0;
        int start_page = PAGE_ALIGN(offset) >> PAGE_SHIFT;
        int end_page = (offset + size) >> PAGE_SHIFT;
 
-       mutex_lock(&h->lock);
+       if (!locked)
+               mutex_lock(&h->lock);
        if (h->heap_pgalloc) {
                for (i = start_page; i < end_page; i++)
                        nchanged += fn(&h->pgalloc.pages[i]) ? 1 : 0;
        }
-       mutex_unlock(&h->lock);
+       if (!locked)
+               mutex_unlock(&h->lock);
        return nchanged;
 }
 
@@ -532,12 +535,12 @@ static inline void nvmap_handle_mkclean(struct nvmap_handle *h,
        if (h->heap_pgalloc && !atomic_read(&h->pgalloc.ndirty))
                return;
 
-       nchanged = nvmap_handle_mk(h, offset, size, nvmap_page_mkclean);
+       nchanged = nvmap_handle_mk(h, offset, size, nvmap_page_mkclean, false);
        if (h->heap_pgalloc)
                atomic_sub(nchanged, &h->pgalloc.ndirty);
 }
 
-static inline void nvmap_handle_mkdirty(struct nvmap_handle *h,
+static inline void _nvmap_handle_mkdirty(struct nvmap_handle *h,
                                        u32 offset, u32 size)
 {
        int nchanged;
@@ -546,7 +549,7 @@ static inline void nvmap_handle_mkdirty(struct nvmap_handle *h,
                (atomic_read(&h->pgalloc.ndirty) == (h->size >> PAGE_SHIFT)))
                return;
 
-       nchanged = nvmap_handle_mk(h, offset, size, nvmap_page_mkdirty);
+       nchanged = nvmap_handle_mk(h, offset, size, nvmap_page_mkdirty, true);
        if (h->heap_pgalloc)
                atomic_add(nchanged, &h->pgalloc.ndirty);
 }
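
For reference, a hedged userspace sketch of the dirty-page accounting that
wraps nvmap_handle_mk() above: the helper reports how many pages actually
changed state, and each caller adjusts the ndirty counter by exactly that
amount, which keeps the counter equal to the number of dirty pages. All names
here are stand-ins, not nvmap code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NPAGES 4

static int pages[NPAGES];	/* 0 = clean, 1 = dirty */
static atomic_int ndirty;	/* stand-in for h->pgalloc.ndirty */

/* Each returns true only if the page actually changed state. */
static bool mkdirty(int *p) { return *p == 0 ? (*p = 1, true) : false; }
static bool mkclean(int *p) { return *p == 1 ? (*p = 0, true) : false; }

/* Apply fn to pages in [start, end) and count how many changed. */
static int apply(bool (*fn)(int *), int start, int end)
{
	int i, nchanged = 0;

	for (i = start; i < end; i++)
		nchanged += fn(&pages[i]) ? 1 : 0;
	return nchanged;
}

int main(void)
{
	/* mkdirty path: add exactly the number of pages that changed */
	atomic_fetch_add(&ndirty, apply(mkdirty, 0, NPAGES));
	/* mkclean path: subtract exactly the number that changed back */
	atomic_fetch_sub(&ndirty, apply(mkclean, 0, 2));
	printf("ndirty = %d\n", atomic_load(&ndirty));	/* prints 2 */
	return 0;
}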