rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
video: tegra: nvmap: support inserting pages on unreserve
author Sri Krishna chowdary <schowdary@nvidia.com>
Tue, 12 May 2015 07:12:49 +0000 (12:42 +0530)
committer mobile promotions <svcmobile_promotions@nvidia.com>
Wed, 9 Dec 2015 02:23:26 +0000 (18:23 -0800)
Sometimes it is beneficial to insert pages into the VMA on unreserve.
This avoids page-fault overhead on any later accesses. If user space
knows a priori which regions of a group of handles will definitely be
accessed, it can pass that information to the reserve ioctl call with
the new NVMAP_INSERT_PAGES_ON_UNRESERVE option.

Mark the pages as writable; otherwise write accesses would still fault
(just as after a read fault), nullifying the purpose of the option.
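
For illustration, a user-space call that exercises this option could look
roughly like the sketch below. This is a minimal sketch only: it assumes the
reserve ioctl and its argument are the NVMAP_IOC_RESERVE / struct
nvmap_cache_op_list pair declared in include/linux/nvmap.h, and
unreserve_and_insert() is an illustrative wrapper name, not part of any
nvmap library.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/nvmap.h>  /* assumed to declare NVMAP_IOC_RESERVE and
                                 struct nvmap_cache_op_list */

    /*
     * Illustrative only: unreserve 'nr' regions of previously reserved
     * handles and ask nvmap to insert the backing pages into the user
     * mapping right away, so later accesses take no page faults.
     */
    static int unreserve_and_insert(int nvmap_fd, uint32_t *handles,
                                    uint32_t *offsets, uint32_t *sizes,
                                    uint32_t nr)
    {
            struct nvmap_cache_op_list op = {
                    .handles = (uintptr_t)handles,  /* pointers passed as u64 */
                    .offsets = (uintptr_t)offsets,
                    .sizes   = (uintptr_t)sizes,
                    .nr      = nr,
                    .op      = NVMAP_INSERT_PAGES_ON_UNRESERVE,
            };

            return ioctl(nvmap_fd, NVMAP_IOC_RESERVE, &op);
    }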

Bug 200092803

Change-Id: Id44502b9a5d890d24f221e38c6ae781584ce7ace
Signed-off-by: Sri Krishna chowdary <schowdary@nvidia.com>
Reviewed-on: http://git-master/r/781602
GVS: Gerrit_Virtual_Submit
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
drivers/video/tegra/nvmap/nvmap_mm.c
drivers/video/tegra/nvmap/nvmap_priv.h
include/linux/nvmap.h

index d7e8ceae2fae04fa1b6b7fd0c8423e00a53e752d..7746a73f210c61ef0182dda1e35f596a82f2dfea 100644 (file)
@@ -267,6 +267,74 @@ void nvmap_zap_handles(struct nvmap_handle **handles, u32 *offsets,
                nvmap_zap_handle(handles[i], offsets[i], sizes[i]);
 }
 
+void nvmap_vm_insert_handle(struct nvmap_handle *handle, u32 offset, u32 size)
+{
+       struct list_head *vmas;
+       struct nvmap_vma_list *vma_list;
+       struct vm_area_struct *vma;
+
+       if (!handle->heap_pgalloc)
+               return;
+
+       if (!size) {
+               offset = 0;
+               size = handle->size;
+       }
+
+       mutex_lock(&handle->lock);
+       vmas = &handle->vmas;
+       list_for_each_entry(vma_list, vmas, list) {
+               struct nvmap_vma_priv *priv;
+               u32 vm_size = size;
+               int end;
+               int i;
+
+               vma = vma_list->vma;
+               priv = vma->vm_private_data;
+               if ((offset + size) > (vma->vm_end - vma->vm_start))
+                       vm_size = vma->vm_end - vma->vm_start - offset;
+
+               end = PAGE_ALIGN(offset + vm_size) >> PAGE_SHIFT;
+               /* keep 'offset' in bytes; it is reused for the next VMA */
+               for (i = offset >> PAGE_SHIFT; i < end; i++) {
+                       struct page *page = nvmap_to_page(handle->pgalloc.pages[i]);
+                       pte_t *pte;
+                       spinlock_t *ptl;
+
+                       down_write(&vma->vm_mm->mmap_sem);
+                       pte = get_locked_pte(vma->vm_mm, vma->vm_start + (i << PAGE_SHIFT), &ptl);
+                       if (!pte) {
+                               pr_err("nvmap: %s get_locked_pte failed\n", __func__);
+                               up_write(&vma->vm_mm->mmap_sem);
+                               mutex_unlock(&handle->lock);
+                               return;
+                       }
+                       /*
+                        * page->_mapcount gets incremented while mapping here. If _count is
+                        * not also incremented, the zap code will see the page as a bad page
+                        * and throw a lot of warnings.
+                        */
+                       atomic_inc(&page->_count);
+                       do_set_pte(vma, vma->vm_start + (i << PAGE_SHIFT), page, pte, true, false);
+                       pte_unmap_unlock(pte, ptl);
+                       up_write(&vma->vm_mm->mmap_sem);
+               }
+       }
+       mutex_unlock(&handle->lock);
+}
+
+void nvmap_vm_insert_handles(struct nvmap_handle **handles, u32 *offsets,
+                      u32 *sizes, u32 nr)
+{
+       int i;
+
+       for (i = 0; i < nr; i++) {
+               nvmap_vm_insert_handle(handles[i], offsets[i], sizes[i]);
+               nvmap_handle_mkdirty(handles[i], offsets[i],
+                                    sizes[i] ? sizes[i] : handles[i]->size);
+       }
+}
+
 int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets, u32 *sizes,
                        u32 nr, u32 op)
 {
@@ -284,6 +352,8 @@ int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets, u32 *sizes,
 
        if (op == NVMAP_PAGES_RESERVE)
                nvmap_zap_handles(handles, offsets, sizes, nr);
+       else if (op == NVMAP_INSERT_PAGES_ON_UNRESERVE)
+               nvmap_vm_insert_handles(handles, offsets, sizes, nr);
 
        if (!(handles[0]->userflags & NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE))
                        return 0;
@@ -294,7 +364,8 @@ int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets, u32 *sizes,
                for (i = 0; i < nr; i++)
                        nvmap_handle_mkclean(handles[i], offsets[i],
                                             sizes[i] ? sizes[i] : handles[i]->size);
-       } else if (!handles[0]->heap_pgalloc) {
+       } else if ((op == NVMAP_PAGES_UNRESERVE) && handles[0]->heap_pgalloc) {
+       } else {
                nvmap_do_cache_maint_list(handles, offsets, sizes,
                                          NVMAP_CACHE_OP_WB_INV, nr);
        }
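
The hunk above is the core of the change: nvmap_vm_insert_handle() walks every
VMA attached to the handle, takes an extra reference on each page in the
requested range, and writes the PTE directly through
get_locked_pte()/do_set_pte() with write set to true, so neither the first
read nor the first write faults later. For comparison only, simpler drivers
often get the same pre-population effect from the exported vm_insert_page()
helper; the following is a minimal sketch of that pattern (prefault_range()
is an illustrative name, not nvmap code):

    #include <linux/mm.h>

    /*
     * Illustrative only: insert the pages backing a buffer into a user
     * VMA up front so that later accesses do not fault.  vm_insert_page()
     * takes its own reference on each page and is limited to driver
     * (VM_MIXEDMAP-style) mappings, which is one reason nvmap open-codes
     * the PTE setup and takes the _count reference itself instead.
     */
    static int prefault_range(struct vm_area_struct *vma,
                              struct page **pages, unsigned long nr_pages)
    {
            unsigned long i;
            int err = 0;

            down_write(&vma->vm_mm->mmap_sem);
            for (i = 0; i < nr_pages; i++) {
                    unsigned long addr = vma->vm_start + (i << PAGE_SHIFT);

                    if (addr >= vma->vm_end)
                            break;
                    err = vm_insert_page(vma, addr, pages[i]);
                    if (err == -EBUSY)      /* PTE already present: fine */
                            err = 0;
                    if (err)
                            break;
            }
            up_write(&vma->vm_mm->mmap_sem);
            return err;
    }
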
index 27f4da3e86cc74423d965fcc690c14ffe333b711..a96cb5a1acc3acb54717dfd08d884b948120ed95 100644 (file)
@@ -467,6 +467,9 @@ int nvmap_dmabuf_stash_init(void);
 void *nvmap_altalloc(size_t len);
 void nvmap_altfree(void *ptr, size_t len);
 
+void do_set_pte(struct vm_area_struct *vma, unsigned long address,
+               struct page *page, pte_t *pte, bool write, bool anon);
+
 static inline struct page *nvmap_to_page(struct page *page)
 {
        return (struct page *)((unsigned long)page & ~3UL);
@@ -543,6 +546,14 @@ static inline void nvmap_handle_mkclean(struct nvmap_handle *h,
                atomic_sub(nchanged, &h->pgalloc.ndirty);
 }
 
+static inline void nvmap_handle_mkdirty(struct nvmap_handle *h,
+                                       u32 offset, u32 size)
+{
+       int nchanged = nvmap_handle_mk(h, offset, size, nvmap_page_mkdirty);
+       if (h->heap_pgalloc)
+               atomic_add(nchanged, &h->pgalloc.ndirty);
+}
+
 static inline void nvmap_handle_mkunreserved(struct nvmap_handle *h,
                                             u32 offset, u32 size)
 {
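
As nvmap_to_page() above suggests (it masks off the low two bits of the stored
pointer), per-page state such as the dirty and reserved flags lives in the
spare low bits of the struct page pointers kept in the handle's pgalloc.pages
array. nvmap_handle_mk() applies a per-page helper such as nvmap_page_mkdirty
over the requested range and returns how many pages actually changed state,
which the new nvmap_handle_mkdirty() then applies to the pgalloc.ndirty
counter. A minimal, self-contained sketch of that tag-bit idiom (TAG_DIRTY and
the helper names are illustrative, not the actual nvmap macros):

    #include <linux/mm_types.h>

    #define TAG_DIRTY 1UL   /* illustrative: bit 0 of the stored pointer */

    /* struct page pointers are at least 4-byte aligned, so the low two
     * bits are free to carry per-page flags without extra storage. */
    static inline struct page *tagged_to_page(struct page *page)
    {
            return (struct page *)((unsigned long)page & ~3UL);
    }

    /* Mark one slot dirty; returns 1 if it was clean before, so the
     * caller can keep an aggregate dirty-page count up to date. */
    static inline int tagged_page_mkdirty(struct page **slot)
    {
            unsigned long v = (unsigned long)*slot;

            if (v & TAG_DIRTY)
                    return 0;
            *slot = (struct page *)(v | TAG_DIRTY);
            return 1;
    }
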
index b1c84d7aea2670c028ef0175f80174e529548276..346d417de851b4da70380962491850a8ab64f240 100644 (file)
@@ -119,7 +119,8 @@ enum {
 
 enum {
        NVMAP_PAGES_UNRESERVE = 0,
-       NVMAP_PAGES_RESERVE
+       NVMAP_PAGES_RESERVE,
+       NVMAP_INSERT_PAGES_ON_UNRESERVE
 };
 
 struct nvmap_create_handle {