video: tegra: nvmap: support clean only dirty option on write back
author    Sri Krishna chowdary <schowdary@nvidia.com>
          Fri, 11 Apr 2014 19:21:47 +0000 (00:51 +0530)
committer Tony Ly <tly@nvidia.com>
          Wed, 7 May 2014 18:51:34 +0000 (11:51 -0700)
Modify the cache maintenance operation to take an option specifying
whether to clean only dirty pages or all pages from the cache within
the specified range. This avoids the unnecessary overhead of cleaning
pages that are not dirty.

The clean-only-dirty option is preferred for user space accesses,
since dirty pages can be tracked only for those.

For kernel accesses there is no mechanism to identify dirty pages,
so this option cannot be used there.
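A minimal sketch, not part of this patch (every call site in the diff
below passes false): how a user-space-initiated write-back could opt
in to cleaning only dirty pages, given the signature introduced here.
The example_wb_user_range() wrapper is hypothetical.

    /* Sketch: user-space-initiated write-back of a handle range.
     * Dirty pages can be tracked only for user mappings, so only
     * such paths may pass clean_only_dirty = true. */
    static int example_wb_user_range(struct nvmap_client *client,
                                     struct nvmap_handle *h,
                                     unsigned long start,
                                     unsigned long end)
    {
            /* The flag is honored only for NVMAP_CACHE_OP_WB;
             * __nvmap_do_cache_maint() forces it to false for
             * any other op. */
            return __nvmap_do_cache_maint(client, h, start, end,
                                          NVMAP_CACHE_OP_WB,
                                          true /* clean_only_dirty */);
    }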

Bug 1444151

Change-Id: Ib6df78a3fb926d1327f25bf9d1320a743381b2d9
Signed-off-by: Sri Krishna chowdary <schowdary@nvidia.com>
Reviewed-on: http://git-master/r/395353
(cherry picked from commit 93f51facf143052e30e27bf1233a6d26e48ff996)
Reviewed-on: http://git-master/r/405130
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
drivers/video/tegra/nvmap/nvmap_dmabuf.c
drivers/video/tegra/nvmap/nvmap_ioctl.c
drivers/video/tegra/nvmap/nvmap_mm.c
drivers/video/tegra/nvmap/nvmap_priv.h

drivers/video/tegra/nvmap/nvmap_dmabuf.c
index cdbbb0c3ca4ced782157819838cd7eb0267db08b..159b2f2fc49763b3f7e7a5513e6c082ec4c7b7c4 100644
@@ -459,7 +459,7 @@ static int nvmap_dmabuf_begin_cpu_access(struct dma_buf *dmabuf,
 
        trace_nvmap_dmabuf_begin_cpu_access(dmabuf, start, len);
        return __nvmap_do_cache_maint(NULL, info->handle, start, start + len,
-                                     NVMAP_CACHE_OP_WB_INV);
+                                     NVMAP_CACHE_OP_WB_INV, false);
 }
 
 static void nvmap_dmabuf_end_cpu_access(struct dma_buf *dmabuf,
@@ -470,7 +470,7 @@ static void nvmap_dmabuf_end_cpu_access(struct dma_buf *dmabuf,
 
        trace_nvmap_dmabuf_end_cpu_access(dmabuf, start, len);
        __nvmap_do_cache_maint(NULL, info->handle, start, start + len,
-                                  NVMAP_CACHE_OP_WB);
+                                  NVMAP_CACHE_OP_WB, false);
 
 }
 
drivers/video/tegra/nvmap/nvmap_ioctl.c
index 2e15c24a0a963216db34294352c05388c254be9e..17f6897de86163ebd3f18ea0d3419dd4e46f1f79 100644
@@ -684,7 +684,8 @@ static int __nvmap_cache_maint(struct nvmap_client *client,
                (vma->vm_pgoff << PAGE_SHIFT);
        end = start + op->len;
 
-       err = __nvmap_do_cache_maint(client, vpriv->handle, start, end, op->op);
+       err = __nvmap_do_cache_maint(client, vpriv->handle, start, end, op->op,
+                                    false);
 out:
        up_read(&current->mm->mmap_sem);
        return err;
@@ -748,7 +749,7 @@ static void outer_cache_maint(unsigned int op, phys_addr_t paddr, size_t size)
 static void heap_page_cache_maint(
        struct nvmap_handle *h, unsigned long start, unsigned long end,
        unsigned int op, bool inner, bool outer, pte_t **pte,
-       unsigned long kaddr, pgprot_t prot)
+       unsigned long kaddr, pgprot_t prot, bool clean_only_dirty)
 {
 #ifdef NVMAP_LAZY_VFREE
        if (inner) {
@@ -855,7 +856,8 @@ static inline bool can_fast_cache_maint(struct nvmap_handle *h,
 
 static bool fast_cache_maint(struct nvmap_handle *h,
        unsigned long start,
-       unsigned long end, unsigned int op)
+       unsigned long end, unsigned int op,
+       bool clean_only_dirty)
 {
        if (!can_fast_cache_maint(h, start, end, op))
                return false;
@@ -871,7 +873,8 @@ static bool fast_cache_maint(struct nvmap_handle *h,
                {
                        if (h->heap_pgalloc) {
                                heap_page_cache_maint(h, start,
-                                       end, op, false, true, NULL, 0, 0);
+                                       end, op, false, true, NULL, 0, 0,
+                                       clean_only_dirty);
                        } else  {
                                phys_addr_t pstart;
 
@@ -890,6 +893,7 @@ struct cache_maint_op {
        struct nvmap_handle *h;
        bool inner;
        bool outer;
+       bool clean_only_dirty;
 };
 
 static int do_cache_maint(struct cache_maint_op *cache_work)
@@ -924,7 +928,7 @@ static int do_cache_maint(struct cache_maint_op *cache_work)
            h->flags == NVMAP_HANDLE_WRITE_COMBINE || pstart == pend)
                goto out;
 
-       if (fast_cache_maint(h, pstart, pend, op))
+       if (fast_cache_maint(h, pstart, pend, op, cache_work->clean_only_dirty))
                goto out;
 
        prot = nvmap_pgprot(h, PG_PROT_KERNEL);
@@ -939,7 +943,8 @@ static int do_cache_maint(struct cache_maint_op *cache_work)
                heap_page_cache_maint(h, pstart, pend, op, true,
                        (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ?
                                        false : true,
-                       pte, kaddr, prot);
+                       pte, kaddr, prot,
+                       cache_work->clean_only_dirty);
                goto out;
        }
 
@@ -978,7 +983,7 @@ out:
 int __nvmap_do_cache_maint(struct nvmap_client *client,
                        struct nvmap_handle *h,
                        unsigned long start, unsigned long end,
-                       unsigned int op)
+                       unsigned int op, bool clean_only_dirty)
 {
        int err;
        struct cache_maint_op cache_op;
@@ -990,6 +995,10 @@ int __nvmap_do_cache_maint(struct nvmap_client *client,
        if (op == NVMAP_CACHE_OP_INV)
                op = NVMAP_CACHE_OP_WB_INV;
 
+       /* clean only dirty is applicable only for Write Back operation */
+       if (op != NVMAP_CACHE_OP_WB)
+               clean_only_dirty = false;
+
        cache_op.h = h;
        cache_op.start = start;
        cache_op.end = end;
@@ -997,6 +1006,7 @@ int __nvmap_do_cache_maint(struct nvmap_client *client,
        cache_op.inner = h->flags == NVMAP_HANDLE_CACHEABLE ||
                         h->flags == NVMAP_HANDLE_INNER_CACHEABLE;
        cache_op.outer = h->flags == NVMAP_HANDLE_CACHEABLE;
+       cache_op.clean_only_dirty = clean_only_dirty;
 
        nvmap_stats_inc(NS_CFLUSH_RQ, end - start);
        err = do_cache_maint(&cache_op);
@@ -1090,7 +1100,7 @@ static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
                }
                if (is_read)
                        __nvmap_do_cache_maint(client, h, h_offs,
-                               h_offs + elem_size, NVMAP_CACHE_OP_INV);
+                               h_offs + elem_size, NVMAP_CACHE_OP_INV, false);
 
                ret = rw_handle_page(h, is_read, h_offs, sys_addr,
                                     elem_size, (unsigned long)addr, *pte);
@@ -1100,7 +1110,8 @@ static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
 
                if (!is_read)
                        __nvmap_do_cache_maint(client, h, h_offs,
-                               h_offs + elem_size, NVMAP_CACHE_OP_WB_INV);
+                               h_offs + elem_size, NVMAP_CACHE_OP_WB_INV,
+                               false);
 
                copied += elem_size;
                sys_addr += sys_stride;
drivers/video/tegra/nvmap/nvmap_mm.c
index 7f9dfbdd0065fe294e063d0eab2afc2378de7c0f..b22b705b6e5049d29e8eb0d1746654545c000471 100644
@@ -121,7 +121,7 @@ int nvmap_do_cache_maint_list(struct nvmap_handle **handles, int op, int nr)
                        err = __nvmap_do_cache_maint(handles[i]->owner,
                                                     handles[i], 0,
                                                     handles[i]->size,
-                                                    op);
+                                                    op, false);
                        if (err)
                                break;
                }
drivers/video/tegra/nvmap/nvmap_priv.h
index 26c6e2124b50c7b5a3f36f4dc48c4d3a3c6e9ddb..2d39b157b73bb625146ff6a9d116af6c4613268a 100644
@@ -456,7 +456,7 @@ int __nvmap_get_handle_param(struct nvmap_client *client,
                             struct nvmap_handle *h, u32 param, u64 *result);
 int __nvmap_do_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                           unsigned long start, unsigned long end,
-                          unsigned int op);
+                          unsigned int op, bool clean_only_dirty);
 struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
                                           const char *name);
 struct dma_buf *__nvmap_dmabuf_export_from_ref(struct nvmap_handle_ref *ref);
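Any downstream caller of __nvmap_do_cache_maint() must be updated for
the extra parameter. A minimal adaptation that preserves the old
clean-all behavior, which is also the only valid choice for kernel
accesses:

    /* Before this patch: */
    err = __nvmap_do_cache_maint(client, h, 0, h->size,
                                 NVMAP_CACHE_OP_WB_INV);

    /* After: pass false to keep the previous behavior of
     * cleaning all pages in the range. */
    err = __nvmap_do_cache_maint(client, h, 0, h->size,
                                 NVMAP_CACHE_OP_WB_INV, false);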