rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
video: tegra: nvmap: cache sync even when CacheSyncAtReserve is present
author Sri Krishna chowdary <schowdary@nvidia.com>
Wed, 29 Jul 2015 05:54:59 +0000 (11:24 +0530)
committer mobile promotions <svcmobile_promotions@nvidia.com>
Mon, 17 Aug 2015 19:44:35 +0000 (12:44 -0700)
User space takes responsibility for not calling reserve and a cache op at the same time. Given that promise, it is sometimes better for performance to avoid the zap when concurrent access by the CPU and the device is guaranteed in some other way.

Bug 200092803

Change-Id: I20d7e2106fd6114712b63991204d5e556912af21
Signed-off-by: Sri Krishna chowdary <schowdary@nvidia.com>
Reviewed-on: http://git-master/r/776097
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
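For reference, the behaviour the diff below removes can be condensed as in the sketch that follows. The identifiers are taken from the hunks, the surrounding code is elided, and the meaning of count (handles in the list carrying the sync-at-reserve flag) is an assumption based on the removed comment.

        /* Pre-patch: in __nvmap_cache_maint(), an explicit cache op on a handle
         * carrying the reserve-time sync flag never reached the maintenance code. */
        if (handle->userflags & NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE)
                goto put_handle;

        /* Pre-patch: in nvmap_ioctl_cache_maint_list(), a batched cache op was
         * reported as successful but skipped entirely when it was not the reserve
         * ioctl (count is assumed to track sync-at-reserve handles in the list). */
        if (count && !is_reserve_ioctl) {
                err = 0;
                goto free_mem;
        }

        /* Both shortcuts are deleted, so user-space cache ops are always applied. */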
drivers/video/tegra/nvmap/nvmap_ioctl.c

index 58d20d657fb01a63a481d66695d489a5a339bb64..0f1b9efbea5bbeb3903865e87ab45bb79ca80625 100644
@@ -642,9 +642,6 @@ static int __nvmap_cache_maint(struct nvmap_client *client,
        if (!handle)
                return -EINVAL;
 
-       if (handle->userflags & NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE)
-               goto put_handle;
-
        down_read(&current->mm->mmap_sem);
 
        vma = find_vma(current->active_mm, (unsigned long)op->addr);
@@ -671,7 +668,6 @@ static int __nvmap_cache_maint(struct nvmap_client *client,
                                     false);
 out:
        up_read(&current->mm->mmap_sem);
-put_handle:
        nvmap_handle_put(handle);
        return err;
 }
@@ -1319,12 +1315,6 @@ int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg,
                goto free_mem;
        }
 
-       /* skip cache op when NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE is specified */
-       if (count && !is_reserve_ioctl) {
-               err = 0;
-               goto free_mem;
-       }
-
        /*
         * When NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE is specified, mix can cause
         * cache WB_INV at unreserve op on iovmm handles increasing overhead.