video: tegra: nvmap: clean cache during page allocations into page pool
author     Krishna Reddy <vdumpa@nvidia.com>
           Tue, 5 Aug 2014 21:43:37 +0000 (14:43 -0700)
committer  Matthew Pedro <mapedro@nvidia.com>
           Fri, 15 Aug 2014 17:33:53 +0000 (10:33 -0700)
Clean the cache when pages are allocated into the page pool, so the
cache clean cost is not paid later, when pages are handed out of the pool.
Increase the page pool refill size from 512KB to 1MB.

Bug 1539190

Change-Id: I046289f5e4b52986f477890663bbc7b2cda76c25
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/453197
(cherry picked from commit 39b0d737566adca113baac1de61f6550634da4ea)
Reviewed-on: http://git-master/r/456780
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Jon Mayo <jmayo@nvidia.com>
GVS: Gerrit_Virtual_Submit
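
For reference, the pattern introduced here is: mark the pool dirty whenever freshly
allocated pages are pushed into it, then perform one whole-cache clean lazily, either
at the end of the background fill or before the first allocation out of a dirty pool.
The sketch below is a minimal userspace model of that idea, with hypothetical names and
stub functions standing in for pp_dirty, pp_clean_cache() and the inner/outer cache
clean calls; it is not the driver code itself.

/* Sketch only: models the dirty-flag / lazy-clean pattern, not nvmap itself. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pool_dirty;            /* stands in for pp_dirty               */

static void clean_whole_cache(void)      /* stands in for inner_clean_cache_all  */
{                                        /* plus outer_clean_all in the patch    */
        puts("whole-cache clean");
}

static void pool_clean_cache(void)       /* stands in for pp_clean_cache()       */
{
        if (atomic_load(&pool_dirty)) {
                clean_whole_cache();
                atomic_store(&pool_dirty, 0);
        }
}

static void pool_fill(int nr_pages)      /* models the background refill path    */
{
        (void)nr_pages;
        atomic_store(&pool_dirty, 1);    /* new pages may carry dirty lines      */
        /* ... push nr_pages pages into the pool ... */
        pool_clean_cache();              /* clean once, off the allocation path  */
}

static void pool_alloc(void)             /* models the allocation path           */
{
        pool_clean_cache();              /* no-op unless the pool is dirty       */
        /* ... hand out a page that is now known to be clean ... */
}

int main(void)
{
        pool_fill(256);
        pool_alloc();                    /* cleans nothing: pool already clean   */
        return 0;
}

Once the pool has been cleaned, further allocations from it cost only an atomic read
instead of any per-page cache maintenance.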

drivers/video/tegra/nvmap/nvmap_handle.c
drivers/video/tegra/nvmap/nvmap_pp.c

index 2b37c35d09183d61ad9218e37eeb5269493082b6..c3587c3f6d145dc9d4e74d6e028573f01ed3cc83 100644 (file)
@@ -220,10 +220,11 @@ static int handle_page_alloc(struct nvmap_client *client,
         * FIXME: For ARMv7 we don't have __clean_dcache_page() so we continue
         * to use the flush cache version.
         */
+       if (page_index < nr_page)
 #ifdef ARM64
-       nvmap_clean_cache(pages, nr_page);
+               nvmap_clean_cache(&pages[page_index], nr_page - page_index);
 #else
-       nvmap_flush_cache(pages, nr_page);
+               nvmap_flush_cache(&pages[page_index], nr_page - page_index);
 #endif
 
        h->size = size;
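
Reading the hunk above: pages[0 .. page_index) were taken from the page pool and are
already clean, while the tail pages[page_index .. nr_page) came straight from the system
allocator and still need a cache clean (ARM64) or flush (ARMv7). A minimal userspace
model of that logic, with a hypothetical stub standing in for nvmap_clean_cache() /
nvmap_flush_cache():

/* Sketch only: clean just the pages that were not served from the pool. */
#include <stdio.h>

#define NR_PAGE 8

static void clean_pages(int first, int count)   /* stands in for the clean/flush call */
{
        printf("clean pages [%d .. %d)\n", first, first + count);
}

int main(void)
{
        int page_index = 5;   /* pages [0 .. 5) came from the pool, already clean */

        if (page_index < NR_PAGE)
                clean_pages(page_index, NR_PAGE - page_index);  /* only the tail */
        return 0;
}

When every requested page was served from the (already cleaned) pool, page_index equals
nr_page and the maintenance call is skipped entirely.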
index 397995c16fb1656b9a2ec431c0aad10bdd077d35..4748155871a8d91eb3f725ce74262947ba028cfe 100644 (file)
@@ -32,7 +32,7 @@
 #include "nvmap_priv.h"
 
 #define NVMAP_TEST_PAGE_POOL_SHRINKER     1
-#define PENDING_PAGES_SIZE                128
+#define PENDING_PAGES_SIZE                (SZ_1M / PAGE_SIZE)
 #define MIN_AVAILABLE_MB                  128
 
 static bool enable_pp = 1;
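
The constant change above matches the commit message: with the usual 4 KiB PAGE_SIZE,
128 pending pages are 512 KiB per refill batch, and SZ_1M / PAGE_SIZE is 256 pages,
i.e. 1 MiB. A throwaway userspace check of that arithmetic (the 4 KiB page size is an
assumption; with larger pages the page count shrinks accordingly):

#include <stdio.h>

int main(void)
{
        const unsigned long page_size = 4096;       /* typical PAGE_SIZE */
        const unsigned long sz_1m     = 1UL << 20;  /* SZ_1M             */

        printf("old batch: %lu pages = %lu KiB\n",
               128UL, 128UL * page_size / 1024);    /* 128 pages, 512 KiB  */
        printf("new batch: %lu pages = %lu KiB\n",
               sz_1m / page_size, sz_1m / 1024);    /* 256 pages, 1024 KiB */
        return 0;
}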
@@ -41,6 +41,7 @@ static int pool_size;
 static struct task_struct *background_allocator;
 static struct page *pending_pages[PENDING_PAGES_SIZE];
 static atomic_t bg_pages_to_fill;
+static atomic_t pp_dirty;
 
 #ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
 static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
@@ -56,6 +57,21 @@ static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
 #define pp_hit_add(pool, nr)   __pp_dbg_var_add(&(pool)->hits, nr)
 #define pp_miss_add(pool, nr)  __pp_dbg_var_add(&(pool)->misses, nr)
 
+static void pp_clean_cache(void)
+{
+       if (atomic_read(&pp_dirty)) {
+               /*
+                * Make sure any data in the caches is cleaned out before
+                * passing these pages to userspace. Otherwise, it can lead
+                * to corruption in pages that get mapped as something other
+                * than WB in userspace, and to leaked kernel data.
+                */
+               inner_clean_cache_all();
+               outer_clean_all();
+               atomic_set(&pp_dirty, 0);
+       }
+}
+
 /*
  * Allocate n pages one by one. Not the most efficient allocation scheme ever;
  * however, it will make it easier later on to handle single or small number of
@@ -118,6 +134,7 @@ static void nvmap_pp_do_background_fill(struct nvmap_page_pool *pool)
                }
 
                nvmap_page_pool_lock(pool);
+               atomic_set(&pp_dirty, 1);
                i = __nvmap_page_pool_fill_lots_locked(pool, pending_pages, nr);
                nvmap_page_pool_unlock(pool);
                pages -= nr;
@@ -125,6 +142,10 @@ static void nvmap_pp_do_background_fill(struct nvmap_page_pool *pool)
 
        for (; i < nr; i++)
                __free_page(pending_pages[i]);
+       /* clean cache in the background so that allocations immediately
+        * after fill don't suffer the cache clean overhead.
+        */
+       pp_clean_cache();
 }
 
 /*
@@ -211,6 +232,7 @@ static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool,
        if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
                BUG_ON(pool->count == 0);
 
+       pp_clean_cache();
        page = pool->page_array[pool->alloc];
        pool->page_array[pool->alloc] = NULL;
        nvmap_pp_alloc_inc(pool);
@@ -244,6 +266,8 @@ int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
        if (!enable_pp || !pool->page_array)
                return 0;
 
+       pp_clean_cache();
+
        real_nr = min_t(u32, nr, pool->count);
 
        while (real_nr--) {
@@ -335,19 +359,6 @@ int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
        return ind;
 }
 
-bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page)
-{
-       bool ret = false;
-
-       if (pool) {
-               nvmap_page_pool_lock(pool);
-               ret = nvmap_page_pool_fill_locked(pool, page);
-               nvmap_page_pool_unlock(pool);
-       }
-
-       return ret;
-}
-
 static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
 {
        return pool->count;
@@ -682,6 +693,7 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
        pages_to_fill = pages_to_fill ? : pool->length;
 
        nvmap_page_pool_lock(pool);
+       atomic_set(&pp_dirty, 1);
        for (i = 0; i < pages_to_fill; i++) {
                page = alloc_page(GFP_NVMAP);
                if (!page)
@@ -700,6 +712,7 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
                highmem_pages, pool->length,
                info.totalram, info.freeram, info.totalhigh, info.freehigh);
 done:
+       pp_clean_cache();
        nvmap_page_pool_unlock(pool);
 #endif
        return 0;