rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
video: tegra: nvmap: clean cache during page allocations into page pool
author Krishna Reddy <vdumpa@nvidia.com>
Tue, 5 Aug 2014 21:43:37 +0000 (14:43 -0700)
committer Winnie Hsu <whsu@nvidia.com>
Tue, 12 May 2015 20:28:52 +0000 (13:28 -0700)
Clean the cache when pages are filled into the page pool, so that
allocations from the pool do not pay the cache clean overhead.
Increase the page pool refill size to 1MB from 512KB.

Bug 1539190

Change-Id: I6c45782e54879541f7b518bbbb016383b24e376b
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/453197
Reviewed-by: Sri Krishna Chowdary <schowdary@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Michael I Gold <gold@nvidia.com>
[ccross: moved on top of background zeroing patches, replaced
 atomic with bool since it has to be protected by a lock anyway]
Signed-off-by: Colin Cross <ccross@android.com>
Reviewed-on: http://git-master/r/664676
Reviewed-on: http://git-master/r/736430
Tested-by: Alex Waterman <alexw@nvidia.com>
drivers/video/tegra/nvmap/nvmap_handle.c
drivers/video/tegra/nvmap/nvmap_pp.c
drivers/video/tegra/nvmap/nvmap_priv.h

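The heart of the change is a lazy, batched cache clean: filling uncleaned pages into the pool merely sets a dirty flag, and a single whole-cache clean runs (in the background zeroing thread, or on the first allocation that finds the flag set) before any page leaves the pool. Below is a minimal userspace C sketch of that pattern, not the driver itself: the pthread mutex and the clean_all_caches() stub are stand-ins for the kernel's pool->lock and inner_clean_cache_all()/outer_clean_all(), and the pool is reduced to a counter.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for inner_clean_cache_all() + outer_clean_all(). */
    static void clean_all_caches(void)
    {
            puts("whole-cache clean");
    }

    struct page_pool {
            pthread_mutex_t lock;
            bool contains_dirty_pages; /* plain bool: only touched under lock */
            int count;                 /* pages currently in the pool */
    };

    /* Mirrors pp_clean_cache(): must be called with pool->lock held. */
    static void pool_clean_cache(struct page_pool *pool)
    {
            if (pool->contains_dirty_pages) {
                    clean_all_caches(); /* one batched clean covers every dirty page */
                    pool->contains_dirty_pages = false;
            }
    }

    /* Fill path: incoming pages may have dirty cache lines, so mark the pool. */
    static void pool_fill(struct page_pool *pool, int nr)
    {
            pthread_mutex_lock(&pool->lock);
            pool->contains_dirty_pages = true;
            pool->count += nr;
            pthread_mutex_unlock(&pool->lock);
    }

    /* Alloc path: guarantee a clean has happened before pages are handed out. */
    static int pool_alloc(struct page_pool *pool, int nr)
    {
            int got;

            pthread_mutex_lock(&pool->lock);
            pool_clean_cache(pool);
            got = nr < pool->count ? nr : pool->count;
            pool->count -= got;
            pthread_mutex_unlock(&pool->lock);
            return got;
    }

    int main(void)
    {
            struct page_pool pool = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

            pool_fill(&pool, 256);
            pool_alloc(&pool, 4); /* first alloc pays the one batched clean */
            pool_alloc(&pool, 4); /* pool already clean: no maintenance cost */
            return 0;
    }

The trade-off is that a whole-cache clean may touch more lines than strictly necessary, but it replaces per-page maintenance on every allocation with one batched operation per fill.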
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index 813496596a14d59128b034c43e744bd9c46b480c..b14035b2ac12be2fdfd531dbc9c7e604d1154c8d 100644
@@ -206,10 +206,11 @@ static int handle_page_alloc(struct nvmap_client *client,
         * FIXME: For ARMv7 we don't have __clean_dcache_page() so we continue
         * to use the flush cache version.
         */
+       if (page_index < nr_page)
 #ifdef ARM64
-       nvmap_clean_cache(pages, nr_page);
+               nvmap_clean_cache(&pages[page_index], nr_page - page_index);
 #else
-       nvmap_flush_cache(pages, nr_page);
+               nvmap_flush_cache(&pages[page_index], nr_page - page_index);
 #endif
 
        h->size = size;
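In the hunk above, pages[0..page_index-1] appear to be the pages satisfied from the page pool, which were already cleaned when they entered it, so only the freshly allocated tail still needs maintenance (that reading of page_index is an assumption from the surrounding code, which this diff does not show). A tiny runnable illustration of the guard and the count arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned int nr_page = 8;    /* pages needed for the handle              */
            unsigned int page_index = 5; /* assumed: supplied by the pool, pre-cleaned */

            /* Only the freshly allocated tail needs cache maintenance. */
            if (page_index < nr_page)
                    printf("clean %u of %u pages, starting at pages[%u]\n",
                           nr_page - page_index, nr_page, page_index);
            return 0;
    }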
diff --git a/drivers/video/tegra/nvmap/nvmap_pp.c b/drivers/video/tegra/nvmap/nvmap_pp.c
index af34da678261379c78b51ea4ac7d25a20a508932..a9f5c3eb4cfdf2402688ddbe3bc35eecd0793849 100644
@@ -34,7 +34,7 @@
 #include "nvmap_priv.h"
 
 #define NVMAP_TEST_PAGE_POOL_SHRINKER     1
-#define PENDING_PAGES_SIZE                32
+#define PENDING_PAGES_SIZE                (SZ_1M / PAGE_SIZE)
 
 static bool enable_pp = 1;
 static int pool_size;
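With the usual 4 KB PAGE_SIZE (an assumption; SZ_1M is the kernel's 0x100000), this macro change grows the pending batch from 32 pages to 256 pages, i.e. one megabyte of pages zeroed, cleaned, and filled into the pool per background pass. As a quick compile-time check:

    #define SZ_1M     (1024 * 1024)
    #define PAGE_SIZE 4096 /* assumption: 4 KB pages */
    #define PENDING_PAGES_SIZE (SZ_1M / PAGE_SIZE)

    _Static_assert(PENDING_PAGES_SIZE == 256, "1 MB batch = 256 pages");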
@@ -59,6 +59,23 @@ static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
 static int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
                                       struct page **pages, u32 nr);
 
+/*
+ * Make sure any data in the caches is cleaned out before
+ * passing these pages to userspace. Otherwise, it can lead to
+ * corruption in pages that get mapped as something other
+ * than WB in userspace, and to leaked kernel data.
+ *
+ * Must be called with pool->lock held.
+ */
+static void pp_clean_cache(struct nvmap_page_pool *pool)
+{
+       if (pool->contains_dirty_pages) {
+               inner_clean_cache_all();
+               outer_clean_all();
+               pool->contains_dirty_pages = false;
+       }
+}
+
 static inline struct page *get_zero_list_page(struct nvmap_page_pool *pool)
 {
        struct page *page;
@@ -144,6 +161,13 @@ static void nvmap_pp_do_background_zero_pages(struct nvmap_page_pool *pool)
 out:
        for (; ret < i; ret++)
                __free_page(pending_zero_pages[ret]);
+
+       /* clean cache in the background so that allocations immediately
+        * after fill don't suffer the cache clean overhead.
+        */
+       mutex_lock(&pool->lock);
+       pp_clean_cache(pool);
+       mutex_unlock(&pool->lock);
 }
 
 /*
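Because the clean is folded into this background zeroing pass, a foreground allocation that takes pool->lock will usually find contains_dirty_pages already cleared; the pp_clean_cache() calls on the allocation paths below only do work when an allocation races ahead of the background thread.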
@@ -197,6 +221,7 @@ static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool,
        if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
                BUG_ON(pool->count == 0);
 
+       pp_clean_cache(pool);
        page = get_page_list_page(pool);
        if (!page)
                return NULL;
@@ -228,6 +253,7 @@ int nvmap_page_pool_alloc_lots(struct nvmap_page_pool *pool,
                return 0;
 
        mutex_lock(&pool->lock);
+       pp_clean_cache(pool);
 
        real_nr = min_t(u32, nr, pool->count);
 
@@ -267,6 +293,8 @@ static int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
        if (!enable_pp)
                return 0;
 
+       pool->contains_dirty_pages = true;
+
        real_nr = min_t(u32, pool->max - pool->count, nr);
        if (real_nr == 0)
                return 0;
diff --git a/drivers/video/tegra/nvmap/nvmap_priv.h b/drivers/video/tegra/nvmap/nvmap_priv.h
index 0eeaee4d7e694694ec7b550fe62e059a64c67ce5..977897a61aa23a429ccc2429927c9dbba8e6f77d 100644
@@ -176,6 +176,7 @@ struct nvmap_page_pool {
        int to_zero; /* Number of pages on the zero list */
        struct list_head page_list;
        struct list_head zero_list;
+       bool contains_dirty_pages;
 
 #ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
        u64 allocs;
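As the [ccross] note in the commit message explains, contains_dirty_pages can be a plain bool rather than an atomic: every reader and writer already holds pool->lock, so the lock provides all the ordering the flag needs.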