video: tegra: nvmap: clean cache during page allocations into page pool
diff --git a/drivers/video/tegra/nvmap/nvmap_pp.c b/drivers/video/tegra/nvmap/nvmap_pp.c
index af34da678261379c78b51ea4ac7d25a20a508932..a9f5c3eb4cfdf2402688ddbe3bc35eecd0793849 100644
--- a/drivers/video/tegra/nvmap/nvmap_pp.c
+++ b/drivers/video/tegra/nvmap/nvmap_pp.c
@@ -34,7 +34,7 @@
 #include "nvmap_priv.h"
 
 #define NVMAP_TEST_PAGE_POOL_SHRINKER     1
-#define PENDING_PAGES_SIZE                32
+#define PENDING_PAGES_SIZE                (SZ_1M / PAGE_SIZE)
 
 static bool enable_pp = 1;
 static int pool_size;
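The first hunk scales the zeroing batch with the page size instead of hard-coding 32 entries. A quick sketch of the arithmetic, assuming the common 4 KiB PAGE_SIZE (the actual value is configuration dependent, and the array declaration below is inferred from how pending_zero_pages is used later in this file):

    /* SZ_1M / PAGE_SIZE = 1048576 / 4096 = 256 entries, so each
     * background pass can now zero up to 1 MiB worth of pages, versus
     * the previous fixed 32 pages (128 KiB with 4 KiB pages).
     */
    static struct page *pending_zero_pages[PENDING_PAGES_SIZE];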
@@ -59,6 +59,23 @@ static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
 static int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
                                       struct page **pages, u32 nr);
 
+/*
+ * Make sure any data in the caches is cleaned out before
+ * passing these pages to userspace. Otherwise, stale dirty
+ * cache lines can corrupt pages that userspace maps as
+ * something other than WB, and can leak kernel data.
+ *
+ * Must be called with pool->lock held.
+ */
+static void pp_clean_cache(struct nvmap_page_pool *pool)
+{
+       if (pool->contains_dirty_pages) {
+               inner_clean_cache_all();
+               outer_clean_all();
+               pool->contains_dirty_pages = false;
+       }
+}
+
 static inline struct page *get_zero_list_page(struct nvmap_page_pool *pool)
 {
        struct page *page;
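pp_clean_cache() makes the flush lazy and amortized: fills only mark the pool dirty, and the whole inner and outer caches are cleaned at most once, under pool->lock, before any dirty page can leave the pool. The later hunks add that call to both allocation paths; a condensed sketch of the resulting fast path, using the identifiers from this file:

    mutex_lock(&pool->lock);
    pp_clean_cache(pool);		/* no-op unless a fill marked the pool dirty */
    page = get_page_list_page(pool);	/* pages handed out are now clean in DRAM */
    mutex_unlock(&pool->lock);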
@@ -144,6 +161,13 @@ static void nvmap_pp_do_background_zero_pages(struct nvmap_page_pool *pool)
 out:
        for (; ret < i; ret++)
                __free_page(pending_zero_pages[ret]);
+
+       /* Clean the cache in the background so that allocations made
+        * immediately after a fill don't pay the cache-clean cost.
+        */
+       mutex_lock(&pool->lock);
+       pp_clean_cache(pool);
+       mutex_unlock(&pool->lock);
 }
 
 /*
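This hunk also cleans the cache from the background zeroing thread, so the first allocation after a fill usually finds the dirty flag already cleared. A condensed, hypothetical restatement of the tail of nvmap_pp_do_background_zero_pages() (i and count are illustrative names; clear_page()/page_address() stand in for the zeroing done earlier in the function):

    /* Zeroing dirties the CPU caches with the pages' new contents ... */
    for (i = 0; i < count; i++)
        clear_page(page_address(pending_zero_pages[i]));

    /* ... so write them back to DRAM here, off the allocation fast path. */
    mutex_lock(&pool->lock);
    pp_clean_cache(pool);
    mutex_unlock(&pool->lock);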
@@ -197,6 +221,7 @@ static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool,
        if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
                BUG_ON(pool->count == 0);
 
+       pp_clean_cache(pool);
        page = get_page_list_page(pool);
        if (!page)
                return NULL;
@@ -228,6 +253,7 @@ int nvmap_page_pool_alloc_lots(struct nvmap_page_pool *pool,
                return 0;
 
        mutex_lock(&pool->lock);
+       pp_clean_cache(pool);
 
        real_nr = min_t(u32, nr, pool->count);
 
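nvmap_page_pool_alloc_lots() clamps the request to pool->count, so callers can receive fewer pages than requested. A hypothetical caller, assuming (as the clamp suggests) that the return value is the number of pages actually taken from the pool:

    int i, got = nvmap_page_pool_alloc_lots(pool, pages, nr);

    for (i = got; i < nr; i++)
        pages[i] = alloc_page(GFP_KERNEL);	/* fall back to the buddy allocator */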
@@ -267,6 +293,8 @@ static int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
        if (!enable_pp)
                return 0;
 
+       pool->contains_dirty_pages = true;
+
        real_nr = min_t(u32, pool->max - pool->count, nr);
        if (real_nr == 0)
                return 0;
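This is the fill side of the protocol: the flag is raised before any page is published, and it is set even when the pool is already full, which errs on the side of an extra clean. The __..._locked suffix says the caller already holds pool->lock, so the next allocator to take the lock is guaranteed to see the flag and clean before handing pages out. The call pattern, matching the declaration at the top of the file (filled is an illustrative name):

    mutex_lock(&pool->lock);
    filled = __nvmap_page_pool_fill_lots_locked(pool, pages, nr);
    mutex_unlock(&pool->lock);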