#include "nvmap_priv.h"
#define NVMAP_TEST_PAGE_POOL_SHRINKER 1
-#define PENDING_PAGES_SIZE 32
+#define PENDING_PAGES_SIZE (SZ_1M / PAGE_SIZE)
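+/* Work on up to 1MB worth of pages (SZ_1M / PAGE_SIZE) at a time. */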
static bool enable_pp = true;
static int pool_size;
static int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
struct page **pages, u32 nr);
+/*
+ * Make sure any data in the caches is cleaned out before
+ * passing these pages to userspace. Otherwise, pages that get
+ * mapped as something other than WB in userspace can end up
+ * corrupted and can leak stale kernel data.
+ *
+ * Must be called with pool->lock held.
+ */
+static void pp_clean_cache(struct nvmap_page_pool *pool)
+{
+ if (pool->contains_dirty_pages) {
+ inner_clean_cache_all();
+ outer_clean_all();
+ pool->contains_dirty_pages = false;
+ }
+}
+
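/* Grab a page from the pool's zero list, if one is available. */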
static inline struct page *get_zero_list_page(struct nvmap_page_pool *pool)
{
struct page *page;
out:
for (; ret < i; ret++)
__free_page(pending_zero_pages[ret]);
+
+ /*
+ * Clean the cache in the background so that allocations
+ * immediately after a fill don't suffer the cache clean overhead.
+ */
+ mutex_lock(&pool->lock);
+ pp_clean_cache(pool);
+ mutex_unlock(&pool->lock);
}
/*
if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
BUG_ON(pool->count == 0);
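+ /* Clean any dirty pages before one leaves the pool. */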
+ pp_clean_cache(pool);
page = get_page_list_page(pool);
if (!page)
return NULL;
return 0;
mutex_lock(&pool->lock);
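+ /* As above: clean dirty pages before handing out a batch. */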
+ pp_clean_cache(pool);
real_nr = min_t(u32, nr, pool->count);
if (!enable_pp)
return 0;
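+ /*
+ * Incoming pages may carry dirty cache lines; flag the pool so
+ * the cache is cleaned before any of them are handed out.
+ */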
+ pool->contains_dirty_pages = true;
+
real_nr = min_t(u32, pool->max - pool->count, nr);
if (real_nr == 0)
return 0;