#include "nvmap_priv.h"
#define NVMAP_TEST_PAGE_POOL_SHRINKER 1
-#define PENDING_PAGES_SIZE 128
+#define PENDING_PAGES_SIZE (SZ_1M / PAGE_SIZE)
#define MIN_AVAILABLE_MB 128
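(For scale: with the usual 4 KiB pages, SZ_1M / PAGE_SIZE = 1048576 / 4096 = 256, so each background fill now batches 256 pages, i.e. exactly 1 MiB, instead of the previous fixed 128, and the batch size scales automatically with PAGE_SIZE.)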
static bool enable_pp = 1;
static struct task_struct *background_allocator;
static struct page *pending_pages[PENDING_PAGES_SIZE];
static atomic_t bg_pages_to_fill;
+static atomic_t pp_dirty;
#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
#define pp_hit_add(pool, nr) __pp_dbg_var_add(&(pool)->hits, nr)
#define pp_miss_add(pool, nr) __pp_dbg_var_add(&(pool)->misses, nr)
+static void pp_clean_cache(void)
+{
+ if (atomic_read(&pp_dirty)) {
+ /*
+ * Make sure any data in the caches is cleaned out before
+ * passing these pages to userspace; otherwise, it can lead to
+ * corruption in pages that get mapped as something other than
+ * WB in userspace, and to leaked kernel data.
+ */
+ inner_clean_cache_all();
+ outer_clean_all();
+ atomic_set(&pp_dirty, 0);
+ }
+}
+
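For orientation, the pattern these hunks introduce is "mark the pool dirty on fill, flush lazily before pages are handed out". Below is a minimal userspace sketch of just that flag logic, with invented names: pool_dirty stands in for pp_dirty, and flush_all_caches() is a hypothetical placeholder for the inner_clean_cache_all()/outer_clean_all() pair.

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int pool_dirty;	/* stands in for pp_dirty */

	/* Hypothetical placeholder for inner_clean_cache_all() + outer_clean_all(). */
	static void flush_all_caches(void)
	{
		puts("flushing caches");
	}

	/* Producers: call after stashing pages written through a cacheable mapping. */
	static void mark_pool_dirty(void)
	{
		atomic_store(&pool_dirty, 1);
	}

	/* Consumers: call before handing pages out.  The flush runs at most once
	 * per dirty period, however many allocations follow. */
	static void clean_pool_if_dirty(void)
	{
		if (atomic_load(&pool_dirty)) {
			flush_all_caches();
			atomic_store(&pool_dirty, 0);
		}
	}

Note that the flush is whole-cache: the pool does not track which pages are dirty, so one full clean amortizes over everything stashed since the last flush.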
/*
* Allocate n pages one by one. Not the most efficient allocation scheme ever;
* however, it will make it easier later on to handle single or small number of
}
nvmap_page_pool_lock(pool);
+ atomic_set(&pp_dirty, 1);
i = __nvmap_page_pool_fill_lots_locked(pool, pending_pages, nr);
nvmap_page_pool_unlock(pool);
pages -= nr;
for (; i < nr; i++)
__free_page(pending_pages[i]);
+ /*
+ * Clean the cache from the background thread so that allocations
+ * immediately after a fill don't suffer the cache-clean overhead.
+ */
+ pp_clean_cache();
}
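The ordering in this hunk is the point of the patch: the dirty mark and the fill happen under the pool lock, and the flush runs afterwards in the background allocator thread, so a foreground allocation right after a fill usually finds the flag already clear. Continuing the sketch above, with pool_mutex and pool_fill_locked() as invented stand-ins for nvmap_page_pool_lock() and __nvmap_page_pool_fill_lots_locked():

	#include <pthread.h>

	static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* Invented stub: pretend all nr pages were stashed in the pool. */
	static unsigned int pool_fill_locked(unsigned int nr)
	{
		return nr;
	}

	/* Background filler: stash freshly allocated pages, then flush eagerly
	 * so the next allocation does not have to pay for the clean itself. */
	static void background_fill_once(unsigned int nr)
	{
		pthread_mutex_lock(&pool_mutex);
		mark_pool_dirty();	/* set before the pages become visible */
		pool_fill_locked(nr);
		pthread_mutex_unlock(&pool_mutex);

		/* Off the allocation path; correctness still rests on consumers
		 * re-checking the flag under the lock before taking pages. */
		clean_pool_if_dirty();
	}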
/*
if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
BUG_ON(pool->count == 0);
+ pp_clean_cache();
page = pool->page_array[pool->alloc];
pool->page_array[pool->alloc] = NULL;
nvmap_pp_alloc_inc(pool);
if (!enable_pp || !pool->page_array)
return 0;
+ pp_clean_cache();
+
real_nr = min_t(u32, nr, pool->count);
while (real_nr--) {
return ind;
}
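On the consumer side, both allocation paths call pp_clean_cache() before touching page_array; this is the safety net for the case where the background flush has not run yet. In the sketch's terms, with pool_take_locked() as an invented stand-in for popping one entry from page_array:

	/* Invented stub: pop one stashed page, or NULL if the pool is empty. */
	static void *pool_take_locked(void)
	{
		return NULL;
	}

	/* Consumer: flush-if-dirty before handing a page out, so no page ever
	 * leaves the pool with stale cacheable lines behind it. */
	static void *pool_alloc_one(void)
	{
		void *page;

		pthread_mutex_lock(&pool_mutex);
		clean_pool_if_dirty();	/* fallback clean on the hot path */
		page = pool_take_locked();
		pthread_mutex_unlock(&pool_mutex);
		return page;
	}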
-bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page)
-{
- bool ret = false;
-
- if (pool) {
- nvmap_page_pool_lock(pool);
- ret = nvmap_page_pool_fill_locked(pool, page);
- nvmap_page_pool_unlock(pool);
- }
-
- return ret;
-}
-
static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
{
return pool->count;
pages_to_fill = pages_to_fill ? : pool->length;
nvmap_page_pool_lock(pool);
+ atomic_set(&pp_dirty, 1);
for (i = 0; i < pages_to_fill; i++) {
page = alloc_page(GFP_NVMAP);
if (!page)
highmem_pages, pool->length,
info.totalram, info.freeram, info.totalhigh, info.freehigh);
done:
+ pp_clean_cache();
nvmap_page_pool_unlock(pool);
#endif
return 0;
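One difference from the background path is worth noting: this fill flushes at done: while still holding the pool lock, so the refilled pages cannot be taken ahead of the clean. Completing the sketch:

	/* Debugfs/shrinker-test fill: flush before dropping the lock, so the
	 * refilled pages cannot be handed out ahead of the clean. */
	static void debug_fill(unsigned int nr)
	{
		pthread_mutex_lock(&pool_mutex);
		mark_pool_dirty();
		pool_fill_locked(nr);
		clean_pool_if_dirty();
		pthread_mutex_unlock(&pool_mutex);
	}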