/*
 * drivers/video/tegra/nvmap/nvmap_pp.c
 *
 * Manage page pools to speed up page allocation.
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/shrinker.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>

#include "nvmap_priv.h"

#define NVMAP_TEST_PAGE_POOL_SHRINKER	1
#define PENDING_PAGES_SIZE		128
#define MIN_AVAILABLE_MB		128
static bool enable_pp = true;
static int pool_size;

static struct task_struct *background_allocator;
static struct page *pending_pages[PENDING_PAGES_SIZE];
static atomic_t bg_pages_to_fill;
#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
{
	*dbg_var += nr;
}
#else
#define __pp_dbg_var_add(dbg_var, nr)
#endif

#define pp_alloc_add(pool, nr) __pp_dbg_var_add(&(pool)->allocs, nr)
#define pp_fill_add(pool, nr)  __pp_dbg_var_add(&(pool)->fills, nr)
#define pp_hit_add(pool, nr)   __pp_dbg_var_add(&(pool)->hits, nr)
#define pp_miss_add(pool, nr)  __pp_dbg_var_add(&(pool)->misses, nr)
static inline struct page *get_page_list_page(struct nvmap_page_pool *pool)
{
	struct page *page;

	if (list_empty(&pool->page_list))
		return NULL;

	page = list_first_entry(&pool->page_list, struct page, lru);
	list_del(&page->lru);
	pool->count--;

	return page;
}
/*
 * Allocate n pages one by one. Not the most efficient allocation scheme ever;
 * however, it makes it easier later on to handle freeing of single pages or
 * small numbers of pages allocated from the page pool.
 */
static int __nvmap_pp_alloc_n_pages(struct page **pages, int n, gfp_t flags)
{
	int i;

	for (i = 0; i < n; i++) {
		pages[i] = alloc_page(flags);
		if (!pages[i])
			goto no_mem;
	}

	return 0;

no_mem:
	/* Unwind: free everything allocated so far. */
	for (i -= 1; i >= 0; i--)
		__free_page(pages[i]);
	return -ENOMEM;
}
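/*
 * Illustrative caller sketch (hypothetical, not part of this driver): the
 * helper unwinds its own partial allocation on failure, so a caller only
 * needs to check the return value; there is nothing to clean up:
 *
 *	struct page *batch[16];
 *
 *	if (__nvmap_pp_alloc_n_pages(batch, 16, GFP_NVMAP | __GFP_ZERO))
 *		return -ENOMEM;
 */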
/*
 * Actually do the fill. This requires a few steps:
 *
 *  1. Allocate a bunch of pages.
 *
 *  2. Fill the page pool with the allocated pages. We don't want to hold the
 *     PP lock for too long so this is the only time we hold the PP lock.
 *
 *  3. Rinse and repeat until we have allocated all the pages we think we need
 *     or the page pool is full. Since we are not holding the lock for the
 *     entire fill it is possible that other pages were filled into the pool.
 *
 *  4. Free any leftover pages if the pool is filled before we finish.
 */
static void nvmap_pp_do_background_fill(struct nvmap_page_pool *pool)
{
	int err;
	u32 pages = 0, nr, i;
	gfp_t gfp = GFP_NVMAP | __GFP_NOMEMALLOC |
		    __GFP_NORETRY | __GFP_NO_KSWAPD;

	pages = (u32)atomic_xchg(&bg_pages_to_fill, pages);

	if (!pages || !enable_pp)
		return;

	/* If this param is set, force zero page allocation. */
	if (zero_memory)
		gfp |= __GFP_ZERO;

	do {
		nr = min_t(u32, PENDING_PAGES_SIZE, pages);

		err = __nvmap_pp_alloc_n_pages(pending_pages, nr, gfp);
		if (err) {
			pr_info("Failed to alloc %u pages for PP!\n", pages);
			return;
		}

		nvmap_page_pool_lock(pool);
		i = __nvmap_page_pool_fill_lots_locked(pool, pending_pages, nr);
		nvmap_page_pool_unlock(pool);
		pages -= nr;
	} while (pages && i == nr);

	/* Free any pages the pool did not accept on the last pass. */
	for (; i < nr; i++)
		__free_page(pending_pages[i]);
}
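/*
 * Worked trace of the fill loop above (illustrative): with pages = 300 and
 * PENDING_PAGES_SIZE = 128 the loop runs with nr = 128, 128 and then 44.
 * If on any pass the pool accepts fewer than nr pages (i < nr), the loop
 * exits and the trailing for loop frees entries i..nr-1 of pending_pages.
 */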
/*
 * This thread fills the page pools with zeroed pages. We avoid releasing the
 * pages directly back into the page pools since we would then have to zero
 * them ourselves. Instead it is easier to just reallocate zeroed pages. This
 * happens in the background so that the overhead of allocating zeroed pages is
 * not directly seen by userspace. Of course if the page pools are empty, user
 * space takes the allocation hit inline.
 */
static int nvmap_background_zero_allocator(void *arg)
{
	pr_info("PP alloc thread starting.\n");

	while (1) {
		if (kthread_should_stop())
			break;

		nvmap_pp_do_background_fill(&nvmap_dev->pool);

		/* Pending work is done - go to sleep. */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}

	return 0;
}
/*
 * Call this if the background allocator should possibly wake up. This function
 * will check to make sure it's actually a good idea for that to happen before
 * waking the allocator up.
 */
static inline void nvmap_pp_wake_up_allocator(void)
{
	struct nvmap_page_pool *pool = &nvmap_dev->pool;
	struct sysinfo info;
	int free_pages, tmp;

	if (!enable_pp)
		return;

	/* Heuristic: if we don't need to prefill explicitly zeroed memory then
	 * lots of memory can be placed back in the pools by possible frees.
	 * Therefore don't fill the pool unless we really need to as we may get
	 * more memory without needing to alloc pages.
	 */
	if (!zero_memory && pool->count > NVMAP_PP_ZERO_MEM_FILL_MIN)
		return;

	if (pool->max - pool->count < NVMAP_PP_DEF_FILL_THRESH)
		return;

	si_meminfo(&info);
	free_pages = (int)info.freeram;

	tmp = free_pages - (MIN_AVAILABLE_MB << (20 - PAGE_SHIFT));
	if (tmp <= 0)
		return;

	/* Let the background thread know how much memory to fill. */
	atomic_set(&bg_pages_to_fill,
		   min(tmp, (int)(pool->max - pool->count)));
	wake_up_process(background_allocator);
}
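/*
 * Worked example of the threshold arithmetic above (assuming 4 KiB pages,
 * i.e. PAGE_SHIFT == 12): MIN_AVAILABLE_MB << (20 - PAGE_SHIFT) is
 * 128 << 8 = 32768 pages, which is exactly 128 MB. So the allocator is only
 * woken when at least 128 MB of system memory would remain free after the
 * fill.
 */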
/*
 * This removes a page from the page pool. If force_alloc is set, then
 * the enable_pp flag is ignored.
 */
static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool,
						 int force_alloc)
{
	struct page *page;

	if (!force_alloc && !enable_pp)
		return NULL;

	if (list_empty(&pool->page_list)) {
		pp_miss_add(pool, 1);
		return NULL;
	}

	if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
		BUG_ON(pool->count == 0);

	page = get_page_list_page(pool);

	/* Drop the pool's extra debug reference and verify that only the
	 * caller's reference remains. */
	if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
		atomic_dec(&page->_count);
		BUG_ON(atomic_read(&page->_count) != 1);
	}

	pp_alloc_add(pool, 1);
	pp_hit_add(pool, 1);

	return page;
}
/*
 * Alloc a bunch of pages from the page pool. This will alloc as many as it can
 * and return the number of pages allocated. Pages are placed into the passed
 * array in a linear fashion starting from index 0.
 *
 * You must lock the page pool before using this.
 */
int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
					struct page **pages, u32 nr)
{
	u32 real_nr;
	u32 ind = 0;

	if (!enable_pp)
		return 0;

	real_nr = min_t(u32, nr, pool->count);

	while (real_nr--) {
		struct page *page;

		if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
			BUG_ON(list_empty(&pool->page_list));

		page = get_page_list_page(pool);
		pages[ind++] = page;

		if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
			atomic_dec(&page->_count);
			BUG_ON(atomic_read(&page->_count) != 1);
		}
	}

	pp_alloc_add(pool, ind);
	pp_hit_add(pool, ind);
	pp_miss_add(pool, nr - ind);
	nvmap_pp_wake_up_allocator();

	return ind;
}
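/*
 * Illustrative usage sketch (hypothetical caller, not part of this file).
 * The pool lock must be held across the call; the return value says how
 * many slots of the array were populated:
 *
 *	struct page *pages[32];
 *	u32 got;
 *
 *	nvmap_page_pool_lock(&nvmap_dev->pool);
 *	got = __nvmap_page_pool_alloc_lots_locked(&nvmap_dev->pool, pages, 32);
 *	nvmap_page_pool_unlock(&nvmap_dev->pool);
 *
 * Slots [got, 32) remain unset and must be satisfied by alloc_page().
 */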
/*
 * This adds a page to the pool. Returns true if the passed page is added;
 * if the pool is full the page is not added and false is returned.
 */
static bool nvmap_page_pool_fill_locked(struct nvmap_page_pool *pool,
					struct page *page)
{
	if (!enable_pp)
		return false;

	if (pool->count >= pool->max)
		return false;

	/* In debug builds the pool holds an extra reference on each pooled
	 * page; take it here and sanity check the count. */
	if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
		atomic_inc(&page->_count);
		BUG_ON(atomic_read(&page->_count) != 2);
		BUG_ON(pool->count > pool->max);
	}

	list_add_tail(&page->lru, &pool->page_list);
	pool->count++;
	pp_fill_add(pool, 1);

	return true;
}
/*
 * Fill a bunch of pages into the page pool. This will fill as many as it can
 * and return the number of pages filled. Pages are used from the start of the
 * passed page pointer array in a linear fashion.
 *
 * You must lock the page pool before using this.
 */
int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
				       struct page **pages, u32 nr)
{
	u32 real_nr;
	u32 ind = 0;

	if (!enable_pp)
		return 0;

	real_nr = min_t(u32, pool->max - pool->count, nr);

	while (real_nr--) {
		if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
			atomic_inc(&pages[ind]->_count);
			BUG_ON(atomic_read(&pages[ind]->_count) != 2);
		}
		list_add_tail(&pages[ind++]->lru, &pool->page_list);
	}

	pool->count += ind;
	pp_fill_add(pool, ind);

	return ind;
}
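/*
 * Illustrative usage sketch (mirroring what nvmap_pp_do_background_fill()
 * does above): fill under the lock, then free whatever the pool refused:
 *
 *	u32 accepted;
 *
 *	nvmap_page_pool_lock(pool);
 *	accepted = __nvmap_page_pool_fill_lots_locked(pool, pages, nr);
 *	nvmap_page_pool_unlock(pool);
 *	for (; accepted < nr; accepted++)
 *		__free_page(pages[accepted]);
 */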
bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page)
{
	bool ret = false;

	if (pool) {
		nvmap_page_pool_lock(pool);
		ret = nvmap_page_pool_fill_locked(pool, page);
		nvmap_page_pool_unlock(pool);
	}

	return ret;
}
/*
 * Free the passed number of pages from the page pool. This happens regardless
 * of whether the page pools are enabled. This lets one disable the page pools
 * and then free all the memory therein.
 */
static int nvmap_page_pool_free(struct nvmap_page_pool *pool, int nr_free)
{
	int i = nr_free;
	struct page *page;

	if (!nr_free)
		return nr_free;

	nvmap_page_pool_lock(pool);
	while (i) {
		page = nvmap_page_pool_alloc_locked(pool, 1);
		if (!page)
			break;
		__free_page(page);
		i--;
	}
	nvmap_page_pool_unlock(pool);

	return i;
}
ulong nvmap_page_pool_get_unused_pages(void)
{
	ulong total = 0;

	if (nvmap_dev)
		total = nvmap_dev->pool.count;

	return total;
}
/*
 * Remove and free to the system all the pages currently in the page
 * pool. This operation will happen even if the page pools are disabled.
 */
int nvmap_page_pool_clear(void)
{
	struct page *page;
	struct nvmap_page_pool *pool = &nvmap_dev->pool;

	nvmap_page_pool_lock(pool);

	while ((page = nvmap_page_pool_alloc_locked(pool, 1)) != NULL)
		__free_page(page);

	/* The pool should now be empty; if pages somehow remain, bail out. */
	if (!list_empty(&pool->page_list)) {
		nvmap_page_pool_unlock(pool);
		return -ENOMEM;
	}

	nvmap_page_pool_unlock(pool);
	nvmap_pp_wake_up_allocator();

	return 0;
}
/*
 * Resizes the page pool to the passed size. If the passed size is 0 then
 * all associated resources are released back to the system. This operation
 * will only occur if the page pools are enabled.
 */
static void nvmap_page_pool_resize(struct nvmap_page_pool *pool, int size)
{
	if (!enable_pp || size == pool->max || size < 0)
		return;

	nvmap_page_pool_lock(pool);

	while (pool->count > size)
		__free_page(nvmap_page_pool_alloc_locked(pool, 0));

	/* Log before updating pool->max so the "from" value is the old max. */
	pr_debug("page pool resized to %d from %d pages\n", size, pool->max);
	pool->max = size;

	nvmap_page_pool_unlock(pool);
}
static int nvmap_page_pool_shrink(struct shrinker *shrinker,
				  struct shrink_control *sc)
{
	int shrink_pages = sc->nr_to_scan;

	if (!shrink_pages)
		goto out;

	pr_debug("sh_pages=%d", shrink_pages);

	shrink_pages = nvmap_page_pool_free(&nvmap_dev->pool, shrink_pages);

out:
	return nvmap_page_pool_get_unused_pages();
}
static struct shrinker nvmap_page_pool_shrinker = {
	.shrink = nvmap_page_pool_shrink,
	.seeks = 1,
};
static void shrink_page_pools(int *total_pages, int *available_pages)
{
	struct shrink_control sc;

	/* Initialize gfp_mask unconditionally so the second shrink call
	 * never sees an uninitialized field. */
	sc.gfp_mask = GFP_KERNEL;
	if (*total_pages == 0) {
		sc.nr_to_scan = 0;
		*total_pages = nvmap_page_pool_shrink(NULL, &sc);
	}
	sc.nr_to_scan = *total_pages;
	*available_pages = nvmap_page_pool_shrink(NULL, &sc);
}
#if NVMAP_TEST_PAGE_POOL_SHRINKER
static int shrink_pp;
static int shrink_set(const char *arg, const struct kernel_param *kp)
{
	int cpu = smp_processor_id();
	unsigned long long t1, t2;
	int total_pages, available_pages;

	param_set_int(arg, kp);

	if (shrink_pp) {
		total_pages = shrink_pp;
		t1 = cpu_clock(cpu);
		shrink_page_pools(&total_pages, &available_pages);
		t2 = cpu_clock(cpu);
		pr_debug("shrink page pools: time=%lldns, "
			 "total_pages_released=%d, free_pages_available=%d",
			 t2 - t1, total_pages, available_pages);
	}

	return 0;
}
static int shrink_get(char *buff, const struct kernel_param *kp)
{
	return param_get_int(buff, kp);
}

static struct kernel_param_ops shrink_ops = {
	.get = shrink_get,
	.set = shrink_set,
};

module_param_cb(shrink_page_pools, &shrink_ops, &shrink_pp, 0644);
#endif
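/*
 * Illustrative shell usage (the /sys/module path is an assumption based on
 * this file's object name; adjust to the actual KBUILD_MODNAME): writing a
 * page count to the parameter runs a timed shrink of the pools.
 *
 *	echo 1024 > /sys/module/nvmap_pp/parameters/shrink_page_pools
 */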
static int enable_pp_set(const char *arg, const struct kernel_param *kp)
{
	int ret;

	ret = param_set_bool(arg, kp);
	if (ret)
		return ret;

	if (!enable_pp)
		nvmap_page_pool_clear();

	return 0;
}

static int enable_pp_get(char *buff, const struct kernel_param *kp)
{
	/* enable_pp is a bool, so read it back with the bool getter. */
	return param_get_bool(buff, kp);
}

static struct kernel_param_ops enable_pp_ops = {
	.get = enable_pp_get,
	.set = enable_pp_set,
};

module_param_cb(enable_page_pools, &enable_pp_ops, &enable_pp, 0644);
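/*
 * Illustrative shell usage (same /sys/module path assumption as above):
 * disabling the pools immediately frees every pooled page via
 * nvmap_page_pool_clear() in enable_pp_set().
 *
 *	echo 0 > /sys/module/nvmap_pp/parameters/enable_page_pools
 */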
static int pool_size_set(const char *arg, const struct kernel_param *kp)
{
	param_set_int(arg, kp);
	nvmap_page_pool_resize(&nvmap_dev->pool, pool_size);
	return 0;
}

static int pool_size_get(char *buff, const struct kernel_param *kp)
{
	return param_get_int(buff, kp);
}

static struct kernel_param_ops pool_size_ops = {
	.get = pool_size_get,
	.set = pool_size_set,
};

module_param_cb(pool_size, &pool_size_ops, &pool_size, 0644);
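/*
 * Illustrative shell usage (same /sys/module path assumption as above):
 * shrinking pool_size frees the excess pages right away in
 * nvmap_page_pool_resize(); growing it only raises the cap, which the
 * background allocator then fills over time.
 *
 *	echo 8192 > /sys/module/nvmap_pp/parameters/pool_size
 */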
int nvmap_page_pool_debugfs_init(struct dentry *nvmap_root)
{
	struct dentry *pp_root;

	if (!nvmap_root)
		return -ENODEV;

	pp_root = debugfs_create_dir("pagepool", nvmap_root);
	if (!pp_root)
		return -ENODEV;

	debugfs_create_u32("page_pool_available_pages",
			   S_IRUGO, pp_root,
			   &nvmap_dev->pool.count);
#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
	debugfs_create_u64("page_pool_allocs",
			   S_IRUGO, pp_root,
			   &nvmap_dev->pool.allocs);
	debugfs_create_u64("page_pool_fills",
			   S_IRUGO, pp_root,
			   &nvmap_dev->pool.fills);
	debugfs_create_u64("page_pool_hits",
			   S_IRUGO, pp_root,
			   &nvmap_dev->pool.hits);
	debugfs_create_u64("page_pool_misses",
			   S_IRUGO, pp_root,
			   &nvmap_dev->pool.misses);
#endif

	return 0;
}
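/*
 * Illustrative shell usage (assuming debugfs is mounted at /sys/kernel/debug
 * and nvmap_root is the "nvmap" directory):
 *
 *	cat /sys/kernel/debug/nvmap/pagepool/page_pool_available_pages
 */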
int nvmap_page_pool_init(struct nvmap_device *dev)
{
	unsigned long totalram_mb;
	struct sysinfo info;
	struct nvmap_page_pool *pool = &dev->pool;
#ifdef CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP
	int i;
	struct page *page;
	int pages_to_fill;
	int highmem_pages = 0;
#endif

	memset(pool, 0x0, sizeof(*pool));
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->page_list);

	si_meminfo(&info);
	totalram_mb = (info.totalram * info.mem_unit) >> 20;
	pr_info("Total MB RAM: %lu\n", totalram_mb);

	if (!CONFIG_NVMAP_PAGE_POOL_SIZE)
		/* The ratio is pool pages per 1K RAM pages, hence the >> 10. */
		pool->max = (info.totalram * NVMAP_PP_POOL_SIZE) >> 10;
	else
		pool->max = CONFIG_NVMAP_PAGE_POOL_SIZE;

	if (pool->max >= info.totalram)
		goto fail;
	pool_size = pool->max;

	pr_info("nvmap page pool size: %u pages (%u MB)\n", pool->max,
		(pool->max * info.mem_unit) >> 20);
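	/*
	 * Worked sizing example (illustrative, assuming 4 KiB pages and,
	 * hypothetically, NVMAP_PP_POOL_SIZE == 128): on a 1 GB system
	 * info.totalram is 262144 pages, so the ratio branch above yields
	 * pool->max = (262144 * 128) >> 10 = 32768 pages, i.e. 128 MB.
	 */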
	register_shrinker(&nvmap_page_pool_shrinker);

	background_allocator = kthread_create(nvmap_background_zero_allocator,
					      NULL, "nvmap-bz"); /* name assumed */
	if (IS_ERR_OR_NULL(background_allocator))
		goto fail;

#ifdef CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP
	pages_to_fill = CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP_SIZE * SZ_1M /
			PAGE_SIZE;
	pages_to_fill = pages_to_fill ? : pool->count;

	nvmap_page_pool_lock(pool);
	atomic_set(&pp_dirty, 1);
	for (i = 0; i < pages_to_fill; i++) {
		page = alloc_page(GFP_NVMAP);
		if (!page)
			goto done;
		if (!nvmap_page_pool_fill_locked(pool, page)) {
			__free_page(page);
			goto done;
		}
		if (PageHighMem(page))
			highmem_pages++;
	}

done:
	si_meminfo(&info);
	pr_info("highmem=%d, pool_size=%d, "
		"totalram=%lu, freeram=%lu, totalhigh=%lu, freehigh=%lu\n",
		highmem_pages, pool->count,
		info.totalram, info.freeram, info.totalhigh, info.freehigh);
	nvmap_page_pool_unlock(pool);
#endif

	return 0;
fail:
	nvmap_page_pool_fini(dev);
	return -ENOMEM;
}
int nvmap_page_pool_fini(struct nvmap_device *dev)
{
	struct nvmap_page_pool *pool = &dev->pool;

	if (!IS_ERR_OR_NULL(background_allocator))
		kthread_stop(background_allocator);

	WARN_ON(!list_empty(&pool->page_list));

	return 0;
}