/*
 * drivers/video/tegra/nvmap/nvmap_pp.c
 *
 * Manage page pools to speed up page allocation.
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/shrinker.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>

#include "nvmap_priv.h"

#define NVMAP_TEST_PAGE_POOL_SHRINKER     1
#define PENDING_PAGES_SIZE                (SZ_1M / PAGE_SIZE)
#define MIN_AVAILABLE_MB                  128

static bool enable_pp = true;
static int pool_size;

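/*
 * State shared with the background fill thread: the thread itself, a scratch
 * array used while batching pages into the pool, the number of pages the
 * thread has been asked to allocate, and a flag marking the pool contents as
 * needing a cache clean before they are handed out.
 */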
static struct task_struct *background_allocator;
static struct page *pending_pages[PENDING_PAGES_SIZE];
static atomic_t bg_pages_to_fill;
static atomic_t pp_dirty;

#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
{
        *dbg_var += nr;
}
#else
#define __pp_dbg_var_add(dbg_var, nr)
#endif

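/*
 * Per-pool debug counters (allocs, fills, hits, misses). These expand to
 * nothing unless CONFIG_NVMAP_PAGE_POOL_DEBUG is enabled.
 */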
#define pp_alloc_add(pool, nr) __pp_dbg_var_add(&(pool)->allocs, nr)
#define pp_fill_add(pool, nr)  __pp_dbg_var_add(&(pool)->fills, nr)
#define pp_hit_add(pool, nr)   __pp_dbg_var_add(&(pool)->hits, nr)
#define pp_miss_add(pool, nr)  __pp_dbg_var_add(&(pool)->misses, nr)

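/*
 * Cache maintenance is deferred: filling marks the pool dirty, and the
 * caches are cleaned in one pass here, either from the background fill path
 * or right before pooled pages are handed out.
 */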
static void pp_clean_cache(void)
{
        if (atomic_read(&pp_dirty)) {
                /*
                 * Make sure any data in the caches is cleaned out before
                 * passing these pages to userspace. Otherwise it can lead
                 * to corruption in pages that get mapped as something other
                 * than WB in userspace, and to leaked kernel data.
                 */
                inner_clean_cache_all();
                outer_clean_all();
                atomic_set(&pp_dirty, 0);
        }
}

/*
 * Allocate n pages one by one. Not the most efficient allocation scheme ever;
 * however, it makes it easier later on to handle single pages or small
 * numbers of pages from the page pool being freed individually.
 */
static int __nvmap_pp_alloc_n_pages(struct page **pages, int n, gfp_t flags)
{
        int i;

        for (i = 0; i < n; i++) {
                pages[i] = alloc_page(flags);
                if (!pages[i])
                        goto no_mem;
        }

        return 0;

no_mem:
        for (i -= 1; i >= 0; i--)
                __free_page(pages[i]);
        return -ENOMEM;
}

/*
 * Actually do the fill. This requires a few steps:
 *
 *  1. Allocate a bunch of pages.
 *
 *  2. Fill the page pool with the allocated pages. We don't want to hold the
 *     PP lock for too long so this is the only time we hold the PP lock.
 *
 *  3. Rinse and repeat until we have allocated all the pages we think we need
 *     or the page pool is full. Since we are not holding the lock for the
 *     entire fill it is possible that other pages were filled into the pool.
 *
 *  4. Free any left over pages if the pool is filled before we finish.
 */
static void nvmap_pp_do_background_fill(struct nvmap_page_pool *pool)
{
        int err;
        u32 pages = 0, nr, i;
        gfp_t gfp = GFP_NVMAP | __GFP_NOMEMALLOC |
                    __GFP_NORETRY | __GFP_NO_KSWAPD;

        pages = (u32)atomic_xchg(&bg_pages_to_fill, pages);

        if (!pages || !enable_pp)
                return;

        /* If this param is set, force zero page allocation. */
        if (zero_memory)
                gfp |= __GFP_ZERO;

        do {
                nr = min_t(u32, PENDING_PAGES_SIZE, pages);
                err = __nvmap_pp_alloc_n_pages(pending_pages, nr, gfp);
                if (err) {
                        pr_info("Failed to alloc %u pages for PP!\n", pages);
                        return;
                }

                nvmap_page_pool_lock(pool);
                atomic_set(&pp_dirty, 1);
                i = __nvmap_page_pool_fill_lots_locked(pool, pending_pages, nr);
                nvmap_page_pool_unlock(pool);
                pages -= nr;
        } while (pages && i == nr);

        for (; i < nr; i++)
                __free_page(pending_pages[i]);
        /*
         * Clean the cache in the background so that allocations immediately
         * after the fill don't suffer the cache-clean overhead.
         */
        pp_clean_cache();
}

/*
 * This thread fills the page pools with zeroed pages. We avoid releasing the
 * pages directly back into the page pools since we would then have to zero
 * them ourselves. Instead it is easier to just reallocate zeroed pages. This
 * happens in the background so that the overhead of allocating zeroed pages is
 * not directly seen by userspace. Of course if the page pools are empty user
 * space will suffer.
 */
static int nvmap_background_zero_allocator(void *arg)
{
        pr_info("PP alloc thread starting.\n");

        while (1) {
                if (kthread_should_stop())
                        break;

                nvmap_pp_do_background_fill(&nvmap_dev->pool);

                /* Pending work is done - go to sleep. */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
        }

        return 0;
}

/*
 * Call this if the background allocator should possibly wake up. This
 * function will check to make sure it's actually a good idea for that to
 * happen before waking the allocator up.
 */
static inline void nvmap_pp_wake_up_allocator(void)
{
        struct nvmap_page_pool *pool = &nvmap_dev->pool;
        struct sysinfo info;
        int free_pages, tmp;

        if (!enable_pp)
                return;

        /*
         * Heuristic: if we don't need to prefill explicitly zeroed memory,
         * then lots of memory can be placed back in the pools by possible
         * frees. Therefore don't fill the pool unless we really need to, as
         * we may get more memory without needing to alloc pages.
         */
        if (!zero_memory && pool->count > NVMAP_PP_ZERO_MEM_FILL_MIN)
                return;

        if (pool->length - pool->count < NVMAP_PP_DEF_FILL_THRESH)
                return;

        si_meminfo(&info);
        free_pages = (int)info.freeram;

        tmp = free_pages - (MIN_AVAILABLE_MB << (20 - PAGE_SHIFT));
        if (tmp <= 0)
                return;

        /* Let the background thread know how much memory to fill. */
        atomic_set(&bg_pages_to_fill,
                   min(tmp, (int)(pool->length - pool->count)));
        wake_up_process(background_allocator);
}

/*
 * This removes a page from the page pool. If force_alloc is set, then the
 * enable_pp flag is ignored.
 */
static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool,
                                                 int force_alloc)
{
        struct page *page;

        if ((!force_alloc && !enable_pp) || !pool->page_array)
                return NULL;

        if (pp_empty(pool)) {
                pp_miss_add(pool, 1);
                return NULL;
        }

        if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
                BUG_ON(pool->count == 0);

        pp_clean_cache();
        page = pool->page_array[pool->alloc];
        pool->page_array[pool->alloc] = NULL;
        nvmap_pp_alloc_inc(pool);
        pool->count--;

        /* Sanity check. */
        if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
                atomic_dec(&page->_count);
                BUG_ON(atomic_read(&page->_count) != 1);
        }

        pp_alloc_add(pool, 1);
        pp_hit_add(pool, 1);

        return page;
}

/*
 * Alloc a bunch of pages from the page pool. This will alloc as many as it can
 * and return the number of pages allocated. Pages are placed into the passed
 * array in a linear fashion starting from index 0.
 *
 * You must lock the page pool before using this.
 */
int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
                                        struct page **pages, u32 nr)
{
        u32 real_nr;
        u32 ind = 0;

        if (!enable_pp || !pool->page_array)
                return 0;

        pp_clean_cache();

        real_nr = min_t(u32, nr, pool->count);

        while (real_nr--) {
                if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
                        BUG_ON(pp_empty(pool));
                        BUG_ON(!pool->page_array[pool->alloc]);
                }
                pages[ind++] = pool->page_array[pool->alloc];
                pool->page_array[pool->alloc] = NULL;
                nvmap_pp_alloc_inc(pool);
                if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
                        atomic_dec(&pages[ind - 1]->_count);
                        BUG_ON(atomic_read(&pages[ind - 1]->_count) != 1);
                }
        }

        pool->count -= ind;
        pp_alloc_add(pool, ind);
        pp_hit_add(pool, ind);
        pp_miss_add(pool, nr - ind);
        nvmap_pp_wake_up_allocator();

        return ind;
}

/*
 * This adds a page to the pool. Returns true if the passed page was added;
 * if the pool is full, the operation fails and returns false.
 */
static bool nvmap_page_pool_fill_locked(struct nvmap_page_pool *pool,
                                        struct page *page)
{
        if (!enable_pp || !pool->page_array)
                return false;

        if (pp_full(pool))
                return false;

        /* Sanity check. */
        if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
                atomic_inc(&page->_count);
                BUG_ON(atomic_read(&page->_count) != 2);
                BUG_ON(pool->count > pool->length);
                BUG_ON(pool->page_array[pool->fill] != NULL);
        }

        pool->page_array[pool->fill] = page;
        nvmap_pp_fill_inc(pool);
        pool->count++;
        pp_fill_add(pool, 1);

        return true;
}

/*
 * Fill a bunch of pages into the page pool. This will fill as many as it can
 * and return the number of pages filled. Pages are used from the start of the
 * passed page pointer array in a linear fashion.
 *
 * You must lock the page pool before using this.
 */
int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
                                       struct page **pages, u32 nr)
{
        u32 real_nr;
        u32 ind = 0;

        if (!enable_pp || !pool->page_array)
                return 0;

        real_nr = min_t(u32, pool->length - pool->count, nr);
        if (real_nr == 0)
                return 0;

        while (real_nr--) {
                if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
                        BUG_ON(pp_full(pool));
                        BUG_ON(pool->page_array[pool->fill]);
                        atomic_inc(&pages[ind]->_count);
                        BUG_ON(atomic_read(&pages[ind]->_count) != 2);
                }
                pool->page_array[pool->fill] = pages[ind++];
                nvmap_pp_fill_inc(pool);
        }

        pool->count += ind;
        pp_fill_add(pool, ind);

        return ind;
}

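/* Number of pages currently held in the pool. */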
static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
{
        return pool->count;
}

/*
 * Free the passed number of pages from the page pool. This happens regardless
 * of whether the page pools are enabled. This lets one disable the page pools
 * and then free all the memory therein.
 */
static int nvmap_page_pool_free(struct nvmap_page_pool *pool, int nr_free)
{
        int i = nr_free;
        struct page *page;

        if (!nr_free)
                return nr_free;

        nvmap_page_pool_lock(pool);
        while (i) {
                page = nvmap_page_pool_alloc_locked(pool, 1);
                if (!page)
                        break;
                __free_page(page);
                i--;
        }
        nvmap_page_pool_unlock(pool);

        return i;
}

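/* Report how many unused pages the page pool is currently holding. */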
ulong nvmap_page_pool_get_unused_pages(void)
{
        int total = 0;

        if (!nvmap_dev)
                return 0;

        total = nvmap_page_pool_get_available_count(&nvmap_dev->pool);

        return total;
}

/*
 * Remove and free to the system all the pages currently in the page
 * pool. This operation will happen even if the page pools are disabled.
 */
int nvmap_page_pool_clear(void)
{
        struct page *page;
        struct nvmap_page_pool *pool = &nvmap_dev->pool;

        if (!pool->page_array)
                return 0;

        nvmap_page_pool_lock(pool);

        while ((page = nvmap_page_pool_alloc_locked(pool, 1)) != NULL)
                __free_page(page);

        /* For some reason, if an error occurred... */
        if (!pp_empty(pool)) {
                nvmap_page_pool_unlock(pool);
                return -ENOMEM;
        }

        nvmap_page_pool_unlock(pool);
        nvmap_pp_wake_up_allocator();

        return 0;
}

/*
 * Resizes the page pool to the passed size. If the passed size is 0 then
 * all associated resources are released back to the system. This operation
 * will only occur if the page pools are enabled.
 */
static void nvmap_page_pool_resize(struct nvmap_page_pool *pool, int size)
{
        int ind;
        struct page **page_array = NULL;

        if (!enable_pp || size == pool->length || size < 0)
                return;

        nvmap_page_pool_lock(pool);
        if (size == 0) {
                vfree(pool->page_array);
                pool->page_array = NULL;
                pool->alloc = 0;
                pool->fill = 0;
                pool->count = 0;
                pool->length = 0;
                goto out;
        }

        page_array = vzalloc(sizeof(struct page *) * size);
        if (!page_array)
                goto fail;

        /*
         * Reuse what pages we can.
         */
        ind = __nvmap_page_pool_alloc_lots_locked(pool, page_array, size);

        /*
         * And free anything that might be left over.
         */
        while (pool->page_array && !pp_empty(pool))
                __free_page(nvmap_page_pool_alloc_locked(pool, 0));

        swap(page_array, pool->page_array);
        pool->alloc = 0;
        pool->fill = (ind == size ? 0 : ind);
        pool->count = ind;
        pool->length = size;
        pool_size = size;
        vfree(page_array);

out:
        pr_debug("page pool resized to %d pages\n", size);
        goto exit;
fail:
        vfree(page_array);
        pr_err("page pool resize failed\n");
exit:
        nvmap_page_pool_unlock(pool);
}

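/*
 * Shrinker callback. With sc->nr_to_scan == 0 this is only a query;
 * otherwise up to nr_to_scan pages are freed from the pool. Either way,
 * the number of pages still held in the pool is returned.
 */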
static int nvmap_page_pool_shrink(struct shrinker *shrinker,
                                  struct shrink_control *sc)
{
        int shrink_pages = sc->nr_to_scan;

        if (!shrink_pages)
                goto out;

        pr_debug("sh_pages=%d\n", shrink_pages);

        shrink_pages = nvmap_page_pool_free(&nvmap_dev->pool, shrink_pages);
out:
        return nvmap_page_pool_get_unused_pages();
}

static struct shrinker nvmap_page_pool_shrinker = {
        .shrink = nvmap_page_pool_shrink,
        .seeks = 1,
};

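/*
 * Helper for the shrink test hook below: query the current pool size when
 * *total_pages is zero, then shrink by that many pages and report how many
 * pages remain available.
 */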
static void shrink_page_pools(int *total_pages, int *available_pages)
{
        struct shrink_control sc;

        if (*total_pages == 0) {
                sc.gfp_mask = GFP_KERNEL;
                sc.nr_to_scan = 0;
                *total_pages = nvmap_page_pool_shrink(NULL, &sc);
        }
        sc.nr_to_scan = *total_pages;
        *available_pages = nvmap_page_pool_shrink(NULL, &sc);
}

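/*
 * Test hook: writing N to the shrink_page_pools module parameter shrinks
 * the pool by up to N pages and logs how long the shrink took.
 */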
#if NVMAP_TEST_PAGE_POOL_SHRINKER
static int shrink_pp;
static int shrink_set(const char *arg, const struct kernel_param *kp)
{
        int cpu = smp_processor_id();
        unsigned long long t1, t2;
        int total_pages, available_pages;

        param_set_int(arg, kp);

        if (shrink_pp) {
                total_pages = shrink_pp;
                t1 = cpu_clock(cpu);
                shrink_page_pools(&total_pages, &available_pages);
                t2 = cpu_clock(cpu);
                pr_debug("shrink page pools: time=%lldns, "
                        "total_pages_released=%d, free_pages_available=%d\n",
                        t2 - t1, total_pages, available_pages);
        }
        return 0;
}

static int shrink_get(char *buff, const struct kernel_param *kp)
{
        return param_get_int(buff, kp);
}

static struct kernel_param_ops shrink_ops = {
        .get = shrink_get,
        .set = shrink_set,
};

module_param_cb(shrink_page_pools, &shrink_ops, &shrink_pp, 0644);
#endif

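/*
 * Runtime enable/disable of the page pools via the enable_page_pools module
 * parameter. Disabling the pools also drains any pages they currently hold.
 */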
static int enable_pp_set(const char *arg, const struct kernel_param *kp)
{
        int ret;

        ret = param_set_bool(arg, kp);
        if (ret)
                return ret;

        if (!enable_pp)
                nvmap_page_pool_clear();

        return 0;
}

static int enable_pp_get(char *buff, const struct kernel_param *kp)
{
        return param_get_bool(buff, kp);
}

static struct kernel_param_ops enable_pp_ops = {
        .get = enable_pp_get,
        .set = enable_pp_set,
};

module_param_cb(enable_page_pools, &enable_pp_ops, &enable_pp, 0644);

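/* Resize the page pool at runtime via the pool_size module parameter. */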
static int pool_size_set(const char *arg, const struct kernel_param *kp)
{
        param_set_int(arg, kp);
        nvmap_page_pool_resize(&nvmap_dev->pool, pool_size);
        return 0;
}

static int pool_size_get(char *buff, const struct kernel_param *kp)
{
        return param_get_int(buff, kp);
}

static struct kernel_param_ops pool_size_ops = {
        .get = pool_size_get,
        .set = pool_size_set,
};

module_param_cb(pool_size, &pool_size_ops, &pool_size, 0644);

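/* Expose page pool statistics under the "pagepool" directory in nvmap's debugfs root. */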
int nvmap_page_pool_debugfs_init(struct dentry *nvmap_root)
{
        struct dentry *pp_root;

        if (!nvmap_root)
                return -ENODEV;

        pp_root = debugfs_create_dir("pagepool", nvmap_root);
        if (!pp_root)
                return -ENODEV;

        debugfs_create_u32("page_pool_available_pages",
                           S_IRUGO, pp_root,
                           &nvmap_dev->pool.count);
#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
        debugfs_create_u32("page_pool_alloc_ind",
                           S_IRUGO, pp_root,
                           &nvmap_dev->pool.alloc);
        debugfs_create_u32("page_pool_fill_ind",
                           S_IRUGO, pp_root,
                           &nvmap_dev->pool.fill);
        debugfs_create_u64("page_pool_allocs",
                           S_IRUGO, pp_root,
                           &nvmap_dev->pool.allocs);
        debugfs_create_u64("page_pool_fills",
                           S_IRUGO, pp_root,
                           &nvmap_dev->pool.fills);
        debugfs_create_u64("page_pool_hits",
                           S_IRUGO, pp_root,
                           &nvmap_dev->pool.hits);
        debugfs_create_u64("page_pool_misses",
                           S_IRUGO, pp_root,
                           &nvmap_dev->pool.misses);
#endif

        return 0;
}

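/*
 * Size the pool (from CONFIG_NVMAP_PAGE_POOL_SIZE, or as a fraction of total
 * RAM), allocate the page array, register the shrinker and start the
 * background fill thread. With CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP the pool
 * is also prefilled at init time.
 */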
int nvmap_page_pool_init(struct nvmap_device *dev)
{
        static int reg = 1;
        unsigned long totalram_mb;
        struct sysinfo info;
        struct nvmap_page_pool *pool = &dev->pool;
#ifdef CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP
        int i;
        struct page *page;
        int pages_to_fill;
        int highmem_pages = 0;
#endif

        memset(pool, 0x0, sizeof(*pool));
        mutex_init(&pool->lock);

        si_meminfo(&info);
        totalram_mb = (info.totalram * info.mem_unit) >> 20;
        pr_info("Total MB RAM: %lu\n", totalram_mb);

        if (!CONFIG_NVMAP_PAGE_POOL_SIZE)
                /*
                 * The ratio is KB per MB, so the product is an amount in KB;
                 * shifting right by 2 (4 KB pages) gives the total number of
                 * pages in the pool.
                 */
                pool->length = (totalram_mb * NVMAP_PP_POOL_SIZE) >> 2;
        else
                pool->length = CONFIG_NVMAP_PAGE_POOL_SIZE;

        if (pool->length >= info.totalram)
                goto fail;
        pool_size = pool->length;

        pr_info("nvmap page pool size: %u pages (%u MB)\n", pool->length,
                pool->length >> 8);
        pool->page_array = vzalloc(sizeof(struct page *) * pool->length);
        if (!pool->page_array)
                goto fail;

        if (reg) {
                reg = 0;
                register_shrinker(&nvmap_page_pool_shrinker);
        }

        background_allocator = kthread_create(nvmap_background_zero_allocator,
                                            NULL, "nvmap-bz");
        if (IS_ERR_OR_NULL(background_allocator))
                goto fail;

#ifdef CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP
        pages_to_fill = CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP_SIZE * SZ_1M /
                        PAGE_SIZE;
        pages_to_fill = pages_to_fill ? : pool->length;

        nvmap_page_pool_lock(pool);
        atomic_set(&pp_dirty, 1);
        for (i = 0; i < pages_to_fill; i++) {
                page = alloc_page(GFP_NVMAP);
                if (!page)
                        goto done;
                if (!nvmap_page_pool_fill_locked(pool, page)) {
                        __free_page(page);
                        goto done;
                }
                if (PageHighMem(page))
                        highmem_pages++;
        }

        si_meminfo(&info);
        pr_info("highmem=%d, pool_size=%d, "
                "totalram=%lu, freeram=%lu, totalhigh=%lu, freehigh=%lu\n",
                highmem_pages, pool->length,
                info.totalram, info.freeram, info.totalhigh, info.freehigh);
done:
        pp_clean_cache();
        nvmap_page_pool_unlock(pool);
#endif
        return 0;
fail:
        nvmap_page_pool_fini(dev);
        return -ENOMEM;
}

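/* Stop the background allocator thread and release the pool's page array. */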
int nvmap_page_pool_fini(struct nvmap_device *dev)
{
        struct nvmap_page_pool *pool = &dev->pool;

        if (!IS_ERR_OR_NULL(background_allocator))
                kthread_stop(background_allocator);
        pool->length = 0;
        vfree(pool->page_array);

        return 0;
}